barman-2.18/0000755000621200062120000000000014172556766011103 5ustar 00000000000000barman-2.18/MANIFEST.in0000644000621200062120000000030414172556763012633 0ustar 00000000000000recursive-include barman *.py recursive-include rpm * recursive-include doc * include scripts/barman.bash_completion include AUTHORS NEWS ChangeLog LICENSE MANIFEST.in setup.py INSTALL README.rst barman-2.18/barman/0000755000621200062120000000000014172556766012343 5ustar 00000000000000barman-2.18/barman/hooks.py0000644000621200062120000002620214172556763014037 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains the logic to run hook scripts """ import json import logging import time from barman import version from barman.command_wrappers import Command from barman.exceptions import AbortedRetryHookScript, UnknownBackupIdException from barman.utils import force_str _logger = logging.getLogger(__name__) class HookScriptRunner(object): def __init__( self, backup_manager, name, phase=None, error=None, retry=False, **extra_env ): """ Execute a hook script managing its environment """ self.backup_manager = backup_manager self.name = name self.extra_env = extra_env self.phase = phase self.error = error self.retry = retry self.environment = None self.exit_status = None self.exception = None self.script = None self.reset() def reset(self): """ Reset the status of the class. 
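        After a reset the environment holds only the static keys rebuilt
        from the Barman configuration; the values below are purely
        illustrative of what a hook script will later find exported
        (per-backup and per-WAL keys are added by the env_from_* helpers):

            BARMAN_VERSION=2.18
            BARMAN_SERVER=main
            BARMAN_CONFIGURATION=/etc/barman.conf
            BARMAN_HOOK=backup_script
            BARMAN_RETRY=0
            BARMAN_PHASE=pre    (only when a phase is set)
            BARMAN_ERROR=...    (only when an error is set)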
""" self.environment = dict(self.extra_env) config_file = self.backup_manager.config.config.config_file self.environment.update( { "BARMAN_VERSION": version.__version__, "BARMAN_SERVER": self.backup_manager.config.name, "BARMAN_CONFIGURATION": config_file, "BARMAN_HOOK": self.name, "BARMAN_RETRY": str(1 if self.retry else 0), } ) if self.error: self.environment["BARMAN_ERROR"] = force_str(self.error) if self.phase: self.environment["BARMAN_PHASE"] = self.phase script_config_name = "%s_%s" % (self.phase, self.name) else: script_config_name = self.name self.script = getattr(self.backup_manager.config, script_config_name, None) self.exit_status = None self.exception = None def env_from_backup_info(self, backup_info): """ Prepare the environment for executing a script :param BackupInfo backup_info: the backup metadata """ try: previous_backup = self.backup_manager.get_previous_backup( backup_info.backup_id ) if previous_backup: previous_backup_id = previous_backup.backup_id else: previous_backup_id = "" except UnknownBackupIdException: previous_backup_id = "" try: next_backup = self.backup_manager.get_next_backup(backup_info.backup_id) if next_backup: next_backup_id = next_backup.backup_id else: next_backup_id = "" except UnknownBackupIdException: next_backup_id = "" self.environment.update( { "BARMAN_BACKUP_DIR": backup_info.get_basebackup_directory(), "BARMAN_BACKUP_ID": backup_info.backup_id, "BARMAN_PREVIOUS_ID": previous_backup_id, "BARMAN_NEXT_ID": next_backup_id, "BARMAN_STATUS": backup_info.status, "BARMAN_ERROR": backup_info.error or "", } ) def env_from_wal_info(self, wal_info, full_path=None, error=None): """ Prepare the environment for executing a script :param WalFileInfo wal_info: the backup metadata :param str full_path: override wal_info.fullpath() result :param str|Exception error: An error message in case of failure """ self.environment.update( { "BARMAN_SEGMENT": wal_info.name, "BARMAN_FILE": str( full_path if full_path is not None else wal_info.fullpath(self.backup_manager.server) ), "BARMAN_SIZE": str(wal_info.size), "BARMAN_TIMESTAMP": str(wal_info.time), "BARMAN_COMPRESSION": wal_info.compression or "", "BARMAN_ERROR": force_str(error or ""), } ) def env_from_recover( self, backup_info, dest, tablespaces, remote_command, error=None, **kwargs ): """ Prepare the environment for executing a script :param BackupInfo backup_info: the backup metadata :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. :param str|Exception error: An error message in case of failure """ self.env_from_backup_info(backup_info) # Prepare a JSON representation of tablespace map tablespaces_map = "" if tablespaces: tablespaces_map = json.dumps(tablespaces, sort_keys=True) # Prepare a JSON representation of additional recovery options # Skip any empty argument kwargs_filtered = dict([(k, v) for k, v in kwargs.items() if v]) recover_options = "" if kwargs_filtered: recover_options = json.dumps(kwargs_filtered, sort_keys=True) self.environment.update( { "BARMAN_DESTINATION_DIRECTORY": str(dest), "BARMAN_TABLESPACES": tablespaces_map, "BARMAN_REMOTE_COMMAND": str(remote_command or ""), "BARMAN_RECOVER_OPTIONS": recover_options, "BARMAN_ERROR": force_str(error or ""), } ) def run(self): """ Run a a hook script if configured. 
This method must never throw any exception """ # noinspection PyBroadException try: if self.script: _logger.debug("Attempt to run %s: %s", self.name, self.script) cmd = Command( self.script, env_append=self.environment, path=self.backup_manager.server.path, shell=True, check=False, ) self.exit_status = cmd() if self.exit_status != 0: details = "%s returned %d\nOutput details:\n" % ( self.script, self.exit_status, ) details += cmd.out details += cmd.err _logger.warning(details) else: _logger.debug("%s returned %d", self.script, self.exit_status) return self.exit_status except Exception as e: _logger.exception("Exception running %s", self.name) self.exception = e return None class RetryHookScriptRunner(HookScriptRunner): """ A 'retry' hook script is a special kind of hook script that Barman tries to run indefinitely until it either returns a SUCCESS or ABORT exit code. Retry hook scripts are executed immediately before (pre) and after (post) the command execution. Standard hook scripts are executed immediately before (pre) and after (post) the retry hook scripts. """ # Failed attempts before sleeping for NAP_TIME seconds ATTEMPTS_BEFORE_NAP = 5 # Short break after a failure (in seconds) BREAK_TIME = 3 # Long break (nap, in seconds) after ATTEMPTS_BEFORE_NAP failures NAP_TIME = 60 # ABORT (and STOP) exit code EXIT_ABORT_STOP = 63 # ABORT (and CONTINUE) exit code EXIT_ABORT_CONTINUE = 62 # SUCCESS exit code EXIT_SUCCESS = 0 def __init__(self, backup_manager, name, phase=None, error=None, **extra_env): super(RetryHookScriptRunner, self).__init__( backup_manager, name, phase, error, retry=True, **extra_env ) def run(self): """ Run a a 'retry' hook script, if required by configuration. Barman will retry to run the script indefinitely until it returns a EXIT_SUCCESS, or an EXIT_ABORT_CONTINUE, or an EXIT_ABORT_STOP code. There are BREAK_TIME seconds of sleep between every try. Every ATTEMPTS_BEFORE_NAP failures, Barman will sleep for NAP_TIME seconds. """ # If there is no script, exit if self.script is not None: # Keep track of the number of attempts attempts = 1 while True: # Run the script using the standard hook method (inherited) super(RetryHookScriptRunner, self).run() # Run the script until it returns EXIT_ABORT_CONTINUE, # or an EXIT_ABORT_STOP, or EXIT_SUCCESS if self.exit_status in ( self.EXIT_ABORT_CONTINUE, self.EXIT_ABORT_STOP, self.EXIT_SUCCESS, ): break # Check for the number of attempts if attempts <= self.ATTEMPTS_BEFORE_NAP: attempts += 1 # Take a short break _logger.debug("Retry again in %d seconds", self.BREAK_TIME) time.sleep(self.BREAK_TIME) else: # Reset the attempt number and take a longer nap _logger.debug( "Reached %d failures. Take a nap " "then retry again in %d seconds", self.ATTEMPTS_BEFORE_NAP, self.NAP_TIME, ) attempts = 1 time.sleep(self.NAP_TIME) # Outside the loop check for the exit code. 
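            # Exit codes handled here (see the class constants above):
            #   0  (EXIT_SUCCESS)        the hook succeeded, nothing to do
            #   62 (EXIT_ABORT_CONTINUE) warn the user, Barman carries on
            #   63 (EXIT_ABORT_STOP)     raise AbortedRetryHookScript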
if self.exit_status == self.EXIT_ABORT_CONTINUE: # Warn the user if the script exited with EXIT_ABORT_CONTINUE # Notify EXIT_ABORT_CONTINUE exit status because success and # failures are already managed in the superclass run method _logger.warning( "%s was aborted (got exit status %d, Barman resumes)", self.script, self.exit_status, ) elif self.exit_status == self.EXIT_ABORT_STOP: # Log the error and raise AbortedRetryHookScript exception _logger.error( "%s was aborted (got exit status %d, Barman requested to stop)", self.script, self.exit_status, ) raise AbortedRetryHookScript(self) return self.exit_status barman-2.18/barman/postgres.py0000644000621200062120000016664514172556763014602 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents the interface towards a PostgreSQL server. """ import atexit import logging from abc import ABCMeta import psycopg2 from psycopg2.errorcodes import DUPLICATE_OBJECT, OBJECT_IN_USE, UNDEFINED_OBJECT from psycopg2.extensions import STATUS_IN_TRANSACTION, STATUS_READY from psycopg2.extras import DictCursor, NamedTupleCursor from barman.exceptions import ( ConninfoException, PostgresAppNameError, PostgresConnectionError, PostgresDuplicateReplicationSlot, PostgresException, PostgresInvalidReplicationSlot, PostgresIsInRecovery, PostgresReplicationSlotInUse, PostgresReplicationSlotsFull, BackupFunctionsAccessRequired, PostgresSuperuserRequired, PostgresUnsupportedFeature, ) from barman.infofile import Tablespace from barman.postgres_plumbing import function_name_map from barman.remote_status import RemoteStatusMixin from barman.utils import force_str, simplify_version, with_metaclass from barman.xlog import DEFAULT_XLOG_SEG_SIZE # This is necessary because the CONFIGURATION_LIMIT_EXCEEDED constant # has been added in psycopg2 2.5, but Barman supports version 2.4.2+ so # in case of import error we declare a constant providing the correct value. try: from psycopg2.errorcodes import CONFIGURATION_LIMIT_EXCEEDED except ImportError: CONFIGURATION_LIMIT_EXCEEDED = "53400" _logger = logging.getLogger(__name__) _live_connections = [] """ List of connections to be closed at the interpreter shutdown """ @atexit.register def _atexit(): """ Ensure that all the connections are correctly closed at interpreter shutdown """ # Take a copy of the list because the conn.close() method modify it for conn in list(_live_connections): _logger.warning( "Forcing %s cleanup during process shut down.", conn.__class__.__name__ ) conn.close() class PostgreSQL(with_metaclass(ABCMeta, RemoteStatusMixin)): """ This abstract class represents a generic interface to a PostgreSQL server. """ CHECK_QUERY = "SELECT 1" def __init__(self, conninfo): """ Abstract base class constructor for PostgreSQL interface. 
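        The conninfo string is a libpq-style list of keyword=value pairs,
        for example (host and credentials are illustrative):

            host=pg01 port=5432 user=barman dbname=postgres

        It is parsed by parse_dsn(), which simply splits on whitespace and
        on the first '=' of each pair.
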
:param str conninfo: Connection information (aka DSN) """ super(PostgreSQL, self).__init__() self.conninfo = conninfo self._conn = None self.allow_reconnect = True # Build a dictionary with connection info parameters # This is mainly used to speed up search in conninfo try: self.conn_parameters = self.parse_dsn(conninfo) except (ValueError, TypeError) as e: _logger.debug(e) raise ConninfoException( 'Cannot connect to postgres: "%s" ' "is not a valid connection string" % conninfo ) @staticmethod def parse_dsn(dsn): """ Parse connection parameters from 'conninfo' :param str dsn: Connection information (aka DSN) :rtype: dict[str,str] """ # TODO: this might be made more robust in the future return dict(x.split("=", 1) for x in dsn.split()) @staticmethod def encode_dsn(parameters): """ Build a connection string from a dictionary of connection parameters :param dict[str,str] parameters: Connection parameters :rtype: str """ # TODO: this might be made more robust in the future return " ".join(["%s=%s" % (k, v) for k, v in sorted(parameters.items())]) def get_connection_string(self, application_name=None): """ Return the connection string, adding the application_name parameter if requested, unless already defined by user in the connection string :param str application_name: the application_name to add :return str: the connection string """ conn_string = self.conninfo # check if the application name is already defined by user if application_name and "application_name" not in self.conn_parameters: # Then add the it to the connection string conn_string += " application_name=%s" % application_name # adopt a secure schema-usage pattern. See: # https://www.postgresql.org/docs/current/libpq-connect.html if "options" not in self.conn_parameters: conn_string += " options=-csearch_path=" return conn_string def connect(self): """ Generic function for Postgres connection (using psycopg2) """ if not self._check_connection(): try: self._conn = psycopg2.connect(self.conninfo) self._conn.autocommit = True # If psycopg2 fails to connect to the host, # raise the appropriate exception except psycopg2.DatabaseError as e: raise PostgresConnectionError(force_str(e).strip()) # Register the connection to the list of live connections _live_connections.append(self) return self._conn def _check_connection(self): """ Return false if the connection is broken :rtype: bool """ # If the connection is not present return False if not self._conn: return False # Check if the connection works by running 'SELECT 1' cursor = None initial_status = None try: initial_status = self._conn.status cursor = self._conn.cursor() cursor.execute(self.CHECK_QUERY) # Rollback if initial status was IDLE because the CHECK QUERY # has started a new transaction. 
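            # (in psycopg2 STATUS_READY means "idle, no transaction open",
            # while STATUS_IN_TRANSACTION means a transaction was already
            # in progress before the check and is left untouched here)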
if initial_status == STATUS_READY: self._conn.rollback() except psycopg2.DatabaseError: # Connection is broken, so we need to reconnect self.close() # Raise an error if reconnect is not allowed if not self.allow_reconnect: raise PostgresConnectionError( "Connection lost, reconnection not allowed" ) return False finally: if cursor: cursor.close() return True def close(self): """ Close the connection to PostgreSQL """ if self._conn: # If the connection is still alive, rollback and close it if not self._conn.closed: if self._conn.status == STATUS_IN_TRANSACTION: self._conn.rollback() self._conn.close() # Remove the connection from the live connections list self._conn = None _live_connections.remove(self) def _cursor(self, *args, **kwargs): """ Return a cursor """ conn = self.connect() return conn.cursor(*args, **kwargs) @property def server_version(self): """ Version of PostgreSQL (returned by psycopg2) """ conn = self.connect() return conn.server_version @property def server_txt_version(self): """ Human readable version of PostgreSQL (calculated from server_version) :rtype: str|None """ try: conn = self.connect() major = int(conn.server_version / 10000) minor = int(conn.server_version / 100 % 100) patch = int(conn.server_version % 100) if major < 10: return "%d.%d.%d" % (major, minor, patch) if minor != 0: _logger.warning( "Unexpected non zero minor version %s in %s", minor, conn.server_version, ) return "%d.%d" % (major, patch) except PostgresConnectionError as e: _logger.debug( "Error retrieving PostgreSQL version: %s", force_str(e).strip() ) return None @property def server_major_version(self): """ PostgreSQL major version (calculated from server_txt_version) :rtype: str|None """ result = self.server_txt_version if result is not None: return simplify_version(result) return None class StreamingConnection(PostgreSQL): """ This class represents a streaming connection to a PostgreSQL server. """ CHECK_QUERY = "IDENTIFY_SYSTEM" def __init__(self, conninfo): """ Streaming connection constructor :param str conninfo: Connection information (aka DSN) """ super(StreamingConnection, self).__init__(conninfo) # Make sure we connect using the 'replication' option which # triggers streaming replication protocol communication self.conn_parameters["replication"] = "true" # ensure that the datestyle is set to iso, working around an # issue in some psycopg2 versions self.conn_parameters["options"] = "-cdatestyle=iso" # Override 'dbname' parameter. This operation is required to mimic # the behaviour of pg_receivexlog and pg_basebackup self.conn_parameters["dbname"] = "replication" # Rebuild the conninfo string from the modified parameter lists self.conninfo = self.encode_dsn(self.conn_parameters) def connect(self): """ Connect to the PostgreSQL server. It reuses an existing connection. :returns: the connection to the server """ if self._check_connection(): return self._conn # Build a connection self._conn = super(StreamingConnection, self).connect() return self._conn def fetch_remote_status(self): """ Returns the status of the connection to the PostgreSQL server. This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. :rtype: dict[str, None|str] """ result = dict.fromkeys( ( "connection_error", "streaming_supported", "streaming", "streaming_systemid", "timeline", "xlogpos", ), None, ) try: # If the server is too old to support `pg_receivexlog`, # exit immediately. 
# This needs to be protected by the try/except because # `self.server_version` can raise a PostgresConnectionError if self.server_version < 90200: result["streaming_supported"] = False return result result["streaming_supported"] = True # Execute a IDENTIFY_SYSYEM to check the connection cursor = self._cursor() cursor.execute("IDENTIFY_SYSTEM") row = cursor.fetchone() # If something has been returned, barman is connected # to a replication backend if row: result["streaming"] = True # IDENTIFY_SYSTEM always returns at least two values result["streaming_systemid"] = row[0] result["timeline"] = row[1] # PostgreSQL 9.1+ returns also the current xlog flush location if len(row) > 2: result["xlogpos"] = row[2] except psycopg2.ProgrammingError: # This is not a streaming connection result["streaming"] = False except PostgresConnectionError as e: result["connection_error"] = force_str(e).strip() _logger.warning( "Error retrieving PostgreSQL status: %s", force_str(e).strip() ) return result def create_physical_repslot(self, slot_name): """ Create a physical replication slot using the streaming connection :param str slot_name: Replication slot name """ cursor = self._cursor() try: # In the following query, the slot name is directly passed # to the CREATE_REPLICATION_SLOT command, without any # quoting. This is a characteristic of the streaming # connection, otherwise if will fail with a generic # "syntax error" cursor.execute("CREATE_REPLICATION_SLOT %s PHYSICAL" % slot_name) _logger.info("Replication slot '%s' successfully created", slot_name) except psycopg2.DatabaseError as exc: if exc.pgcode == DUPLICATE_OBJECT: # A replication slot with the same name exists raise PostgresDuplicateReplicationSlot() elif exc.pgcode == CONFIGURATION_LIMIT_EXCEEDED: # Unable to create a new physical replication slot. # All slots are full. raise PostgresReplicationSlotsFull() else: raise PostgresException(force_str(exc).strip()) def drop_repslot(self, slot_name): """ Drop a physical replication slot using the streaming connection :param str slot_name: Replication slot name """ cursor = self._cursor() try: # In the following query, the slot name is directly passed # to the DROP_REPLICATION_SLOT command, without any # quoting. This is a characteristic of the streaming # connection, otherwise if will fail with a generic # "syntax error" cursor.execute("DROP_REPLICATION_SLOT %s" % slot_name) _logger.info("Replication slot '%s' successfully dropped", slot_name) except psycopg2.DatabaseError as exc: if exc.pgcode == UNDEFINED_OBJECT: # A replication slot with the that name does not exist raise PostgresInvalidReplicationSlot() if exc.pgcode == OBJECT_IN_USE: # The replication slot is still in use raise PostgresReplicationSlotInUse() else: raise PostgresException(force_str(exc).strip()) class PostgreSQLConnection(PostgreSQL): """ This class represents a standard client connection to a PostgreSQL server. """ # Streaming replication client types STANDBY = 1 WALSTREAMER = 2 ANY_STREAMING_CLIENT = (STANDBY, WALSTREAMER) def __init__( self, conninfo, immediate_checkpoint=False, slot_name=None, application_name="barman", ): """ PostgreSQL connection constructor. 
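        Example construction, as a sketch (connection values are
        illustrative):

            conn = PostgreSQLConnection(
                "host=pg01 user=barman dbname=postgres",
                immediate_checkpoint=True,
                slot_name="barman",
            )
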
:param str conninfo: Connection information (aka DSN) :param bool immediate_checkpoint: Whether to do an immediate checkpoint when start a backup :param str|None slot_name: Replication slot name """ super(PostgreSQLConnection, self).__init__(conninfo) self.immediate_checkpoint = immediate_checkpoint self.slot_name = slot_name self.application_name = application_name self.configuration_files = None def connect(self): """ Connect to the PostgreSQL server. It reuses an existing connection. """ if self._check_connection(): return self._conn self._conn = super(PostgreSQLConnection, self).connect() server_version = self._conn.server_version use_app_name = "application_name" in self.conn_parameters if server_version >= 90000 and not use_app_name: try: cur = self._conn.cursor() # Do not use parameter substitution with SET cur.execute("SET application_name TO %s" % self.application_name) cur.close() # If psycopg2 fails to set the application name, # raise the appropriate exception except psycopg2.ProgrammingError as e: raise PostgresAppNameError(force_str(e).strip()) return self._conn @property def server_txt_version(self): """ Human readable version of PostgreSQL (returned by the server). Note: The return value of this function is used when composing include patterns which are passed to rsync when copying tablespaces. If the value does not exactly match the PostgreSQL version then Barman may fail to copy tablespace files during a backup. """ try: cur = self._cursor() cur.execute("SELECT version()") version_string = cur.fetchone()[0] platform, version = version_string.split()[:2] # EPAS <= 10 will return a version string which starts with # EnterpriseDB followed by the PostgreSQL version with an # additional version field. This additional field must be discarded # so that we return the exact PostgreSQL version. Later versions of # EPAS report the PostgreSQL version directly so do not need # special handling. 
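            # For example (version strings are illustrative):
            #   "PostgreSQL 13.5 on x86_64-pc-linux-gnu ..."  -> "13.5"
            #   "EnterpriseDB 9.6.22.29 on x86_64 ..."        -> "9.6.22"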
if platform == "EnterpriseDB": return ".".join(version.split(".")[:-1]) else: return version except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving PostgreSQL version: %s", force_str(e).strip() ) return None @property def has_pgespresso(self): """ Returns true if the `pgespresso` extension is available """ try: # pg_extension is only available from Postgres 9.1+ if self.server_version < 90100: return False cur = self._cursor() cur.execute( "SELECT count(*) FROM pg_extension WHERE extname = 'pgespresso'" ) q_result = cur.fetchone()[0] return q_result > 0 except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving pgespresso information: %s", force_str(e).strip() ) return None @property def is_in_recovery(self): """ Returns true if PostgreSQL server is in recovery mode (hot standby) """ try: # pg_is_in_recovery is only available from Postgres 9.0+ if self.server_version < 90000: return False cur = self._cursor() cur.execute("SELECT pg_is_in_recovery()") return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error calling pg_is_in_recovery() function: %s", force_str(e).strip() ) return None @property def is_superuser(self): """ Returns true if current user has superuser privileges """ try: cur = self._cursor() cur.execute("SELECT usesuper FROM pg_user WHERE usename = CURRENT_USER") return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error calling is_superuser() function: %s", force_str(e).strip() ) return None @property def has_backup_privileges(self): """ Returns true if current user is superuser or, for PostgreSQL 10 or above, is a standard user that has grants to read server settings and to execute all the functions needed for exclusive/concurrent backup control and WAL control. """ # pg_monitor / pg_read_all_settings only available from v10 if self.server_version < 100000: return self.is_superuser backup_check_query = """ SELECT usesuper OR ( ( pg_has_role(CURRENT_USER, 'pg_monitor', 'MEMBER') OR ( pg_has_role(CURRENT_USER, 'pg_read_all_settings', 'MEMBER') AND pg_has_role(CURRENT_USER, 'pg_read_all_stats', 'MEMBER') ) ) AND has_function_privilege( CURRENT_USER, 'pg_start_backup(text,bool,bool)', 'EXECUTE') AND ( has_function_privilege( CURRENT_USER, 'pg_stop_backup()', 'EXECUTE') OR has_function_privilege( CURRENT_USER, 'pg_stop_backup(bool,bool)', 'EXECUTE') ) AND has_function_privilege( CURRENT_USER, 'pg_switch_wal()', 'EXECUTE') AND has_function_privilege( CURRENT_USER, 'pg_create_restore_point(text)', 'EXECUTE') ) FROM pg_user WHERE usename = CURRENT_USER """ try: cur = self._cursor() cur.execute(backup_check_query) return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error checking privileges for functions needed for backups: %s", force_str(e).strip(), ) return None @property def current_xlog_info(self): """ Get detailed information about the current WAL position in PostgreSQL. 
This method returns a dictionary containing the following data: * location * file_name * file_offset * timestamp When executed on a standby server file_name and file_offset are always None :rtype: psycopg2.extras.DictRow """ try: cur = self._cursor(cursor_factory=DictCursor) if not self.is_in_recovery: cur.execute( "SELECT location, " "({pg_walfile_name_offset}(location)).*, " "CURRENT_TIMESTAMP AS timestamp " "FROM {pg_current_wal_lsn}() AS location".format(**self.name_map) ) return cur.fetchone() else: cur.execute( "SELECT location, " "NULL AS file_name, " "NULL AS file_offset, " "CURRENT_TIMESTAMP AS timestamp " "FROM {pg_last_wal_replay_lsn}() AS location".format( **self.name_map ) ) return cur.fetchone() except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving current xlog detailed information: %s", force_str(e).strip(), ) return None @property def current_xlog_file_name(self): """ Get current WAL file from PostgreSQL :return str: current WAL file in PostgreSQL """ current_xlog_info = self.current_xlog_info if current_xlog_info is not None: return current_xlog_info["file_name"] return None @property def xlog_segment_size(self): """ Retrieve the size of one WAL file. In PostgreSQL 11, users will be able to change the WAL size at runtime. Up to PostgreSQL 10, included, the WAL size can be changed at compile time :return: The wal size (In bytes) """ # Prior to PostgreSQL 8.4, the wal segment size was not configurable, # even in compilation if self.server_version < 80400: return DEFAULT_XLOG_SEG_SIZE try: cur = self._cursor(cursor_factory=DictCursor) # We can't use the `get_setting` method here, because it # use `SHOW`, returning an human readable value such as "16MB", # while we prefer a raw value such as 16777216. cur.execute("SELECT setting FROM pg_settings WHERE name='wal_segment_size'") result = cur.fetchone() wal_segment_size = int(result[0]) # Prior to PostgreSQL 11, the wal segment size is returned in # blocks if self.server_version < 110000: cur.execute( "SELECT setting FROM pg_settings WHERE name='wal_block_size'" ) result = cur.fetchone() wal_block_size = int(result[0]) wal_segment_size *= wal_block_size return wal_segment_size except ValueError as e: _logger.error( "Error retrieving current xlog segment size: %s", force_str(e).strip(), ) return None @property def current_xlog_location(self): """ Get current WAL location from PostgreSQL :return str: current WAL location in PostgreSQL """ current_xlog_info = self.current_xlog_info if current_xlog_info is not None: return current_xlog_info["location"] return None @property def current_size(self): """ Returns the total size of the PostgreSQL server (requires superuser or pg_read_all_stats) """ if not self.has_backup_privileges: return None try: cur = self._cursor() cur.execute("SELECT sum(pg_tablespace_size(oid)) FROM pg_tablespace") return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving PostgreSQL total size: %s", force_str(e).strip() ) return None @property def archive_timeout(self): """ Retrieve the archive_timeout setting in PostgreSQL :return: The archive timeout (in seconds) """ try: cur = self._cursor(cursor_factory=DictCursor) # We can't use the `get_setting` method here, because it # uses `SHOW`, returning an human readable value such as "5min", # while we prefer a raw value such as 300. 
cur.execute("SELECT setting FROM pg_settings WHERE name='archive_timeout'") result = cur.fetchone() archive_timeout = int(result[0]) return archive_timeout except ValueError as e: _logger.error("Error retrieving archive_timeout: %s", force_str(e).strip()) return None @property def checkpoint_timeout(self): """ Retrieve the checkpoint_timeout setting in PostgreSQL :return: The checkpoint timeout (in seconds) """ try: cur = self._cursor(cursor_factory=DictCursor) # We can't use the `get_setting` method here, because it # uses `SHOW`, returning an human readable value such as "5min", # while we prefer a raw value such as 300. cur.execute( "SELECT setting FROM pg_settings WHERE name='checkpoint_timeout'" ) result = cur.fetchone() checkpoint_timeout = int(result[0]) return checkpoint_timeout except ValueError as e: _logger.error( "Error retrieving checkpoint_timeout: %s", force_str(e).strip() ) return None def get_archiver_stats(self): """ This method gathers statistics from pg_stat_archiver. Only for Postgres 9.4+ or greater. If not available, returns None. :return dict|None: a dictionary containing Postgres statistics from pg_stat_archiver or None """ try: # pg_stat_archiver is only available from Postgres 9.4+ if self.server_version < 90400: return None cur = self._cursor(cursor_factory=DictCursor) # Select from pg_stat_archiver statistics view, # retrieving statistics about WAL archiver process activity, # also evaluating if the server is archiving without issues # and the archived WALs per second rate. # # We are using current_settings to check for archive_mode=always. # current_setting does normalise its output so we can just # check for 'always' settings using a direct string # comparison cur.execute( "SELECT *, " "current_setting('archive_mode') IN ('on', 'always') " "AND (last_failed_wal IS NULL " "OR last_failed_wal LIKE '%.history' " "AND substring(last_failed_wal from 1 for 8) " "<= substring(last_archived_wal from 1 for 8) " "OR last_failed_time <= last_archived_time) " "AS is_archiving, " "CAST (archived_count AS NUMERIC) " "/ EXTRACT (EPOCH FROM age(now(), stats_reset)) " "AS current_archived_wals_per_second " "FROM pg_stat_archiver" ) return cur.fetchone() except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving pg_stat_archive data: %s", force_str(e).strip() ) return None def fetch_remote_status(self): """ Get the status of the PostgreSQL server This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. 
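        A fragment of a possible result (keys come from the queries below,
        values are purely illustrative):

            {'server_txt_version': '13.5',
             'is_superuser': False,
             'has_backup_privileges': True,
             'is_in_recovery': False,
             'current_xlog': '000000010000000000000003',
             'replication_slot_support': True,
             ...}
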
:rtype: dict[str, None|str] """ # PostgreSQL settings to get from the server (requiring superuser) pg_superuser_settings = ["data_directory"] # PostgreSQL settings to get from the server pg_settings = [] pg_query_keys = [ "server_txt_version", "is_superuser", "is_in_recovery", "current_xlog", "pgespresso_installed", "replication_slot_support", "replication_slot", "synchronous_standby_names", "postgres_systemid", ] # Initialise the result dictionary setting all the values to None result = dict.fromkeys( pg_superuser_settings + pg_settings + pg_query_keys, None ) try: # Retrieve wal_level, hot_standby and max_wal_senders # only if version is >= 9.0 if self.server_version >= 90000: pg_settings.append("wal_level") pg_settings.append("hot_standby") pg_settings.append("max_wal_senders") # Retrieve wal_keep_segments from version 9.0 onwards, until # version 13.0, where it was renamed to wal_keep_size if self.server_version < 130000: pg_settings.append("wal_keep_segments") else: pg_settings.append("wal_keep_size") if self.server_version >= 90300: pg_settings.append("data_checksums") if self.server_version >= 90400: pg_settings.append("max_replication_slots") if self.server_version >= 90500: pg_settings.append("wal_compression") # retrieves superuser settings if self.has_backup_privileges: for name in pg_superuser_settings: result[name] = self.get_setting(name) # retrieves standard settings for name in pg_settings: result[name] = self.get_setting(name) result["is_superuser"] = self.is_superuser result["has_backup_privileges"] = self.has_backup_privileges result["is_in_recovery"] = self.is_in_recovery result["server_txt_version"] = self.server_txt_version result["pgespresso_installed"] = self.has_pgespresso current_xlog_info = self.current_xlog_info if current_xlog_info: result["current_lsn"] = current_xlog_info["location"] result["current_xlog"] = current_xlog_info["file_name"] else: result["current_lsn"] = None result["current_xlog"] = None result["current_size"] = self.current_size result["archive_timeout"] = self.archive_timeout result["checkpoint_timeout"] = self.checkpoint_timeout result["xlog_segment_size"] = self.xlog_segment_size result.update(self.get_configuration_files()) # Retrieve the replication_slot status result["replication_slot_support"] = False if self.server_version >= 90400: result["replication_slot_support"] = True if self.slot_name is not None: result["replication_slot"] = self.get_replication_slot( self.slot_name ) # Retrieve the list of synchronous standby names result["synchronous_standby_names"] = [] if self.server_version >= 90100: result[ "synchronous_standby_names" ] = self.get_synchronous_standby_names() if self.server_version >= 90600: result["postgres_systemid"] = self.get_systemid() except (PostgresConnectionError, psycopg2.Error) as e: _logger.warning( "Error retrieving PostgreSQL status: %s", force_str(e).strip() ) return result def get_systemid(self): """ Get a Postgres instance systemid """ if self.server_version < 90600: return try: cur = self._cursor() cur.execute("SELECT system_identifier::text FROM pg_control_system()") return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving PostgreSQL system Id: %s", force_str(e).strip() ) return None def get_setting(self, name): """ Get a Postgres setting with a given name :param name: a parameter name """ try: cur = self._cursor() cur.execute('SHOW "%s"' % name.replace('"', '""')) return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: 
_logger.debug( "Error retrieving PostgreSQL setting '%s': %s", name.replace('"', '""'), force_str(e).strip(), ) return None def get_tablespaces(self): """ Returns a list of tablespaces or None if not present """ try: cur = self._cursor() if self.server_version >= 90200: cur.execute( "SELECT spcname, oid, " "pg_tablespace_location(oid) AS spclocation " "FROM pg_tablespace " "WHERE pg_tablespace_location(oid) != ''" ) else: cur.execute( "SELECT spcname, oid, spclocation " "FROM pg_tablespace WHERE spclocation != ''" ) # Generate a list of tablespace objects return [Tablespace._make(item) for item in cur.fetchall()] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving PostgreSQL tablespaces: %s", force_str(e).strip() ) return None def get_configuration_files(self): """ Get postgres configuration files or an empty dictionary in case of error :rtype: dict """ if self.configuration_files: return self.configuration_files try: self.configuration_files = {} cur = self._cursor() cur.execute( "SELECT name, setting FROM pg_settings " "WHERE name IN ('config_file', 'hba_file', 'ident_file')" ) for cname, cpath in cur.fetchall(): self.configuration_files[cname] = cpath # Retrieve additional configuration files # If PostgreSQL is older than 8.4 disable this check if self.server_version >= 80400: cur.execute( "SELECT DISTINCT sourcefile AS included_file " "FROM pg_settings " "WHERE sourcefile IS NOT NULL " "AND sourcefile NOT IN " "(SELECT setting FROM pg_settings " "WHERE name = 'config_file') " "ORDER BY 1" ) # Extract the values from the containing single element tuples included_files = [included_file for included_file, in cur.fetchall()] if len(included_files) > 0: self.configuration_files["included_files"] = included_files except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving PostgreSQL configuration files location: %s", force_str(e).strip(), ) self.configuration_files = {} return self.configuration_files def create_restore_point(self, target_name): """ Create a restore point with the given target name The method executes the pg_create_restore_point() function through a PostgreSQL connection. Only for Postgres versions >= 9.1 when not in replication. If requirements are not met, the operation is skipped. 
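        Illustrative call (the target name and the returned LSN are example
        values only):

            lsn = conn.create_restore_point('before_major_update')
            # lsn is e.g. '0/3000140', or None if the call was skipped
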
:param str target_name: name of the restore point :returns: the restore point LSN :rtype: str|None """ if self.server_version < 90100: return None # Not possible if on a standby # Called inside the pg_connect context to reuse the connection if self.is_in_recovery: return None try: cur = self._cursor() cur.execute("SELECT pg_create_restore_point(%s)", [target_name]) _logger.info("Restore point '%s' successfully created", target_name) return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error issuing pg_create_restore_point() command: %s", force_str(e).strip(), ) return None def start_exclusive_backup(self, label): """ Calls pg_start_backup() on the PostgreSQL server This method returns a dictionary containing the following data: * location * file_name * file_offset * timestamp :param str label: descriptive string to identify the backup :rtype: psycopg2.extras.DictRow """ try: conn = self.connect() # Rollback to release the transaction, as the pg_start_backup # invocation can last up to PostgreSQL's checkpoint_timeout conn.rollback() # Start an exclusive backup cur = conn.cursor(cursor_factory=DictCursor) if self.server_version < 80400: cur.execute( "SELECT location, " "({pg_walfile_name_offset}(location)).*, " "now() AS timestamp " "FROM pg_start_backup(%s) AS location".format(**self.name_map), (label,), ) else: cur.execute( "SELECT location, " "({pg_walfile_name_offset}(location)).*, " "now() AS timestamp " "FROM pg_start_backup(%s,%s) AS location".format(**self.name_map), (label, self.immediate_checkpoint), ) start_row = cur.fetchone() # Rollback to release the transaction, as the connection # is to be retained until the end of backup conn.rollback() return start_row except (PostgresConnectionError, psycopg2.Error) as e: msg = "pg_start_backup(): %s" % force_str(e).strip() _logger.debug(msg) raise PostgresException(msg) def start_concurrent_backup(self, label): """ Calls pg_start_backup on the PostgreSQL server using the API introduced with version 9.6 This method returns a dictionary containing the following data: * location * timeline * timestamp :param str label: descriptive string to identify the backup :rtype: psycopg2.extras.DictRow """ try: conn = self.connect() # Rollback to release the transaction, as the pg_start_backup # invocation can last up to PostgreSQL's checkpoint_timeout conn.rollback() # Start the backup using the api introduced in postgres 9.6 cur = conn.cursor(cursor_factory=DictCursor) cur.execute( "SELECT location, " "(SELECT timeline_id " "FROM pg_control_checkpoint()) AS timeline, " "now() AS timestamp " "FROM pg_start_backup(%s, %s, FALSE) AS location", (label, self.immediate_checkpoint), ) start_row = cur.fetchone() # Rollback to release the transaction, as the connection # is to be retained until the end of backup conn.rollback() return start_row except (PostgresConnectionError, psycopg2.Error) as e: msg = "pg_start_backup command: %s" % (force_str(e).strip(),) _logger.debug(msg) raise PostgresException(msg) def stop_exclusive_backup(self): """ Calls pg_stop_backup() on the PostgreSQL server This method returns a dictionary containing the following data: * location * file_name * file_offset * timestamp :rtype: psycopg2.extras.DictRow """ try: conn = self.connect() # Rollback to release the transaction, as the pg_stop_backup # invocation could will wait until the current WAL file is shipped conn.rollback() # Stop the backup cur = conn.cursor(cursor_factory=DictCursor) cur.execute( "SELECT location, " 
"({pg_walfile_name_offset}(location)).*, " "now() AS timestamp " "FROM pg_stop_backup() AS location".format(**self.name_map) ) return cur.fetchone() except (PostgresConnectionError, psycopg2.Error) as e: msg = "Error issuing pg_stop_backup command: %s" % force_str(e).strip() _logger.debug(msg) raise PostgresException( "Cannot terminate exclusive backup. " "You might have to manually execute pg_stop_backup " "on your PostgreSQL server" ) def stop_concurrent_backup(self): """ Calls pg_stop_backup on the PostgreSQL server using the API introduced with version 9.6 This method returns a dictionary containing the following data: * location * timeline * backup_label * timestamp :rtype: psycopg2.extras.DictRow """ try: conn = self.connect() # Rollback to release the transaction, as the pg_stop_backup # invocation could will wait until the current WAL file is shipped conn.rollback() # Stop the backup using the api introduced with version 9.6 cur = conn.cursor(cursor_factory=DictCursor) cur.execute( "SELECT end_row.lsn AS location, " "(SELECT CASE WHEN pg_is_in_recovery() " "THEN min_recovery_end_timeline ELSE timeline_id END " "FROM pg_control_checkpoint(), pg_control_recovery()" ") AS timeline, " "end_row.labelfile AS backup_label, " "now() AS timestamp FROM pg_stop_backup(FALSE) AS end_row" ) return cur.fetchone() except (PostgresConnectionError, psycopg2.Error) as e: msg = "Error issuing pg_stop_backup command: %s" % force_str(e).strip() _logger.debug(msg) raise PostgresException(msg) def pgespresso_start_backup(self, label): """ Execute a pgespresso_start_backup This method returns a dictionary containing the following data: * backup_label * timestamp :param str label: descriptive string to identify the backup :rtype: psycopg2.extras.DictRow """ try: conn = self.connect() # Rollback to release the transaction, # as the pgespresso_start_backup invocation can last # up to PostgreSQL's checkpoint_timeout conn.rollback() # Start the concurrent backup using pgespresso cur = conn.cursor(cursor_factory=DictCursor) cur.execute( "SELECT pgespresso_start_backup(%s,%s) AS backup_label, " "now() AS timestamp", (label, self.immediate_checkpoint), ) start_row = cur.fetchone() # Rollback to release the transaction, as the connection # is to be retained until the end of backup conn.rollback() return start_row except (PostgresConnectionError, psycopg2.Error) as e: msg = "pgespresso_start_backup(): %s" % force_str(e).strip() _logger.debug(msg) raise PostgresException(msg) def pgespresso_stop_backup(self, backup_label): """ Execute a pgespresso_stop_backup This method returns a dictionary containing the following data: * end_wal * timestamp :param str backup_label: backup label as returned by pgespress_start_backup :rtype: psycopg2.extras.DictRow """ try: conn = self.connect() # Issue a rollback to release any unneeded lock conn.rollback() cur = conn.cursor(cursor_factory=DictCursor) cur.execute( "SELECT pgespresso_stop_backup(%s) AS end_wal, now() AS timestamp", (backup_label,), ) return cur.fetchone() except (PostgresConnectionError, psycopg2.Error) as e: msg = "Error issuing pgespresso_stop_backup() command: %s" % ( force_str(e).strip() ) _logger.debug(msg) raise PostgresException( "%s\n" "HINT: You might have to manually execute " "pgespresso_abort_backup() on your PostgreSQL " "server" % msg ) def switch_wal(self): """ Execute a pg_switch_wal() To be SURE of the switch of a xlog, we collect the xlogfile name before and after the switch. 
The method returns the just closed xlog file name if the current xlog file has changed, it returns an empty string otherwise. The method returns None if something went wrong during the execution of the pg_switch_wal command. :rtype: str|None """ try: conn = self.connect() if not self.has_backup_privileges: raise BackupFunctionsAccessRequired( "Postgres user '%s' is missing required privileges " '(see "Preliminary steps" in the Barman manual)' % self.conn_parameters.get("user") ) # If this server is in recovery there is nothing to do if self.is_in_recovery: raise PostgresIsInRecovery() cur = conn.cursor() # Collect the xlog file name before the switch cur.execute( "SELECT {pg_walfile_name}(" "{pg_current_wal_insert_lsn}())".format(**self.name_map) ) pre_switch = cur.fetchone()[0] # Switch cur.execute( "SELECT {pg_walfile_name}({pg_switch_wal}())".format(**self.name_map) ) # Collect the xlog file name after the switch cur.execute( "SELECT {pg_walfile_name}(" "{pg_current_wal_insert_lsn}())".format(**self.name_map) ) post_switch = cur.fetchone()[0] if pre_switch < post_switch: return pre_switch else: return "" except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error issuing {pg_switch_wal}() command: %s".format(**self.name_map), force_str(e).strip(), ) return None def checkpoint(self): """ Execute a checkpoint """ try: conn = self.connect() # Requires superuser privilege if not self.is_superuser: raise PostgresSuperuserRequired() cur = conn.cursor() cur.execute("CHECKPOINT") except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error issuing CHECKPOINT: %s", force_str(e).strip()) def get_replication_stats(self, client_type=STANDBY): """ Returns streaming replication information """ try: cur = self._cursor(cursor_factory=NamedTupleCursor) if not self.has_backup_privileges: raise BackupFunctionsAccessRequired( "Postgres user '%s' is missing required privileges " '(see "Preliminary steps" in the Barman manual)' % self.conn_parameters.get("user") ) # pg_stat_replication is a system view that contains one # row per WAL sender process with information about the # replication status of a standby server. It has been # introduced in PostgreSQL 9.1. 
Current fields are: # # - pid (procpid in 9.1) # - usesysid # - usename # - application_name # - client_addr # - client_hostname # - client_port # - backend_start # - backend_xmin (9.4+) # - state # - sent_lsn (sent_location before 10) # - write_lsn (write_location before 10) # - flush_lsn (flush_location before 10) # - replay_lsn (replay_location before 10) # - sync_priority # - sync_state # if self.server_version < 90100: raise PostgresUnsupportedFeature("9.1") from_repslot = "" where_clauses = [] if self.server_version >= 100000: # Current implementation (10+) what = "r.*, rs.slot_name" # Look for replication slot name from_repslot = ( "LEFT JOIN pg_replication_slots rs ON (r.pid = rs.active_pid) " ) where_clauses += ["(rs.slot_type IS NULL OR rs.slot_type = 'physical')"] elif self.server_version >= 90500: # PostgreSQL 9.5/9.6 what = ( "pid, " "usesysid, " "usename, " "application_name, " "client_addr, " "client_hostname, " "client_port, " "backend_start, " "backend_xmin, " "state, " "sent_location AS sent_lsn, " "write_location AS write_lsn, " "flush_location AS flush_lsn, " "replay_location AS replay_lsn, " "sync_priority, " "sync_state, " "rs.slot_name" ) # Look for replication slot name from_repslot = ( "LEFT JOIN pg_replication_slots rs ON (r.pid = rs.active_pid) " ) where_clauses += ["(rs.slot_type IS NULL OR rs.slot_type = 'physical')"] elif self.server_version >= 90400: # PostgreSQL 9.4 what = ( "pid, " "usesysid, " "usename, " "application_name, " "client_addr, " "client_hostname, " "client_port, " "backend_start, " "backend_xmin, " "state, " "sent_location AS sent_lsn, " "write_location AS write_lsn, " "flush_location AS flush_lsn, " "replay_location AS replay_lsn, " "sync_priority, " "sync_state" ) elif self.server_version >= 90200: # PostgreSQL 9.2/9.3 what = ( "pid, " "usesysid, " "usename, " "application_name, " "client_addr, " "client_hostname, " "client_port, " "backend_start, " "CAST (NULL AS xid) AS backend_xmin, " "state, " "sent_location AS sent_lsn, " "write_location AS write_lsn, " "flush_location AS flush_lsn, " "replay_location AS replay_lsn, " "sync_priority, " "sync_state" ) else: # PostgreSQL 9.1 what = ( "procpid AS pid, " "usesysid, " "usename, " "application_name, " "client_addr, " "client_hostname, " "client_port, " "backend_start, " "CAST (NULL AS xid) AS backend_xmin, " "state, " "sent_location AS sent_lsn, " "write_location AS write_lsn, " "flush_location AS flush_lsn, " "replay_location AS replay_lsn, " "sync_priority, " "sync_state" ) # Streaming client if client_type == self.STANDBY: # Standby server where_clauses += ["{replay_lsn} IS NOT NULL".format(**self.name_map)] elif client_type == self.WALSTREAMER: # WAL streamer where_clauses += ["{replay_lsn} IS NULL".format(**self.name_map)] if where_clauses: where = "WHERE %s " % " AND ".join(where_clauses) else: where = "" # Execute the query cur.execute( "SELECT %s, " "pg_is_in_recovery() AS is_in_recovery, " "CASE WHEN pg_is_in_recovery() " " THEN {pg_last_wal_receive_lsn}() " " ELSE {pg_current_wal_lsn}() " "END AS current_lsn " "FROM pg_stat_replication r " "%s" "%s" "ORDER BY sync_state DESC, sync_priority".format(**self.name_map) % (what, from_repslot, where) ) # Generate a list of standby objects return cur.fetchall() except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving status of standby servers: %s", force_str(e).strip() ) return None def get_replication_slot(self, slot_name): """ Retrieve from the PostgreSQL server a physical replication slot with a specific 
slot_name. This method returns a dictionary containing the following data: * slot_name * active * restart_lsn :param str slot_name: the replication slot name :rtype: psycopg2.extras.DictRow """ if self.server_version < 90400: # Raise exception if replication slot are not supported # by PostgreSQL version raise PostgresUnsupportedFeature("9.4") else: cur = self._cursor(cursor_factory=NamedTupleCursor) try: cur.execute( "SELECT slot_name, " "active, " "restart_lsn " "FROM pg_replication_slots " "WHERE slot_type = 'physical' " "AND slot_name = '%s'" % slot_name ) # Retrieve the replication slot information return cur.fetchone() except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug( "Error retrieving replication_slots: %s", force_str(e).strip() ) raise def get_synchronous_standby_names(self): """ Retrieve the list of named synchronous standby servers from PostgreSQL This method returns a list of names :return list: synchronous standby names """ if self.server_version < 90100: # Raise exception if synchronous replication is not supported raise PostgresUnsupportedFeature("9.1") else: synchronous_standby_names = self.get_setting("synchronous_standby_names") # Return empty list if not defined if synchronous_standby_names is None: return [] # Normalise the list of sync standby names # On PostgreSQL 9.6 it is possible to specify the number of # required synchronous standby using this format: # n (name1, name2, ... nameN). # We only need the name list, so we discard everything else. # The name list starts after the first parenthesis or at pos 0 names_start = synchronous_standby_names.find("(") + 1 names_end = synchronous_standby_names.rfind(")") if names_end < 0: names_end = len(synchronous_standby_names) names_list = synchronous_standby_names[names_start:names_end] # We can blindly strip double quotes because PostgreSQL enforces # the format of the synchronous_standby_names content return [x.strip().strip('"') for x in names_list.split(",")] @property def name_map(self): """ Return a map with function and directory names according to the current PostgreSQL version. Each entry has the `current` name as key and the name for the specific version as value. :rtype: dict[str] """ # Avoid raising an error if the connection is not available try: server_version = self.server_version except PostgresConnectionError: _logger.debug( "Impossible to detect the PostgreSQL version, " "name_map will return names from latest version" ) server_version = None return function_name_map(server_version) barman-2.18/barman/config.py0000644000621200062120000010433514172556763014165 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module is responsible for all the things related to Barman configuration, such as parsing configuration file. 
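
A minimal sketch of the kind of INI file this module parses (section names,
paths and option values are illustrative, not defaults):

    [barman]
    barman_home = /var/lib/barman
    barman_user = barman

    [main]
    description = "Main PostgreSQL server"
    conninfo = host=pg01 user=barman dbname=postgres
    ssh_command = ssh postgres@pg01
    backup_method = rsync
    retention_policy = RECOVERY WINDOW OF 4 WEEKS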
""" import collections import datetime import inspect import logging.handlers import os import re import sys from glob import iglob from barman import output try: from ConfigParser import ConfigParser, NoOptionError except ImportError: from configparser import ConfigParser, NoOptionError # create a namedtuple object called PathConflict with 'label' and 'server' PathConflict = collections.namedtuple("PathConflict", "label server") _logger = logging.getLogger(__name__) FORBIDDEN_SERVER_NAMES = ["all"] DEFAULT_USER = "barman" DEFAULT_LOG_LEVEL = logging.INFO DEFAULT_LOG_FORMAT = "%(asctime)s [%(process)s] %(name)s %(levelname)s: %(message)s" _TRUE_RE = re.compile(r"""^(true|t|yes|1|on)$""", re.IGNORECASE) _FALSE_RE = re.compile(r"""^(false|f|no|0|off)$""", re.IGNORECASE) _TIME_INTERVAL_RE = re.compile( r""" ^\s* # N (day|month|week|hour) with optional 's' (\d+)\s+(day|month|week|hour)s? \s*$ """, re.IGNORECASE | re.VERBOSE, ) _SLOT_NAME_RE = re.compile("^[0-9a-z_]+$") _SI_SUFFIX_RE = re.compile(r"""(\d+)\s*(k|Ki|M|Mi|G|Gi|T|Ti)?\s*$""") REUSE_BACKUP_VALUES = ("copy", "link", "off") # Possible copy methods for backups (must be all lowercase) BACKUP_METHOD_VALUES = ["rsync", "postgres", "local-rsync"] CREATE_SLOT_VALUES = ["manual", "auto"] class CsvOption(set): """ Base class for CSV options. Given a comma delimited string, this class is a list containing the submitted options. Internally, it uses a set in order to avoid option replication. Allowed values for the CSV option are contained in the 'value_list' attribute. The 'conflicts' attribute specifies for any value, the list of values that are prohibited (and thus generate a conflict). If a conflict is found, raises a ValueError exception. """ value_list = [] conflicts = {} def __init__(self, value, key, source): # Invoke parent class init and initialize an empty set super(CsvOption, self).__init__() # Parse not None values if value is not None: self.parse(value, key, source) # Validates the object structure before returning the new instance self.validate(key, source) def parse(self, value, key, source): """ Parses a list of values and correctly assign the set of values (removing duplication) and checking for conflicts. """ if not value: return values_list = value.split(",") for val in sorted(values_list): val = val.strip().lower() if val in self.value_list: # check for conflicting values. if a conflict is # found the option is not valid then, raise exception. if val in self.conflicts and self.conflicts[val] in self: raise ValueError( "Invalid configuration value '%s' for " "key %s in %s: cannot contain both " "'%s' and '%s'." "Configuration directive ignored." 
% (val, key, source, val, self.conflicts[val]) ) else: # otherwise use parsed value self.add(val) else: # not allowed value, reject the configuration raise ValueError( "Invalid configuration value '%s' for " "key %s in %s: Unknown option" % (val, key, source) ) def validate(self, key, source): """ Override this method for special validation needs """ def to_json(self): """ Output representation of the obj for JSON serialization The result is a string which can be parsed by the same class """ return ",".join(self) class BackupOptions(CsvOption): """ Extends CsvOption class providing all the details for the backup_options field """ # constants containing labels for allowed values EXCLUSIVE_BACKUP = "exclusive_backup" CONCURRENT_BACKUP = "concurrent_backup" EXTERNAL_CONFIGURATION = "external_configuration" # list holding all the allowed values for the BackupOption class value_list = [EXCLUSIVE_BACKUP, CONCURRENT_BACKUP, EXTERNAL_CONFIGURATION] # map holding all the possible conflicts between the allowed values conflicts = { EXCLUSIVE_BACKUP: CONCURRENT_BACKUP, CONCURRENT_BACKUP: EXCLUSIVE_BACKUP, } class RecoveryOptions(CsvOption): """ Extends CsvOption class providing all the details for the recovery_options field """ # constants containing labels for allowed values GET_WAL = "get-wal" # list holding all the allowed values for the RecoveryOptions class value_list = [GET_WAL] def parse_boolean(value): """ Parse a string to a boolean value :param str value: string representing a boolean :raises ValueError: if the string is an invalid boolean representation """ if _TRUE_RE.match(value): return True if _FALSE_RE.match(value): return False raise ValueError("Invalid boolean representation (use 'true' or 'false')") def parse_time_interval(value): """ Parse a string, transforming it in a time interval. 
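    For example (illustrative):

        parse_time_interval('3 days')  == datetime.timedelta(days=3)
        parse_time_interval('1 month') == datetime.timedelta(days=31)
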
Accepted format: N (day|month|week)s :param str value: the string to evaluate """ # if empty string or none return none if value is None or value == "": return None result = _TIME_INTERVAL_RE.match(value) # if the string doesn't match, the option is invalid if not result: raise ValueError("Invalid value for a time interval %s" % value) # if the int conversion value = int(result.groups()[0]) unit = result.groups()[1][0].lower() # Calculates the time delta if unit == "d": time_delta = datetime.timedelta(days=value) elif unit == "w": time_delta = datetime.timedelta(weeks=value) elif unit == "m": time_delta = datetime.timedelta(days=(31 * value)) elif unit == "h": time_delta = datetime.timedelta(hours=value) else: # This should never happen raise ValueError("Invalid unit time %s" % unit) return time_delta def parse_si_suffix(value): """ Parse a string, transforming it into integer and multiplying by the SI or IEC suffix eg a suffix of Ki multiplies the integer value by 1024 and returns the new value Accepted format: N (k|Ki|M|Mi|G|Gi|T|Ti) :param str value: the string to evaluate """ # if empty string or none return none if value is None or value == "": return None result = _SI_SUFFIX_RE.match(value) if not result: raise ValueError("Invalid value for a number %s" % value) # if the int conversion value = int(result.groups()[0]) unit = result.groups()[1] # Calculates the value if unit == "k": value *= 1000 elif unit == "Ki": value *= 1024 elif unit == "M": value *= 1000000 elif unit == "Mi": value *= 1048576 elif unit == "G": value *= 1000000000 elif unit == "Gi": value *= 1073741824 elif unit == "T": value *= 1000000000000 elif unit == "Ti": value *= 1099511627776 return value def parse_reuse_backup(value): """ Parse a string to a valid reuse_backup value. Valid values are "copy", "link" and "off" :param str value: reuse_backup value :raises ValueError: if the value is invalid """ if value is None: return None if value.lower() in REUSE_BACKUP_VALUES: return value.lower() raise ValueError( "Invalid value (use '%s' or '%s')" % ("', '".join(REUSE_BACKUP_VALUES[:-1]), REUSE_BACKUP_VALUES[-1]) ) def parse_backup_method(value): """ Parse a string to a valid backup_method value. Valid values are contained in BACKUP_METHOD_VALUES list :param str value: backup_method value :raises ValueError: if the value is invalid """ if value is None: return None if value.lower() in BACKUP_METHOD_VALUES: return value.lower() raise ValueError( "Invalid value (must be one in: '%s')" % ("', '".join(BACKUP_METHOD_VALUES)) ) def parse_slot_name(value): """ Replication slot names may only contain lower case letters, numbers, and the underscore character. This function parse a replication slot name :param str value: slot_name value :return: """ if value is None: return None value = value.lower() if not _SLOT_NAME_RE.match(value): raise ValueError( "Replication slot names may only contain lower case letters, " "numbers, and the underscore character." ) return value def parse_create_slot(value): """ Parse a string to a valid create_slot value. Valid values are "manual" and "auto" :param str value: create_slot value :raises ValueError: if the value is invalid """ if value is None: return None value = value.lower() if value in CREATE_SLOT_VALUES: return value raise ValueError( "Invalid value (use '%s' or '%s')" % ("', '".join(CREATE_SLOT_VALUES[:-1]), CREATE_SLOT_VALUES[-1]) ) class ServerConfig(object): """ This class represents the configuration for a specific Server instance. 
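    Values are looked up in the server section first, then inherited from
    the [barman] section (for keys listed in BARMAN_KEYS) and finally
    taken from DEFAULTS. A minimal illustrative configuration (server and
    option names below are examples only):

        [barman]
        barman_home = /var/lib/barman
        compression = gzip

        [main]
        conninfo = host=pg1 user=barman dbname=postgres
        ssh_command = ssh postgres@pg1

    Here "compression" for server "main" is inherited from the [barman]
    section, while "conninfo" and "ssh_command" come from [main].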
""" KEYS = [ "active", "archiver", "archiver_batch_size", "backup_directory", "backup_method", "backup_options", "bandwidth_limit", "basebackup_retry_sleep", "basebackup_retry_times", "basebackups_directory", "check_timeout", "compression", "conninfo", "custom_compression_filter", "custom_decompression_filter", "custom_compression_magic", "description", "disabled", "errors_directory", "forward_config_path", "immediate_checkpoint", "incoming_wals_directory", "last_backup_maximum_age", "last_backup_minimum_size", "last_wal_maximum_age", "max_incoming_wals_queue", "minimum_redundancy", "network_compression", "parallel_jobs", "path_prefix", "post_archive_retry_script", "post_archive_script", "post_backup_retry_script", "post_backup_script", "post_delete_script", "post_delete_retry_script", "post_recovery_retry_script", "post_recovery_script", "post_wal_delete_script", "post_wal_delete_retry_script", "pre_archive_retry_script", "pre_archive_script", "pre_backup_retry_script", "pre_backup_script", "pre_delete_script", "pre_delete_retry_script", "pre_recovery_retry_script", "pre_recovery_script", "pre_wal_delete_script", "pre_wal_delete_retry_script", "primary_ssh_command", "recovery_options", "create_slot", "retention_policy", "retention_policy_mode", "reuse_backup", "slot_name", "ssh_command", "streaming_archiver", "streaming_archiver_batch_size", "streaming_archiver_name", "streaming_backup_name", "streaming_conninfo", "streaming_wals_directory", "tablespace_bandwidth_limit", "wal_retention_policy", "wals_directory", ] BARMAN_KEYS = [ "archiver", "archiver_batch_size", "backup_method", "backup_options", "bandwidth_limit", "basebackup_retry_sleep", "basebackup_retry_times", "check_timeout", "compression", "configuration_files_directory", "custom_compression_filter", "custom_decompression_filter", "custom_compression_magic", "forward_config_path", "immediate_checkpoint", "last_backup_maximum_age", "last_backup_minimum_size", "last_wal_maximum_age", "max_incoming_wals_queue", "minimum_redundancy", "network_compression", "parallel_jobs", "path_prefix", "post_archive_retry_script", "post_archive_script", "post_backup_retry_script", "post_backup_script", "post_delete_script", "post_delete_retry_script", "post_recovery_retry_script", "post_recovery_script", "post_wal_delete_script", "post_wal_delete_retry_script", "pre_archive_retry_script", "pre_archive_script", "pre_backup_retry_script", "pre_backup_script", "pre_delete_script", "pre_delete_retry_script", "pre_recovery_retry_script", "pre_recovery_script", "pre_wal_delete_script", "pre_wal_delete_retry_script", "primary_ssh_command", "recovery_options", "create_slot", "retention_policy", "retention_policy_mode", "reuse_backup", "slot_name", "streaming_archiver", "streaming_archiver_batch_size", "streaming_archiver_name", "streaming_backup_name", "tablespace_bandwidth_limit", "wal_retention_policy", ] DEFAULTS = { "active": "true", "archiver": "off", "archiver_batch_size": "0", "backup_directory": "%(barman_home)s/%(name)s", "backup_method": "rsync", "backup_options": "", "basebackup_retry_sleep": "30", "basebackup_retry_times": "0", "basebackups_directory": "%(backup_directory)s/base", "check_timeout": "30", "disabled": "false", "errors_directory": "%(backup_directory)s/errors", "forward_config_path": "false", "immediate_checkpoint": "false", "incoming_wals_directory": "%(backup_directory)s/incoming", "minimum_redundancy": "0", "network_compression": "false", "parallel_jobs": "1", "recovery_options": "", "create_slot": "manual", 
"retention_policy_mode": "auto", "streaming_archiver": "off", "streaming_archiver_batch_size": "0", "streaming_archiver_name": "barman_receive_wal", "streaming_backup_name": "barman_streaming_backup", "streaming_conninfo": "%(conninfo)s", "streaming_wals_directory": "%(backup_directory)s/streaming", "wal_retention_policy": "main", "wals_directory": "%(backup_directory)s/wals", } FIXED = [ "disabled", ] PARSERS = { "active": parse_boolean, "archiver": parse_boolean, "archiver_batch_size": int, "backup_method": parse_backup_method, "backup_options": BackupOptions, "basebackup_retry_sleep": int, "basebackup_retry_times": int, "check_timeout": int, "disabled": parse_boolean, "forward_config_path": parse_boolean, "immediate_checkpoint": parse_boolean, "last_backup_maximum_age": parse_time_interval, "last_backup_minimum_size": parse_si_suffix, "last_wal_maximum_age": parse_time_interval, "max_incoming_wals_queue": int, "network_compression": parse_boolean, "parallel_jobs": int, "recovery_options": RecoveryOptions, "create_slot": parse_create_slot, "reuse_backup": parse_reuse_backup, "streaming_archiver": parse_boolean, "streaming_archiver_batch_size": int, "slot_name": parse_slot_name, } def invoke_parser(self, key, source, value, new_value): """ Function used for parsing configuration values. If needed, it uses special parsers from the PARSERS map, and handles parsing exceptions. Uses two values (value and new_value) to manage configuration hierarchy (server config overwrites global config). :param str key: the name of the configuration option :param str source: the section that contains the configuration option :param value: the old value of the option if present. :param str new_value: the new value that needs to be parsed :return: the parsed value of a configuration option """ # If the new value is None, returns the old value if new_value is None: return value # If we have a parser for the current key, use it to obtain the # actual value. If an exception is thrown, print a warning and # ignore the value. 
# noinspection PyBroadException if key in self.PARSERS: parser = self.PARSERS[key] try: # If the parser is a subclass of the CsvOption class # we need a different invocation, which passes not only # the value to the parser, but also the key name # and the section that contains the configuration if inspect.isclass(parser) and issubclass(parser, CsvOption): value = parser(new_value, key, source) else: value = parser(new_value) except Exception as e: output.warning( "Ignoring invalid configuration value '%s' for key %s in %s: %s", new_value, key, source, e, ) else: value = new_value return value def __init__(self, config, name): self.msg_list = [] self.config = config self.name = name self.barman_home = config.barman_home self.barman_lock_directory = config.barman_lock_directory config.validate_server_config(self.name) for key in ServerConfig.KEYS: value = None # Skip parameters that cannot be configured by users if key not in ServerConfig.FIXED: # Get the setting from the [name] section of config file # A literal None value is converted to an empty string new_value = config.get(name, key, self.__dict__, none_value="") source = "[%s] section" % name value = self.invoke_parser(key, source, value, new_value) # If the setting isn't present in [name] section of config file # check if it has to be inherited from the [barman] section if value is None and key in ServerConfig.BARMAN_KEYS: new_value = config.get("barman", key, self.__dict__, none_value="") source = "[barman] section" value = self.invoke_parser(key, source, value, new_value) # If the setting isn't present in [name] section of config file # and is not inherited from global section use its default # (if present) if value is None and key in ServerConfig.DEFAULTS: new_value = ServerConfig.DEFAULTS[key] % self.__dict__ source = "DEFAULTS" value = self.invoke_parser(key, source, value, new_value) # An empty string is a None value (bypassing inheritance # from global configuration) if value is not None and value == "" or value == "None": value = None setattr(self, key, value) def to_json(self): """ Return an equivalent dictionary that can be encoded in json """ json_dict = dict(vars(self)) # remove the reference to main Config object del json_dict["config"] return json_dict def get_bwlimit(self, tablespace=None): """ Return the configured bandwidth limit for the provided tablespace If tablespace is None, it returns the global bandwidth limit :param barman.infofile.Tablespace tablespace: the tablespace to copy :rtype: str """ # Default to global bandwidth limit bwlimit = self.bandwidth_limit if tablespace: # A tablespace can be copied using a per-tablespace bwlimit tbl_bw_limit = self.tablespace_bandwidth_limit if tbl_bw_limit and tablespace.name in tbl_bw_limit: bwlimit = tbl_bw_limit[tablespace.name] return bwlimit class Config(object): """This class represents the barman configuration. Default configuration files are /etc/barman.conf, /etc/barman/barman.conf and ~/.barman.conf for a per-user configuration """ CONFIG_FILES = [ "~/.barman.conf", "/etc/barman.conf", "/etc/barman/barman.conf", ] _QUOTE_RE = re.compile(r"""^(["'])(.*)\1$""") def __init__(self, filename=None): # In Python 3 ConfigParser has changed to be strict by default. # Barman wants to preserve the Python 2 behavior, so we are # explicitly building it passing strict=False. 
try: # Python 3.x self._config = ConfigParser(strict=False) except TypeError: # Python 2.x self._config = ConfigParser() if filename: if hasattr(filename, "read"): try: # Python 3.x self._config.read_file(filename) except AttributeError: # Python 2.x self._config.readfp(filename) else: # check for the existence of the user defined file if not os.path.exists(filename): sys.exit("Configuration file '%s' does not exist" % filename) self._config.read(os.path.expanduser(filename)) else: # Check for the presence of configuration files # inside default directories for path in self.CONFIG_FILES: full_path = os.path.expanduser(path) if os.path.exists(full_path) and full_path in self._config.read( full_path ): filename = full_path break else: sys.exit( "Could not find any configuration file at " "default locations.\n" "Check Barman's documentation for more help." ) self.config_file = filename self._servers = None self.servers_msg_list = [] self._parse_global_config() def get(self, section, option, defaults=None, none_value=None): """Method to get the value from a given section from Barman configuration """ if not self._config.has_section(section): return None try: value = self._config.get(section, option, raw=False, vars=defaults) if value.lower() == "none": value = none_value if value is not None: value = self._QUOTE_RE.sub(lambda m: m.group(2), value) return value except NoOptionError: return None def _parse_global_config(self): """ This method parses the global [barman] section """ self.barman_home = self.get("barman", "barman_home") self.barman_lock_directory = ( self.get("barman", "barman_lock_directory") or self.barman_home ) self.user = self.get("barman", "barman_user") or DEFAULT_USER self.log_file = self.get("barman", "log_file") self.log_format = self.get("barman", "log_format") or DEFAULT_LOG_FORMAT self.log_level = self.get("barman", "log_level") or DEFAULT_LOG_LEVEL # save the raw barman section to be compared later in # _is_global_config_changed() method self._global_config = set(self._config.items("barman")) def _is_global_config_changed(self): """Return true if something has changed in global configuration""" return self._global_config != set(self._config.items("barman")) def load_configuration_files_directory(self): """ Read the "configuration_files_directory" option and load all the configuration files with the .conf suffix that lie in that folder """ config_files_directory = self.get("barman", "configuration_files_directory") if not config_files_directory: return if not os.path.isdir(os.path.expanduser(config_files_directory)): _logger.warn( 'Ignoring the "configuration_files_directory" option as "%s" ' "is not a directory", config_files_directory, ) return for cfile in sorted( iglob(os.path.join(os.path.expanduser(config_files_directory), "*.conf")) ): filename = os.path.basename(cfile) if os.path.isfile(cfile): # Load a file _logger.debug("Including configuration file: %s", filename) self._config.read(cfile) if self._is_global_config_changed(): msg = ( "the configuration file %s contains a not empty [" "barman] section" % filename ) _logger.fatal(msg) raise SystemExit("FATAL: %s" % msg) else: # Add an info that a file has been discarded _logger.warn("Discarding configuration file: %s (not a file)", filename) def _populate_servers(self): """ Populate server list from configuration file Also check for paths errors in configuration. If two or more paths overlap in a single server, that server is disabled. 
If two or more directory paths overlap between different servers an error is raised. """ # Populate servers if self._servers is not None: return self._servers = {} # Cycle all the available configurations sections for section in self._config.sections(): if section == "barman": # skip global settings continue # Exit if the section has a reserved name if section in FORBIDDEN_SERVER_NAMES: msg = ( "the reserved word '%s' is not allowed as server name." "Please rename it." % section ) _logger.fatal(msg) raise SystemExit("FATAL: %s" % msg) # Create a ServerConfig object self._servers[section] = ServerConfig(self, section) # Check for conflicting paths in Barman configuration self._check_conflicting_paths() def _check_conflicting_paths(self): """ Look for conflicting paths intra-server and inter-server """ # All paths in configuration servers_paths = {} # Global errors list self.servers_msg_list = [] # Cycle all the available configurations sections for section in sorted(self._config.sections()): if section == "barman": # skip global settings continue # Paths map section_conf = self._servers[section] config_paths = { "backup_directory": section_conf.backup_directory, "basebackups_directory": section_conf.basebackups_directory, "errors_directory": section_conf.errors_directory, "incoming_wals_directory": section_conf.incoming_wals_directory, "streaming_wals_directory": section_conf.streaming_wals_directory, "wals_directory": section_conf.wals_directory, } # Check for path errors for label, path in sorted(config_paths.items()): # If the path does not conflict with the others, add it to the # paths map real_path = os.path.realpath(path) if real_path not in servers_paths: servers_paths[real_path] = PathConflict(label, section) else: if section == servers_paths[real_path].server: # Internal path error. # Insert the error message into the server.msg_list if real_path == path: self._servers[section].msg_list.append( "Conflicting path: %s=%s conflicts with " "'%s' for server '%s'" % ( label, path, servers_paths[real_path].label, servers_paths[real_path].server, ) ) else: # Symbolic link self._servers[section].msg_list.append( "Conflicting path: %s=%s (symlink to: %s) " "conflicts with '%s' for server '%s'" % ( label, path, real_path, servers_paths[real_path].label, servers_paths[real_path].server, ) ) # Disable the server self._servers[section].disabled = True else: # Global path error. 
# Insert the error message into the global msg_list if real_path == path: self.servers_msg_list.append( "Conflicting path: " "%s=%s for server '%s' conflicts with " "'%s' for server '%s'" % ( label, path, section, servers_paths[real_path].label, servers_paths[real_path].server, ) ) else: # Symbolic link self.servers_msg_list.append( "Conflicting path: " "%s=%s (symlink to: %s) for server '%s' " "conflicts with '%s' for server '%s'" % ( label, path, real_path, section, servers_paths[real_path].label, servers_paths[real_path].server, ) ) def server_names(self): """This method returns a list of server names""" self._populate_servers() return self._servers.keys() def servers(self): """This method returns a list of server parameters""" self._populate_servers() return self._servers.values() def get_server(self, name): """ Get the configuration of the specified server :param str name: the server name """ self._populate_servers() return self._servers.get(name, None) def validate_global_config(self): """ Validate global configuration parameters """ # Check for the existence of unexpected parameters in the # global section of the configuration file keys = [ "barman_home", "barman_lock_directory", "barman_user", "log_file", "log_level", "configuration_files_directory", ] keys.extend(ServerConfig.KEYS) self._validate_with_keys(self._global_config, keys, "barman") def validate_server_config(self, server): """ Validate configuration parameters for a specified server :param str server: the server name """ # Check for the existence of unexpected parameters in the # server section of the configuration file self._validate_with_keys(self._config.items(server), ServerConfig.KEYS, server) @staticmethod def _validate_with_keys(config_items, allowed_keys, section): """ Check every config parameter against a list of allowed keys :param config_items: list of tuples containing provided parameters along with their values :param allowed_keys: list of allowed keys :param section: source section (for error reporting) """ for parameter in config_items: # if the parameter name is not in the list of allowed values, # then output a warning name = parameter[0] if name not in allowed_keys: output.warning( 'Invalid configuration option "%s" in [%s] ' "section.", name, section, ) # easy raw config diagnostic with python -m # noinspection PyProtectedMember def _main(): print("Active configuration settings:") r = Config() r.load_configuration_files_directory() for section in r._config.sections(): print("Section: %s" % section) for option in r._config.options(section): print("\t%s = %s " % (option, r.get(section, option))) if __name__ == "__main__": _main() barman-2.18/barman/version.py0000644000621200062120000000144414172556766014405 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains the current Barman version. 
""" __version__ = '2.18' barman-2.18/barman/__init__.py0000644000621200062120000000157214172556763014456 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ The main Barman module """ from __future__ import absolute_import from .version import __version__ __config__ = None __all__ = ["__version__", "__config__"] barman-2.18/barman/backup.py0000644000621200062120000016354714172556763014177 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents a backup. 
""" import datetime import logging import os import shutil import tempfile from contextlib import closing from glob import glob import dateutil.parser import dateutil.tz from barman import output, xlog from barman.annotations import KeepManager, KeepManagerMixin from barman.backup_executor import ( PassiveBackupExecutor, PostgresBackupExecutor, RsyncBackupExecutor, ) from barman.compression import CompressionManager from barman.config import BackupOptions from barman.exceptions import ( AbortedRetryHookScript, CompressionIncompatibility, SshCommandException, UnknownBackupIdException, CommandFailedException, ) from barman.hooks import HookScriptRunner, RetryHookScriptRunner from barman.infofile import BackupInfo, LocalBackupInfo, WalFileInfo from barman.lockfile import ServerBackupSyncLock from barman.recovery_executor import RecoveryExecutor from barman.remote_status import RemoteStatusMixin from barman.utils import ( force_str, fsync_dir, fsync_file, human_readable_timedelta, pretty_size, ) from barman.command_wrappers import PgVerifyBackup _logger = logging.getLogger(__name__) class BackupManager(RemoteStatusMixin, KeepManagerMixin): """Manager of the backup archive for a server""" DEFAULT_STATUS_FILTER = BackupInfo.STATUS_COPY_DONE def __init__(self, server): """ Constructor :param server: barman.server.Server """ super(BackupManager, self).__init__(server=server) self.server = server self.config = server.config self._backup_cache = None self.compression_manager = CompressionManager(self.config, server.path) self.executor = None try: if server.passive_node: self.executor = PassiveBackupExecutor(self) elif self.config.backup_method == "postgres": self.executor = PostgresBackupExecutor(self) elif self.config.backup_method == "local-rsync": self.executor = RsyncBackupExecutor(self, local_mode=True) else: self.executor = RsyncBackupExecutor(self) except SshCommandException as e: self.config.disabled = True self.config.msg_list.append(force_str(e).strip()) @property def mode(self): """ Property defining the BackupInfo mode content """ if self.executor: return self.executor.mode return None def get_available_backups(self, status_filter=DEFAULT_STATUS_FILTER): """ Get a list of available backups :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup list returned """ # If the filter is not a tuple, create a tuple using the filter if not isinstance(status_filter, tuple): status_filter = tuple( status_filter, ) # Load the cache if necessary if self._backup_cache is None: self._load_backup_cache() # Filter the cache using the status filter tuple backups = {} for key, value in self._backup_cache.items(): if value.status in status_filter: backups[key] = value return backups def _load_backup_cache(self): """ Populate the cache of the available backups, reading information from disk. """ self._backup_cache = {} # Load all the backups from disk reading the backup.info files for filename in glob("%s/*/backup.info" % self.config.basebackups_directory): backup = LocalBackupInfo(self.server, filename) self._backup_cache[backup.backup_id] = backup def backup_cache_add(self, backup_info): """ Register a BackupInfo object to the backup cache. 
NOTE: Initialise the cache - in case it has not been done yet :param barman.infofile.BackupInfo backup_info: the object we want to register in the cache """ # Load the cache if needed if self._backup_cache is None: self._load_backup_cache() # Insert the BackupInfo object into the cache self._backup_cache[backup_info.backup_id] = backup_info def backup_cache_remove(self, backup_info): """ Remove a BackupInfo object from the backup cache This method _must_ be called after removing the object from disk. :param barman.infofile.BackupInfo backup_info: the object we want to remove from the cache """ # Nothing to do if the cache is not loaded if self._backup_cache is None: return # Remove the BackupInfo object from the backups cache del self._backup_cache[backup_info.backup_id] def get_backup(self, backup_id): """ Return the backup information for the given backup id. If the backup_id is None or backup.info file doesn't exists, it returns None. :param str|None backup_id: the ID of the backup to return :rtype: BackupInfo|None """ if backup_id is not None: # Get all the available backups from the cache available_backups = self.get_available_backups(BackupInfo.STATUS_ALL) # Return the BackupInfo if present, or None return available_backups.get(backup_id) return None @staticmethod def find_previous_backup_in( available_backups, backup_id, status_filter=DEFAULT_STATUS_FILTER ): """ Find the next backup (if any) in the supplied dict of BackupInfo objects. """ ids = sorted(available_backups.keys()) try: current = ids.index(backup_id) while current > 0: res = available_backups[ids[current - 1]] if res.status in status_filter: return res current -= 1 return None except ValueError: raise UnknownBackupIdException("Could not find backup_id %s" % backup_id) def get_previous_backup(self, backup_id, status_filter=DEFAULT_STATUS_FILTER): """ Get the previous backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup returned """ if not isinstance(status_filter, tuple): status_filter = tuple(status_filter) backup = LocalBackupInfo(self.server, backup_id=backup_id) available_backups = self.get_available_backups(status_filter + (backup.status,)) return self.find_previous_backup_in(available_backups, backup_id, status_filter) @staticmethod def should_remove_wals( backup, available_backups, keep_manager, skip_wal_cleanup_if_standalone, status_filter=DEFAULT_STATUS_FILTER, ): """ Determine whether we should remove the WALs for the specified backup. Returns the following tuple: - `(bool should_remove_wals, list wal_ranges_to_protect)` Where `should_remove_wals` is a boolean which is True if the WALs associated with this backup should be removed and False otherwise. `wal_ranges_to_protect` is a list of `(begin_wal, end_wal)` tuples which define *inclusive* ranges where any matching WAL should not be deleted. The rules for determining whether we should remove WALs are as follows: 1. If there is no previous backup then we can clean up the WALs. 2. If there is a previous backup and it has no keep annotation then do not clean up the WALs. We need to allow PITR from that older backup to the current time. 3. If there is a previous backup and it has a keep target of "full" then do nothing. We need to allow PITR from that keep:full backup to the current time. 4. If there is a previous backup and it has a keep target of "standalone": a. 
If that previous backup is the oldest backup then delete WALs up to the begin_wal of the next backup except for WALs which are >= begin_wal and <= end_wal of the keep:standalone backup - we can therefore add `(begin_wal, end_wal)` to `wal_ranges_to_protect` and return True. b. If that previous backup is not the oldest backup then we add the `(begin_wal, end_wal)` to `wal_ranges_to_protect` and go to 2 above. We will either end up returning False, because we hit a backup with keep:full or no keep annotation, or all backups to the oldest backup will be keep:standalone in which case we will delete up to the begin_wal of the next backup, preserving the WALs needed by each keep:standalone backups by adding them to `wal_ranges_to_protect`. This is a static method so it can be re-used by barman-cloud which will pass in its own dict of available_backups. :param BackupInfo backup_info: The backup for which we are determining whether we can clean up WALs. :param dict[str,BackupInfo] available_backups: A dict of BackupInfo objects keyed by backup_id which represent all available backups for the current server. :param KeepManagerMixin keep_manager: An object implementing the KeepManagerMixin interface. This will be either a BackupManager (in barman) or a CloudBackupCatalog (in barman-cloud). :param bool skip_wal_cleanup_if_standalone: If set to True then we should skip removing WALs for cases where all previous backups are standalone archival backups (i.e. they have a keep annotation of "standalone"). The default is True. It is only safe to set this to False if the backup is being deleted due to a retention policy rather than a `barman delete` command. :param status_filter: The status of the backups to check when determining if we should remove WALs. default to DEFAULT_STATUS_FILTER. """ previous_backup = BackupManager.find_previous_backup_in( available_backups, backup.backup_id, status_filter=status_filter ) wal_ranges_to_protect = [] while True: if previous_backup is None: # No previous backup so we should remove WALs and return any WAL ranges # we have found so far return True, wal_ranges_to_protect elif ( keep_manager.get_keep_target(previous_backup.backup_id) == KeepManager.TARGET_STANDALONE ): # A previous backup exists and it is a standalone backup - if we have # been asked to skip wal cleanup on standalone backups then we # should not remove wals if skip_wal_cleanup_if_standalone: return False, [] # Otherwise we add to the WAL ranges to protect wal_ranges_to_protect.append( (previous_backup.begin_wal, previous_backup.end_wal) ) # and continue iterating through previous backups until we find either # no previous backup or a non-standalone backup previous_backup = BackupManager.find_previous_backup_in( available_backups, previous_backup.backup_id, status_filter=status_filter, ) continue else: # A previous backup exists and it is not a standalone backup so we # must not remove any WALs and we can discard any wal_ranges_to_protect # since they are no longer relevant return False, [] @staticmethod def find_next_backup_in( available_backups, backup_id, status_filter=DEFAULT_STATUS_FILTER ): """ Find the next backup (if any) in the supplied dict of BackupInfo objects. 
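        Illustrative example (not from the original docstring): with
        backup ids "20220101T120000", "20220102T120000" and
        "20220103T120000", looking for the backup after "20220101T120000"
        returns the "20220102T120000" entry, provided its status matches
        status_filter.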
""" ids = sorted(available_backups.keys()) try: current = ids.index(backup_id) while current < (len(ids) - 1): res = available_backups[ids[current + 1]] if res.status in status_filter: return res current += 1 return None except ValueError: raise UnknownBackupIdException("Could not find backup_id %s" % backup_id) def get_next_backup(self, backup_id, status_filter=DEFAULT_STATUS_FILTER): """ Get the next backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup returned """ if not isinstance(status_filter, tuple): status_filter = tuple(status_filter) backup = LocalBackupInfo(self.server, backup_id=backup_id) available_backups = self.get_available_backups(status_filter + (backup.status,)) return self.find_next_backup_in(available_backups, backup_id, status_filter) def get_last_backup_id(self, status_filter=DEFAULT_STATUS_FILTER): """ Get the id of the latest/last backup in the catalog (if exists) :param status_filter: The status of the backup to return, default to DEFAULT_STATUS_FILTER. :return string|None: ID of the backup """ available_backups = self.get_available_backups(status_filter) if len(available_backups) == 0: return None ids = sorted(available_backups.keys()) return ids[-1] def get_first_backup_id(self, status_filter=DEFAULT_STATUS_FILTER): """ Get the id of the oldest/first backup in the catalog (if exists) :param status_filter: The status of the backup to return, default to DEFAULT_STATUS_FILTER. :return string|None: ID of the backup """ available_backups = self.get_available_backups(status_filter) if len(available_backups) == 0: return None ids = sorted(available_backups.keys()) return ids[0] @staticmethod def get_timelines_to_protect(remove_until, deleted_backup, available_backups): """ Returns all timelines in available_backups which are not associated with the backup at remove_until. This is so that we do not delete WALs on any other timelines. """ timelines_to_protect = set() # If remove_until is not set there are no backup left if remove_until: # Retrieve the list of extra timelines that contains at least # a backup. On such timelines we don't want to delete any WAL for value in available_backups.values(): # Ignore the backup that is being deleted if value == deleted_backup: continue timelines_to_protect.add(value.timeline) # Remove the timeline of `remove_until` from the list. # We have enough information to safely delete unused WAL files # on it. timelines_to_protect -= set([remove_until.timeline]) return timelines_to_protect def delete_backup(self, backup, skip_wal_cleanup_if_standalone=True): """ Delete a backup :param backup: the backup to delete :param bool skip_wal_cleanup_if_standalone: By default we will skip removing WALs if the oldest backups are standalong archival backups (i.e. they have a keep annotation of "standalone"). If this function is being called in the context of a retention policy however, it is safe to set skip_wal_cleanup_if_standalone to False and clean up WALs associated with those backups. :return bool: True if deleted, False if could not delete the backup """ if self.should_keep_backup(backup.backup_id): output.warning( "Skipping delete of backup %s for server %s " "as it has a current keep request. 
If you really " "want to delete this backup please remove the keep " "and try again.", backup.backup_id, self.config.name, ) return False available_backups = self.get_available_backups(status_filter=(BackupInfo.DONE,)) minimum_redundancy = self.server.config.minimum_redundancy # Honour minimum required redundancy if backup.status == BackupInfo.DONE and minimum_redundancy >= len( available_backups ): output.warning( "Skipping delete of backup %s for server %s " "due to minimum redundancy requirements " "(minimum redundancy = %s, " "current redundancy = %s)", backup.backup_id, self.config.name, minimum_redundancy, len(available_backups), ) return False # Keep track of when the delete operation started. delete_start_time = datetime.datetime.now() # Run the pre_delete_script if present. script = HookScriptRunner(self, "delete_script", "pre") script.env_from_backup_info(backup) script.run() # Run the pre_delete_retry_script if present. retry_script = RetryHookScriptRunner(self, "delete_retry_script", "pre") retry_script.env_from_backup_info(backup) retry_script.run() output.info( "Deleting backup %s for server %s", backup.backup_id, self.config.name ) should_remove_wals, wal_ranges_to_protect = BackupManager.should_remove_wals( backup, self.get_available_backups( BackupManager.DEFAULT_STATUS_FILTER + (backup.status,) ), keep_manager=self, skip_wal_cleanup_if_standalone=skip_wal_cleanup_if_standalone, ) next_backup = self.get_next_backup(backup.backup_id) # Delete all the data contained in the backup try: self.delete_backup_data(backup) except OSError as e: output.error( "Failure deleting backup %s for server %s.\n%s", backup.backup_id, self.config.name, e, ) return False if should_remove_wals: # There is no previous backup or all previous backups are archival # standalone backups, so we can remove unused WALs (those WALs not # required by standalone archival backups). # If there is a next backup then all unused WALs up to the begin_wal # of the next backup can be removed. # If there is no next backup then there are no remaining backups so: # - In the case of exclusive backup (default), remove all unused # WAL files. # - In the case of concurrent backup, removes only unused WAL files # prior to the start of the backup being deleted, as they # might be useful to any concurrent backup started immediately # after. 
remove_until = None # means to remove all WAL files if next_backup: remove_until = next_backup elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options: remove_until = backup timelines_to_protect = self.get_timelines_to_protect( remove_until, backup, self.get_available_backups(BackupInfo.STATUS_ARCHIVING), ) output.info("Delete associated WAL segments:") for name in self.remove_wal_before_backup( remove_until, timelines_to_protect, wal_ranges_to_protect ): output.info("\t%s", name) # As last action, remove the backup directory, # ending the delete operation try: self.delete_basebackup(backup) except OSError as e: output.error( "Failure deleting backup %s for server %s.\n%s\n" "Please manually remove the '%s' directory", backup.backup_id, self.config.name, e, backup.get_basebackup_directory(), ) return False self.backup_cache_remove(backup) # Save the time of the complete removal of the backup delete_end_time = datetime.datetime.now() output.info( "Deleted backup %s (start time: %s, elapsed time: %s)", backup.backup_id, delete_start_time.ctime(), human_readable_timedelta(delete_end_time - delete_start_time), ) # Remove the sync lockfile if exists sync_lock = ServerBackupSyncLock( self.config.barman_lock_directory, self.config.name, backup.backup_id ) if os.path.exists(sync_lock.filename): _logger.debug("Deleting backup sync lockfile: %s" % sync_lock.filename) os.unlink(sync_lock.filename) # Run the post_delete_retry_script if present. try: retry_script = RetryHookScriptRunner(self, "delete_retry_script", "post") retry_script.env_from_backup_info(backup) retry_script.run() except AbortedRetryHookScript as e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning( "Ignoring stop request after receiving " "abort (exit code %d) from post-delete " "retry hook script: %s", e.hook.exit_status, e.hook.script, ) # Run the post_delete_script if present. script = HookScriptRunner(self, "delete_script", "post") script.env_from_backup_info(backup) script.run() return True def backup(self, wait=False, wait_timeout=None): """ Performs a backup for the server :param bool wait: wait for all the required WAL files to be archived :param int|None wait_timeout: :return BackupInfo: the generated BackupInfo """ _logger.debug("initialising backup information") self.executor.init() backup_info = None try: # Create the BackupInfo object representing the backup backup_info = LocalBackupInfo( self.server, backup_id=datetime.datetime.now().strftime("%Y%m%dT%H%M%S") ) backup_info.set_attribute("systemid", self.server.systemid) backup_info.save() self.backup_cache_add(backup_info) output.info( "Starting backup using %s method for server %s in %s", self.mode, self.config.name, backup_info.get_basebackup_directory(), ) # Run the pre-backup-script if present. script = HookScriptRunner(self, "backup_script", "pre") script.env_from_backup_info(backup_info) script.run() # Run the pre-backup-retry-script if present. 
retry_script = RetryHookScriptRunner(self, "backup_retry_script", "pre") retry_script.env_from_backup_info(backup_info) retry_script.run() # Do the backup using the BackupExecutor self.executor.backup(backup_info) # Create a restore point after a backup target_name = "barman_%s" % backup_info.backup_id self.server.postgres.create_restore_point(target_name) # Free the Postgres connection self.server.postgres.close() # Compute backup size and fsync it on disk self.backup_fsync_and_set_sizes(backup_info) # Mark the backup as WAITING_FOR_WALS backup_info.set_attribute("status", BackupInfo.WAITING_FOR_WALS) # Use BaseException instead of Exception to catch events like # KeyboardInterrupt (e.g.: CTRL-C) except BaseException as e: msg_lines = force_str(e).strip().splitlines() # If the exception has no attached message use the raw # type name if len(msg_lines) == 0: msg_lines = [type(e).__name__] if backup_info: # Use only the first line of exception message # in backup_info error field backup_info.set_attribute("status", BackupInfo.FAILED) backup_info.set_attribute( "error", "failure %s (%s)" % (self.executor.current_action, msg_lines[0]), ) output.error( "Backup failed %s.\nDETAILS: %s", self.executor.current_action, "\n".join(msg_lines), ) else: output.info( "Backup end at LSN: %s (%s, %08X)", backup_info.end_xlog, backup_info.end_wal, backup_info.end_offset, ) executor = self.executor output.info( "Backup completed (start time: %s, elapsed time: %s)", self.executor.copy_start_time, human_readable_timedelta( datetime.datetime.now() - executor.copy_start_time ), ) # If requested, wait for end_wal to be archived if wait: try: self.server.wait_for_wal(backup_info.end_wal, wait_timeout) self.check_backup(backup_info) except KeyboardInterrupt: # Ignore CTRL-C pressed while waiting for WAL files output.info( "Got CTRL-C. Continuing without waiting for '%s' " "to be archived", backup_info.end_wal, ) finally: if backup_info: backup_info.save() # Make sure we are not holding any PostgreSQL connection # during the post-backup scripts self.server.close() # Run the post-backup-retry-script if present. try: retry_script = RetryHookScriptRunner( self, "backup_retry_script", "post" ) retry_script.env_from_backup_info(backup_info) retry_script.run() except AbortedRetryHookScript as e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning( "Ignoring stop request after receiving " "abort (exit code %d) from post-backup " "retry hook script: %s", e.hook.exit_status, e.hook.script, ) # Run the post-backup-script if present. script = HookScriptRunner(self, "backup_script", "post") script.env_from_backup_info(backup_info) script.run() output.result("backup", backup_info) return backup_info def recover( self, backup_info, dest, tablespaces=None, remote_command=None, **kwargs ): """ Performs a recovery of a backup :param barman.infofile.LocalBackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. 
:kwparam str|None target_tli: the target timeline :kwparam str|None target_time: the target time :kwparam str|None target_xid: the target xid :kwparam str|None target_lsn: the target LSN :kwparam str|None target_name: the target name created previously with pg_create_restore_point() function call :kwparam bool|None target_immediate: end recovery as soon as consistency is reached :kwparam bool exclusive: whether the recovery is exclusive or not :kwparam str|None target_action: default None. The recovery target action :kwparam bool|None standby_mode: the standby mode if needed """ # Archive every WAL files in the incoming directory of the server self.server.archive_wal(verbose=False) # Delegate the recovery operation to a RecoveryExecutor object executor = RecoveryExecutor(self) # Run the pre_recovery_script if present. script = HookScriptRunner(self, "recovery_script", "pre") script.env_from_recover( backup_info, dest, tablespaces, remote_command, **kwargs ) script.run() # Run the pre_recovery_retry_script if present. retry_script = RetryHookScriptRunner(self, "recovery_retry_script", "pre") retry_script.env_from_recover( backup_info, dest, tablespaces, remote_command, **kwargs ) retry_script.run() # Execute the recovery. # We use a closing context to automatically remove # any resource eventually allocated during recovery. with closing(executor): recovery_info = executor.recover( backup_info, dest, tablespaces=tablespaces, remote_command=remote_command, **kwargs ) # Run the post_recovery_retry_script if present. try: retry_script = RetryHookScriptRunner(self, "recovery_retry_script", "post") retry_script.env_from_recover( backup_info, dest, tablespaces, remote_command, **kwargs ) retry_script.run() except AbortedRetryHookScript as e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning( "Ignoring stop request after receiving " "abort (exit code %d) from post-recovery " "retry hook script: %s", e.hook.exit_status, e.hook.script, ) # Run the post-recovery-script if present. script = HookScriptRunner(self, "recovery_script", "post") script.env_from_recover( backup_info, dest, tablespaces, remote_command, **kwargs ) script.run() # Output recovery results output.result("recovery", recovery_info["results"]) def archive_wal(self, verbose=True): """ Executes WAL maintenance operations, such as archiving and compression If verbose is set to False, outputs something only if there is at least one file :param bool verbose: report even if no actions """ for archiver in self.server.archivers: archiver.archive(verbose) def cron_retention_policy(self): """ Retention policy management """ enforce_retention_policies = self.server.enforce_retention_policies retention_policy_mode = self.config.retention_policy_mode if enforce_retention_policies and retention_policy_mode == "auto": available_backups = self.get_available_backups(BackupInfo.STATUS_ALL) retention_status = self.config.retention_policy.report() for bid in sorted(retention_status.keys()): if retention_status[bid] == BackupInfo.OBSOLETE: output.info( "Enforcing retention policy: removing backup %s for " "server %s" % (bid, self.config.name) ) self.delete_backup( available_backups[bid], skip_wal_cleanup_if_standalone=False ) def delete_basebackup(self, backup): """ Delete the basebackup dir of a given backup. 
:param barman.infofile.LocalBackupInfo backup: the backup to delete """ backup_dir = backup.get_basebackup_directory() _logger.debug("Deleting base backup directory: %s" % backup_dir) shutil.rmtree(backup_dir) def delete_backup_data(self, backup): """ Delete the data contained in a given backup. :param barman.infofile.LocalBackupInfo backup: the backup to delete """ if backup.tablespaces: if backup.backup_version == 2: tbs_dir = backup.get_basebackup_directory() else: tbs_dir = os.path.join(backup.get_data_directory(), "pg_tblspc") for tablespace in backup.tablespaces: rm_dir = os.path.join(tbs_dir, str(tablespace.oid)) if os.path.exists(rm_dir): _logger.debug( "Deleting tablespace %s directory: %s" % (tablespace.name, rm_dir) ) shutil.rmtree(rm_dir) pg_data = backup.get_data_directory() if os.path.exists(pg_data): _logger.debug("Deleting PGDATA directory: %s" % pg_data) shutil.rmtree(pg_data) def delete_wal(self, wal_info): """ Delete a WAL segment, with the given WalFileInfo :param barman.infofile.WalFileInfo wal_info: the WAL to delete """ # Run the pre_wal_delete_script if present. script = HookScriptRunner(self, "wal_delete_script", "pre") script.env_from_wal_info(wal_info) script.run() # Run the pre_wal_delete_retry_script if present. retry_script = RetryHookScriptRunner(self, "wal_delete_retry_script", "pre") retry_script.env_from_wal_info(wal_info) retry_script.run() error = None try: os.unlink(wal_info.fullpath(self.server)) try: os.removedirs(os.path.dirname(wal_info.fullpath(self.server))) except OSError: # This is not an error condition # We always try to remove the the trailing directories, # this means that hashdir is not empty. pass except OSError as e: error = "Ignoring deletion of WAL file %s for server %s: %s" % ( wal_info.name, self.config.name, e, ) output.warning(error) # Run the post_wal_delete_retry_script if present. try: retry_script = RetryHookScriptRunner( self, "wal_delete_retry_script", "post" ) retry_script.env_from_wal_info(wal_info, None, error) retry_script.run() except AbortedRetryHookScript as e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning( "Ignoring stop request after receiving " "abort (exit code %d) from post-wal-delete " "retry hook script: %s", e.hook.exit_status, e.hook.script, ) # Run the post_wal_delete_script if present. script = HookScriptRunner(self, "wal_delete_script", "post") script.env_from_wal_info(wal_info, None, error) script.run() def check(self, check_strategy): """ This function does some checks on the server. 
:param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("compression settings") # Check compression_setting parameter if self.config.compression and not self.compression_manager.check(): check_strategy.result(self.config.name, False) else: status = True try: self.compression_manager.get_default_compressor() except CompressionIncompatibility as field: check_strategy.result(self.config.name, "%s setting" % field, False) status = False check_strategy.result(self.config.name, status) # Failed backups check check_strategy.init_check("failed backups") failed_backups = self.get_available_backups((BackupInfo.FAILED,)) status = len(failed_backups) == 0 check_strategy.result( self.config.name, status, hint="there are %s failed backups" % ( len( failed_backups, ) ), ) check_strategy.init_check("minimum redundancy requirements") # Minimum redundancy checks no_backups = len(self.get_available_backups(status_filter=(BackupInfo.DONE,))) # Check minimum_redundancy_requirements parameter if no_backups < int(self.config.minimum_redundancy): status = False else: status = True check_strategy.result( self.config.name, status, hint="have %s backups, expected at least %s" % (no_backups, self.config.minimum_redundancy), ) # TODO: Add a check for the existence of ssh and of rsync # Execute additional checks defined by the BackupExecutor if self.executor: self.executor.check(check_strategy) def status(self): """ This function show the server status """ # get number of backups no_backups = len(self.get_available_backups(status_filter=(BackupInfo.DONE,))) output.result( "status", self.config.name, "backups_number", "No. of available backups", no_backups, ) output.result( "status", self.config.name, "first_backup", "First available backup", self.get_first_backup_id(), ) output.result( "status", self.config.name, "last_backup", "Last available backup", self.get_last_backup_id(), ) # Minimum redundancy check. if number of backups minor than minimum # redundancy, fail. if no_backups < self.config.minimum_redundancy: output.result( "status", self.config.name, "minimum_redundancy", "Minimum redundancy requirements", "FAILED (%s/%s)" % (no_backups, self.config.minimum_redundancy), ) else: output.result( "status", self.config.name, "minimum_redundancy", "Minimum redundancy requirements", "satisfied (%s/%s)" % (no_backups, self.config.minimum_redundancy), ) # Output additional status defined by the BackupExecutor if self.executor: self.executor.status() def fetch_remote_status(self): """ Build additional remote status lines defined by the BackupManager. This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. :rtype: dict[str, None|str] """ if self.executor: return self.executor.get_remote_status() else: return {} def rebuild_xlogdb(self): """ Rebuild the whole xlog database guessing it from the archive content. 
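        The archive is scanned as laid out below (an illustrative layout,
        file names are examples only): history files live at the root of
        wals_directory, while WAL segments and backup labels live in the
        per-prefix hash directories.

            <wals_directory>/
                00000001.history
                0000000100000000/
                    000000010000000000000001
                    000000010000000000000002.00000028.backup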
""" from os.path import isdir, join output.info("Rebuilding xlogdb for server %s", self.config.name) root = self.config.wals_directory comp_manager = self.compression_manager wal_count = label_count = history_count = 0 # lock the xlogdb as we are about replacing it completely with self.server.xlogdb("w") as fxlogdb: xlogdb_dir = os.path.dirname(fxlogdb.name) with tempfile.TemporaryFile(mode="w+", dir=xlogdb_dir) as fxlogdb_new: for name in sorted(os.listdir(root)): # ignore the xlogdb and its lockfile if name.startswith(self.server.XLOG_DB): continue fullname = join(root, name) if isdir(fullname): # all relevant files are in subdirectories hash_dir = fullname for wal_name in sorted(os.listdir(hash_dir)): fullname = join(hash_dir, wal_name) if isdir(fullname): _logger.warning( "unexpected directory " "rebuilding the wal database: %s", fullname, ) else: if xlog.is_wal_file(fullname): wal_count += 1 elif xlog.is_backup_file(fullname): label_count += 1 elif fullname.endswith(".tmp"): _logger.warning( "temporary file found " "rebuilding the wal database: %s", fullname, ) continue else: _logger.warning( "unexpected file " "rebuilding the wal database: %s", fullname, ) continue wal_info = comp_manager.get_wal_file_info(fullname) fxlogdb_new.write(wal_info.to_xlogdb_line()) else: # only history files are here if xlog.is_history_file(fullname): history_count += 1 wal_info = comp_manager.get_wal_file_info(fullname) fxlogdb_new.write(wal_info.to_xlogdb_line()) else: _logger.warning( "unexpected file rebuilding the wal database: %s", fullname, ) fxlogdb_new.flush() fxlogdb_new.seek(0) fxlogdb.seek(0) shutil.copyfileobj(fxlogdb_new, fxlogdb) fxlogdb.truncate() output.info( "Done rebuilding xlogdb for server %s " "(history: %s, backup_labels: %s, wal_file: %s)", self.config.name, history_count, label_count, wal_count, ) def get_latest_archived_wals_info(self): """ Return a dictionary of timelines associated with the WalFileInfo of the last WAL file in the archive, or None if the archive doesn't contain any WAL file. :rtype: dict[str, WalFileInfo]|None """ from os.path import isdir, join root = self.config.wals_directory comp_manager = self.compression_manager # If the WAL archive directory doesn't exists the archive is empty if not isdir(root): return dict() # Traverse all the directory in the archive in reverse order, # returning the first WAL file found timelines = {} for name in sorted(os.listdir(root), reverse=True): fullname = join(root, name) # All relevant files are in subdirectories, so # we skip any non-directory entry if isdir(fullname): # Extract the timeline. If it is not valid, skip this directory try: timeline = name[0:8] int(timeline, 16) except ValueError: continue # If this timeline already has a file, skip this directory if timeline in timelines: continue hash_dir = fullname # Inspect contained files in reverse order for wal_name in sorted(os.listdir(hash_dir), reverse=True): fullname = join(hash_dir, wal_name) # Return the first file that has the correct name if not isdir(fullname) and xlog.is_wal_file(fullname): timelines[timeline] = comp_manager.get_wal_file_info(fullname) break # Return the timeline map return timelines def remove_wal_before_backup( self, backup_info, timelines_to_protect=None, wal_ranges_to_protect=[] ): """ Remove WAL files which have been archived before the start of the provided backup. If no backup_info is provided delete all available WAL files If timelines_to_protect list is passed, never remove a wal in one of these timelines. 
:param BackupInfo|None backup_info: the backup information structure :param set timelines_to_protect: optional list of timelines to protect :param list wal_ranges_to_protect: optional list of `(begin_wal, end_wal)` tuples which define inclusive ranges of WALs which must not be deleted. :return list: a list of removed WAL files """ removed = [] with self.server.xlogdb("r+") as fxlogdb: xlogdb_dir = os.path.dirname(fxlogdb.name) with tempfile.TemporaryFile(mode="w+", dir=xlogdb_dir) as fxlogdb_new: for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) if not xlog.is_any_xlog_file(wal_info.name): output.error( "invalid WAL segment name %r\n" 'HINT: Please run "barman rebuild-xlogdb %s" ' "to solve this issue", wal_info.name, self.config.name, ) continue # Keeps the WAL segment if it is a history file keep = xlog.is_history_file(wal_info.name) # Keeps the WAL segment if its timeline is in # `timelines_to_protect` if timelines_to_protect: tli, _, _ = xlog.decode_segment_name(wal_info.name) keep |= tli in timelines_to_protect # Keeps the WAL segment if it is within a protected range if xlog.is_backup_file(wal_info.name): # If we have a .backup file then truncate the name for the # range check wal_name = wal_info.name[:24] else: wal_name = wal_info.name for begin_wal, end_wal in wal_ranges_to_protect: keep |= wal_name >= begin_wal and wal_name <= end_wal # Keeps the WAL segment if it is a newer # than the given backup (the first available) if backup_info and backup_info.begin_wal is not None: keep |= wal_info.name >= backup_info.begin_wal # If the file has to be kept write it in the new xlogdb # otherwise delete it and record it in the removed list if keep: fxlogdb_new.write(wal_info.to_xlogdb_line()) else: self.delete_wal(wal_info) removed.append(wal_info.name) fxlogdb_new.flush() fxlogdb_new.seek(0) fxlogdb.seek(0) shutil.copyfileobj(fxlogdb_new, fxlogdb) fxlogdb.truncate() return removed def validate_last_backup_maximum_age(self, last_backup_maximum_age): """ Evaluate the age of the last available backup in a catalogue. If the last backup is older than the specified time interval (age), the function returns False. If within the requested age interval, the function returns True. :param timedate.timedelta last_backup_maximum_age: time interval representing the maximum allowed age for the last backup in a server catalogue :return tuple: a tuple containing the boolean result of the check and auxiliary information about the last backup current age """ # Get the ID of the last available backup backup_id = self.get_last_backup_id() if backup_id: # Get the backup object backup = LocalBackupInfo(self.server, backup_id=backup_id) now = datetime.datetime.now(dateutil.tz.tzlocal()) # Evaluate the point of validity validity_time = now - last_backup_maximum_age # Pretty print of a time interval (age) msg = human_readable_timedelta(now - backup.end_time) # If the backup end time is older than the point of validity, # return False, otherwise return true if backup.end_time < validity_time: return False, msg else: return True, msg else: # If no backup is available return false return False, "No available backups" def validate_last_backup_min_size(self, last_backup_minimum_size): """ Evaluate the size of the last available backup in a catalogue. If the last backup is smaller than the specified size the function returns False. Otherwise, the function returns True. 
:param last_backup_minimum_size: size in bytes representing the maximum allowed age for the last backup in a server catalogue :return tuple: a tuple containing the boolean result of the check and auxiliary information about the last backup current age """ # Get the ID of the last available backup backup_id = self.get_last_backup_id() if backup_id: # Get the backup object backup = LocalBackupInfo(self.server, backup_id=backup_id) if backup.size < last_backup_minimum_size: return False, backup.size else: return True, backup.size else: # If no backup is available return false return False, 0 def backup_fsync_and_set_sizes(self, backup_info): """ Fsync all files in a backup and set the actual size on disk of a backup. Also evaluate the deduplication ratio and the deduplicated size if applicable. :param LocalBackupInfo backup_info: the backup to update """ # Calculate the base backup size self.executor.current_action = "calculating backup size" _logger.debug(self.executor.current_action) backup_size = 0 deduplicated_size = 0 backup_dest = backup_info.get_basebackup_directory() for dir_path, _, file_names in os.walk(backup_dest): # execute fsync() on the containing directory fsync_dir(dir_path) # execute fsync() on all the contained files for filename in file_names: file_path = os.path.join(dir_path, filename) file_stat = fsync_file(file_path) backup_size += file_stat.st_size # Excludes hard links from real backup size if file_stat.st_nlink == 1: deduplicated_size += file_stat.st_size # Save size into BackupInfo object backup_info.set_attribute("size", backup_size) backup_info.set_attribute("deduplicated_size", deduplicated_size) if backup_info.size > 0: deduplication_ratio = 1 - ( float(backup_info.deduplicated_size) / backup_info.size ) else: deduplication_ratio = 0 if self.config.reuse_backup == "link": output.info( "Backup size: %s. Actual size on disk: %s" " (-%s deduplication ratio)." % ( pretty_size(backup_info.size), pretty_size(backup_info.deduplicated_size), "{percent:.2%}".format(percent=deduplication_ratio), ) ) else: output.info("Backup size: %s" % pretty_size(backup_info.size)) def check_backup(self, backup_info): """ Make sure that all the required WAL files to check the consistency of a physical backup (that is, from the beginning to the end of the full backup) are correctly archived. This command is automatically invoked by the cron command and at the end of every backup operation. :param backup_info: the target backup """ # Gather the list of the latest archived wals timelines = self.get_latest_archived_wals_info() # Get the basic info for the backup begin_wal = backup_info.begin_wal end_wal = backup_info.end_wal timeline = begin_wal[:8] # Case 0: there is nothing to check for this backup, as it is # currently in progress if not end_wal: return # Case 1: Barman still doesn't know about the timeline the backup # started with. We still haven't archived any WAL corresponding # to the backup, so we can't proceed with checking the existence # of the required WAL files if not timelines or timeline not in timelines: backup_info.status = BackupInfo.WAITING_FOR_WALS backup_info.save() return # Find the most recent archived WAL for this server in the timeline # where the backup was taken last_archived_wal = timelines[timeline].name # Case 2: the most recent WAL file archived is older than the # start of the backup. We must wait for the archiver to receive # and/or process the WAL files. 
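# Note: WAL segment names are fixed-width, zero-padded hexadecimal
# strings, so the plain string comparisons used below order them
# chronologically within a timeline, e.g.
# "000000010000000000000009" < "00000001000000000000000A"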
if last_archived_wal < begin_wal: backup_info.status = BackupInfo.WAITING_FOR_WALS backup_info.save() return # Check the intersection between the required WALs and the archived # ones. They should all exist segments = backup_info.get_required_wal_segments() missing_wal = None for wal in segments: # Stop checking if we reach the last archived wal if wal > last_archived_wal: break wal_full_path = self.server.get_wal_full_path(wal) if not os.path.exists(wal_full_path): missing_wal = wal break if missing_wal: # Case 3: the most recent WAL file archived is more recent than # the one corresponding to the start of a backup. If WAL # file is missing, then we can't recover from the backup so we # must mark the backup as FAILED. # TODO: Verify if the error field is the right place # to store the error message backup_info.error = ( "At least one WAL file is missing. " "The first missing WAL file is %s" % missing_wal ) backup_info.status = BackupInfo.FAILED backup_info.save() return if end_wal <= last_archived_wal: # Case 4: if the most recent WAL file archived is more recent or # equal than the one corresponding to the end of the backup and # every WAL that will be required by the recovery is available, # we can mark the backup as DONE. backup_info.status = BackupInfo.DONE else: # Case 5: if the most recent WAL file archived is older than # the one corresponding to the end of the backup but # all the WAL files until that point are present. backup_info.status = BackupInfo.WAITING_FOR_WALS backup_info.save() def verify_backup(self, backup_info): """ This function should check if pg_verifybackup is installed and run it against backup path should test if pg_verifybackup is installed locally :param backup_info: barman.infofile.LocalBackupInfo instance """ output.info("Calling pg_verifybackup") # Test pg_verifybackup existence version_info = PgVerifyBackup.get_version_info(self.server.path) if version_info.get("full_path", None) is None: output.error("pg_verifybackup not found") return pg_verifybackup = PgVerifyBackup( data_path=backup_info.get_data_directory(), command=version_info["full_path"], version=version_info["full_version"], ) try: pg_verifybackup() except CommandFailedException as e: output.error( "verify backup failure on directory '%s'" % backup_info.get_data_directory() ) output.error(e.args[0]["err"]) return output.info(pg_verifybackup.get_output()[0].strip()) barman-2.18/barman/postgres_plumbing.py0000644000621200062120000000665314172556763016467 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ PostgreSQL Plumbing module This module contain low-level PostgreSQL related information, such as the on-disk structure and the name of the core functions in different PostgreSQL versions. 
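For example, `function_name_map(90600)["pg_switch_wal"]` maps to `pg_switch_xlog` on a 9.6 server, while on PostgreSQL 10 and later (or when the version is unknown) every name is returned unchanged.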
""" PGDATA_EXCLUDE_LIST = [ # Exclude log files (pg_log was renamed to log in Postgres v10) "/pg_log/*", "/log/*", # Exclude WAL files (pg_xlog was renamed to pg_wal in Postgres v10) "/pg_xlog/*", "/pg_wal/*", # We handle this on a different step of the copy "/global/pg_control", ] EXCLUDE_LIST = [ # Files: see excludeFiles const in PostgreSQL source "pgsql_tmp*", "postgresql.auto.conf.tmp", "current_logfiles.tmp", "pg_internal.init", "postmaster.pid", "postmaster.opts", "recovery.conf", "standby.signal", # Directories: see excludeDirContents const in PostgreSQL source "pg_dynshmem/*", "pg_notify/*", "pg_replslot/*", "pg_serial/*", "pg_stat_tmp/*", "pg_snapshots/*", "pg_subtrans/*", ] def function_name_map(server_version): """ Return a map with function and directory names according to the current PostgreSQL version. Each entry has the `current` name as key and the name for the specific version as value. :param number|None server_version: Version of PostgreSQL as returned by psycopg2 (i.e. 90301 represent PostgreSQL 9.3.1). If the version is None, default to the latest PostgreSQL version :rtype: dict[str] """ if server_version and server_version < 100000: return { "pg_switch_wal": "pg_switch_xlog", "pg_walfile_name": "pg_xlogfile_name", "pg_wal": "pg_xlog", "pg_walfile_name_offset": "pg_xlogfile_name_offset", "pg_last_wal_replay_lsn": "pg_last_xlog_replay_location", "pg_current_wal_lsn": "pg_current_xlog_location", "pg_current_wal_insert_lsn": "pg_current_xlog_insert_location", "pg_last_wal_receive_lsn": "pg_last_xlog_receive_location", "sent_lsn": "sent_location", "write_lsn": "write_location", "flush_lsn": "flush_location", "replay_lsn": "replay_location", } return { "pg_switch_wal": "pg_switch_wal", "pg_walfile_name": "pg_walfile_name", "pg_wal": "pg_wal", "pg_walfile_name_offset": "pg_walfile_name_offset", "pg_last_wal_replay_lsn": "pg_last_wal_replay_lsn", "pg_current_wal_lsn": "pg_current_wal_lsn", "pg_current_wal_insert_lsn": "pg_current_wal_insert_lsn", "pg_last_wal_receive_lsn": "pg_last_wal_receive_lsn", "sent_lsn": "sent_lsn", "write_lsn": "write_lsn", "flush_lsn": "flush_lsn", "replay_lsn": "replay_lsn", } barman-2.18/barman/remote_status.py0000644000621200062120000000441314172556763015612 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ Remote Status module A Remote Status class implements a standard interface for retrieving and caching the results of a remote component (such as Postgres server, WAL archiver, etc.). It follows the Mixin pattern. """ from abc import ABCMeta, abstractmethod from barman.utils import with_metaclass class RemoteStatusMixin(with_metaclass(ABCMeta, object)): """ Abstract base class that implements remote status capabilities following the Mixin pattern. 
""" def __init__(self, *args, **kwargs): """ Base constructor (Mixin pattern) """ self._remote_status = None super(RemoteStatusMixin, self).__init__(*args, **kwargs) @abstractmethod def fetch_remote_status(self): """ Retrieve status information from the remote component The implementation of this method must not raise any exception in case of errors, but should set the missing values to None in the resulting dictionary. :rtype: dict[str, None|str] """ def get_remote_status(self): """ Get the status of the remote component This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. :rtype: dict[str, None|str] """ if self._remote_status is None: self._remote_status = self.fetch_remote_status() return self._remote_status def reset_remote_status(self): """ Reset the cached result """ self._remote_status = None barman-2.18/barman/exceptions.py0000644000621200062120000002114114172556763015072 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . class BarmanException(Exception): """ The base class of all other barman exceptions """ class ConfigurationException(BarmanException): """ Base exception for all the Configuration errors """ class CommandException(BarmanException): """ Base exception for all the errors related to the execution of a Command. """ class CompressionException(BarmanException): """ Base exception for all the errors related to the execution of a compression action. """ class PostgresException(BarmanException): """ Base exception for all the errors related to PostgreSQL. """ class BackupException(BarmanException): """ Base exception for all the errors related to the execution of a backup. """ class WALFileException(BarmanException): """ Base exception for all the errors related to WAL files. """ def __str__(self): """ Human readable string representation """ return "%s:%s" % (self.__class__.__name__, self.args[0] if self.args else None) class HookScriptException(BarmanException): """ Base exception for all the errors related to Hook Script execution. """ class LockFileException(BarmanException): """ Base exception for lock related errors """ class SyncException(BarmanException): """ Base Exception for synchronisation functions """ class DuplicateWalFile(WALFileException): """ A duplicate WAL file has been found """ class MatchingDuplicateWalFile(DuplicateWalFile): """ A duplicate WAL file has been found, but it's identical to the one we already have. 
""" class SshCommandException(CommandException): """ Error parsing ssh_command parameter """ class UnknownBackupIdException(BackupException): """ The searched backup_id doesn't exists """ class BackupInfoBadInitialisation(BackupException): """ Exception for a bad initialization error """ class SyncError(SyncException): """ Synchronisation error """ class SyncNothingToDo(SyncException): """ Nothing to do during sync operations """ class SyncToBeDeleted(SyncException): """ An incomplete backup is to be deleted """ class CommandFailedException(CommandException): """ Exception representing a failed command """ class CommandMaxRetryExceeded(CommandFailedException): """ A command with retry_times > 0 has exceeded the number of available retry """ class RsyncListFilesFailure(CommandException): """ Failure parsing the output of a "rsync --list-only" command """ class DataTransferFailure(CommandException): """ Used to pass failure details from a data transfer Command """ @classmethod def from_command_error(cls, cmd, e, msg): """ This method build a DataTransferFailure exception and report the provided message to the user (both console and log file) along with the output of the failed command. :param str cmd: The command that failed the transfer :param CommandFailedException e: The exception we are handling :param str msg: a descriptive message on what we are trying to do :return DataTransferFailure: will contain the message provided in msg """ try: details = msg details += "\n%s error:\n" % cmd details += e.args[0]["out"] details += e.args[0]["err"] return cls(details) except (TypeError, NameError): # If it is not a dictionary just convert it to a string from barman.utils import force_str return cls(force_str(e.args)) class CompressionIncompatibility(CompressionException): """ Exception for compression incompatibility """ class FsOperationFailed(CommandException): """ Exception which represents a failed execution of a command on FS """ class LockFileBusy(LockFileException): """ Raised when a lock file is not free """ class LockFilePermissionDenied(LockFileException): """ Raised when a lock file is not accessible """ class LockFileParsingError(LockFileException): """ Raised when the content of the lockfile is unexpected """ class ConninfoException(ConfigurationException): """ Error for missing or failed parsing of the conninfo parameter (DSN) """ class PostgresConnectionError(PostgresException): """ Error connecting to the PostgreSQL server """ def __str__(self): # Returns the first line if self.args and self.args[0]: from barman.utils import force_str return force_str(self.args[0]).splitlines()[0].strip() else: return "" class PostgresAppNameError(PostgresConnectionError): """ Error setting application name with PostgreSQL server """ class PostgresSuperuserRequired(PostgresException): """ Superuser access is required """ class BackupFunctionsAccessRequired(PostgresException): """ Superuser or access to backup functions is required """ class PostgresIsInRecovery(PostgresException): """ PostgreSQL is in recovery, so no write operations are allowed """ class PostgresUnsupportedFeature(PostgresException): """ Unsupported feature """ class PostgresDuplicateReplicationSlot(PostgresException): """ The creation of a physical replication slot failed because the slot already exists """ class PostgresReplicationSlotsFull(PostgresException): """ The creation of a physical replication slot failed because the all the replication slots have been taken """ class PostgresReplicationSlotInUse(PostgresException): """ 
The drop of a physical replication slot failed because the replication slots is in use """ class PostgresInvalidReplicationSlot(PostgresException): """ Exception representing a failure during the deletion of a non existent replication slot """ class TimeoutError(CommandException): """ A timeout occurred. """ class ArchiverFailure(WALFileException): """ Exception representing a failure during the execution of the archive process """ class BadXlogSegmentName(WALFileException): """ Exception for a bad xlog name """ class BadHistoryFileContents(WALFileException): """ Exception for a corrupted history file """ class AbortedRetryHookScript(HookScriptException): """ Exception for handling abort of retry hook scripts """ def __init__(self, hook): """ Initialise the exception with hook script info """ self.hook = hook def __str__(self): """ String representation """ return "Abort '%s_%s' retry hook script (%s, exit code: %d)" % ( self.hook.phase, self.hook.name, self.hook.script, self.hook.exit_status, ) class RecoveryException(BarmanException): """ Exception for a recovery error """ class RecoveryTargetActionException(RecoveryException): """ Exception for a wrong recovery target action """ class RecoveryStandbyModeException(RecoveryException): """ Exception for a wrong recovery standby mode """ class RecoveryInvalidTargetException(RecoveryException): """ Exception for a wrong recovery target """ class UnrecoverableHookScriptError(BarmanException): """ Exception for hook script errors which mean the script should not be retried. """ class ArchivalBackupException(BarmanException): """ Exception for errors concerning archival backups. """ class WalArchiveContentError(BarmanException): """ Exception raised when unexpected content is detected in the WAL archive. """ class InvalidRetentionPolicy(BarmanException): """ Exception raised when a retention policy cannot be parsed. """ barman-2.18/barman/wal_archiver.py0000644000621200062120000012276314172556763015373 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. 
If not, see import collections import datetime import errno import filecmp import logging import os import shutil from abc import ABCMeta, abstractmethod from glob import glob from distutils.version import LooseVersion as Version from barman import output, xlog from barman.command_wrappers import CommandFailedException, PgReceiveXlog from barman.exceptions import ( AbortedRetryHookScript, ArchiverFailure, DuplicateWalFile, MatchingDuplicateWalFile, ) from barman.hooks import HookScriptRunner, RetryHookScriptRunner from barman.infofile import WalFileInfo from barman.remote_status import RemoteStatusMixin from barman.utils import fsync_dir, fsync_file, mkpath, with_metaclass from barman.xlog import is_partial_file _logger = logging.getLogger(__name__) class WalArchiverQueue(list): def __init__(self, items, errors=None, skip=None, batch_size=0): """ A WalArchiverQueue is a list of WalFileInfo which has two extra attribute list: * errors: containing a list of unrecognized files * skip: containing a list of skipped files. It also stores batch run size information in case it is requested by configuration, in order to limit the number of WAL files that are processed in a single run of the archive-wal command. :param items: iterable from which initialize the list :param batch_size: size of the current batch run (0=unlimited) :param errors: an optional list of unrecognized files :param skip: an optional list of skipped files """ super(WalArchiverQueue, self).__init__(items) self.skip = [] self.errors = [] if skip is not None: self.skip = skip if errors is not None: self.errors = errors # Normalises batch run size if batch_size > 0: self.batch_size = batch_size else: self.batch_size = 0 @property def size(self): """ Number of valid WAL segments waiting to be processed (in total) :return int: total number of valid WAL files """ return len(self) @property def run_size(self): """ Number of valid WAL files to be processed in this run - takes in consideration the batch size :return int: number of valid WAL files for this batch run """ # In case a batch size has been explicitly specified # (i.e. batch_size > 0), returns the minimum number between # batch size and the queue size. Otherwise, simply # returns the total queue size (unlimited batch size). if self.batch_size > 0: return min(self.size, self.batch_size) return self.size class WalArchiver(with_metaclass(ABCMeta, RemoteStatusMixin)): """ Base class for WAL archiver objects """ def __init__(self, backup_manager, name): """ Base class init method. :param backup_manager: The backup manager :param name: The name of this archiver :return: """ self.backup_manager = backup_manager self.server = backup_manager.server self.config = backup_manager.config self.name = name super(WalArchiver, self).__init__() def receive_wal(self, reset=False): """ Manage reception of WAL files. Does nothing by default. Some archiver classes, like the StreamingWalArchiver, have a full implementation. :param bool reset: When set, resets the status of receive-wal :raise ArchiverFailure: when something goes wrong """ def archive(self, verbose=True): """ Archive WAL files, discarding duplicates or those that are not valid. 
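The number of segments processed in a single run is bounded by the batch size reported by `get_next_batch` (a batch size of 0 means the whole queue is processed in one run).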
:param boolean verbose: Flag for verbose output """ compressor = self.backup_manager.compression_manager.get_default_compressor() stamp = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ") processed = 0 header = "Processing xlog segments from %s for %s" % ( self.name, self.config.name, ) # Get the next batch of WAL files to be processed batch = self.get_next_batch() # Analyse the batch and properly log the information if batch.size: if batch.size > batch.run_size: # Batch mode enabled _logger.info( "Found %s xlog segments from %s for %s." " Archive a batch of %s segments in this run.", batch.size, self.name, self.config.name, batch.run_size, ) header += " (batch size: %s)" % batch.run_size else: # Single run mode (traditional) _logger.info( "Found %s xlog segments from %s for %s." " Archive all segments in one run.", batch.size, self.name, self.config.name, ) else: _logger.info( "No xlog segments found from %s for %s.", self.name, self.config.name ) # Print the header (verbose mode) if verbose: output.info(header, log=False) # Loop through all available WAL files for wal_info in batch: # Print the header (non verbose mode) if not processed and not verbose: output.info(header, log=False) # Exit when archive batch size is reached if processed >= batch.run_size: _logger.debug( "Batch size reached (%s) - Exit %s process for %s", batch.batch_size, self.name, self.config.name, ) break processed += 1 # Report to the user the WAL file we are archiving output.info("\t%s", wal_info.name, log=False) _logger.info( "Archiving segment %s of %s from %s: %s/%s", processed, batch.run_size, self.name, self.config.name, wal_info.name, ) # Archive the WAL file try: self.archive_wal(compressor, wal_info) except MatchingDuplicateWalFile: # We already have this file. Simply unlink the file. os.unlink(wal_info.orig_filename) continue except DuplicateWalFile: output.info( "\tError: %s is already present in server %s. " "File moved to errors directory.", wal_info.name, self.config.name, ) error_dst = os.path.join( self.config.errors_directory, "%s.%s.duplicate" % (wal_info.name, stamp), ) # TODO: cover corner case of duplication (unlikely, # but theoretically possible) shutil.move(wal_info.orig_filename, error_dst) continue except AbortedRetryHookScript as e: _logger.warning( "Archiving of %s/%s aborted by " "pre_archive_retry_script." "Reason: %s" % (self.config.name, wal_info.name, e) ) return if processed: _logger.debug( "Archived %s out of %s xlog segments from %s for %s", processed, batch.size, self.name, self.config.name, ) elif verbose: output.info("\tno file found", log=False) if batch.errors: output.info( "Some unknown objects have been found while " "processing xlog segments for %s. " "Objects moved to errors directory:", self.config.name, log=False, ) # Log unexpected files _logger.warning( "Archiver is about to move %s unexpected file(s) " "to errors directory for %s from %s", len(batch.errors), self.config.name, self.name, ) for error in batch.errors: basename = os.path.basename(error) output.info("\t%s", basename, log=False) # Print informative log line. 
_logger.warning( "Moving unexpected file for %s from %s: %s", self.config.name, self.name, basename, ) error_dst = os.path.join( self.config.errors_directory, "%s.%s.unknown" % (basename, stamp) ) try: shutil.move(error, error_dst) except IOError as e: if e.errno == errno.ENOENT: _logger.warning("%s not found" % error) def archive_wal(self, compressor, wal_info): """ Archive a WAL segment and update the wal_info object :param compressor: the compressor for the file (if any) :param WalFileInfo wal_info: the WAL file is being processed """ src_file = wal_info.orig_filename src_dir = os.path.dirname(src_file) dst_file = wal_info.fullpath(self.server) tmp_file = dst_file + ".tmp" dst_dir = os.path.dirname(dst_file) comp_manager = self.backup_manager.compression_manager error = None try: # Run the pre_archive_script if present. script = HookScriptRunner(self.backup_manager, "archive_script", "pre") script.env_from_wal_info(wal_info, src_file) script.run() # Run the pre_archive_retry_script if present. retry_script = RetryHookScriptRunner( self.backup_manager, "archive_retry_script", "pre" ) retry_script.env_from_wal_info(wal_info, src_file) retry_script.run() # Check if destination already exists if os.path.exists(dst_file): src_uncompressed = src_file dst_uncompressed = dst_file dst_info = comp_manager.get_wal_file_info(dst_file) try: if dst_info.compression is not None: dst_uncompressed = dst_file + ".uncompressed" comp_manager.get_compressor(dst_info.compression).decompress( dst_file, dst_uncompressed ) if wal_info.compression: src_uncompressed = src_file + ".uncompressed" comp_manager.get_compressor(wal_info.compression).decompress( src_file, src_uncompressed ) # Directly compare files. # When the files are identical # raise a MatchingDuplicateWalFile exception, # otherwise raise a DuplicateWalFile exception. if filecmp.cmp(dst_uncompressed, src_uncompressed): raise MatchingDuplicateWalFile(wal_info) else: raise DuplicateWalFile(wal_info) finally: if src_uncompressed != src_file: os.unlink(src_uncompressed) if dst_uncompressed != dst_file: os.unlink(dst_uncompressed) mkpath(dst_dir) # Compress the file only if not already compressed if compressor and not wal_info.compression: compressor.compress(src_file, tmp_file) # Perform the real filesystem operation with the xlogdb lock taken. # This makes the operation atomic from the xlogdb file POV with self.server.xlogdb("a") as fxlogdb: if compressor and not wal_info.compression: shutil.copystat(src_file, tmp_file) os.rename(tmp_file, dst_file) os.unlink(src_file) # Update wal_info stat = os.stat(dst_file) wal_info.size = stat.st_size wal_info.compression = compressor.compression else: # Try to atomically rename the file. If successful, # the renaming will be an atomic operation # (this is a POSIX requirement). 
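# Note: os.rename() can only be atomic within a single filesystem;
# a cross-filesystem rename fails with OSError (EXDEV), which is
# handled below by falling back to copy + rename + unlink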
try: os.rename(src_file, dst_file) except OSError: # Source and destination are probably on different # filesystems shutil.copy2(src_file, tmp_file) os.rename(tmp_file, dst_file) os.unlink(src_file) # At this point the original file has been removed wal_info.orig_filename = None # Execute fsync() on the archived WAL file fsync_file(dst_file) # Execute fsync() on the archived WAL containing directory fsync_dir(dst_dir) # Execute fsync() also on the incoming directory fsync_dir(src_dir) # Updates the information of the WAL archive with # the latest segments fxlogdb.write(wal_info.to_xlogdb_line()) # flush and fsync for every line fxlogdb.flush() os.fsync(fxlogdb.fileno()) except Exception as e: # In case of failure save the exception for the post scripts error = e raise # Ensure the execution of the post_archive_retry_script and # the post_archive_script finally: # Run the post_archive_retry_script if present. try: retry_script = RetryHookScriptRunner( self, "archive_retry_script", "post" ) retry_script.env_from_wal_info(wal_info, dst_file, error) retry_script.run() except AbortedRetryHookScript as e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning( "Ignoring stop request after receiving " "abort (exit code %d) from post-archive " "retry hook script: %s", e.hook.exit_status, e.hook.script, ) # Run the post_archive_script if present. script = HookScriptRunner(self, "archive_script", "post", error) script.env_from_wal_info(wal_info, dst_file) script.run() @abstractmethod def get_next_batch(self): """ Return a WalArchiverQueue containing the WAL files to be archived. :rtype: WalArchiverQueue """ @abstractmethod def check(self, check_strategy): """ Perform specific checks for the archiver - invoked by server.check_postgres :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ @abstractmethod def status(self): """ Set additional status info - invoked by Server.status() """ @staticmethod def summarise_error_files(error_files): """ Summarise a error files list :param list[str] error_files: Error files list to summarise :return str: A summary, None if there are no error files """ if not error_files: return None # The default value for this dictionary will be 0 counters = collections.defaultdict(int) # Count the file types for name in error_files: if name.endswith(".error"): counters["not relevant"] += 1 elif name.endswith(".duplicate"): counters["duplicates"] += 1 elif name.endswith(".unknown"): counters["unknown"] += 1 else: counters["unknown failure"] += 1 # Return a summary list of the form: "item a: 2, item b: 5" return ", ".join("%s: %s" % entry for entry in counters.items()) class FileWalArchiver(WalArchiver): """ Manager of file-based WAL archiving operations (aka 'log shipping'). """ def __init__(self, backup_manager): super(FileWalArchiver, self).__init__(backup_manager, "file archival") def fetch_remote_status(self): """ Returns the status of the FileWalArchiver. This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. 
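The resulting dictionary always contains the `archive_mode` and `archive_command` keys, and is extended with `pg_stat_archiver` statistics when the view is available.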
:rtype: dict[str, None|str] """ result = dict.fromkeys(["archive_mode", "archive_command"], None) postgres = self.server.postgres # If Postgres is not available we cannot detect anything if not postgres: return result # Query the database for 'archive_mode' and 'archive_command' result["archive_mode"] = postgres.get_setting("archive_mode") result["archive_command"] = postgres.get_setting("archive_command") # Add pg_stat_archiver statistics if the view is supported pg_stat_archiver = postgres.get_archiver_stats() if pg_stat_archiver is not None: result.update(pg_stat_archiver) return result def get_next_batch(self): """ Returns the next batch of WAL files that have been archived through a PostgreSQL's 'archive_command' (in the 'incoming' directory) :return: WalArchiverQueue: list of WAL files """ # Get the batch size from configuration (0 = unlimited) batch_size = self.config.archiver_batch_size # List and sort all files in the incoming directory # IMPORTANT: the list is sorted, and this allows us to know that the # WAL stream we have is monotonically increasing. That allows us to # verify that a backup has all the WALs required for the restore. file_names = glob(os.path.join(self.config.incoming_wals_directory, "*")) file_names.sort() # Process anything that looks like a valid WAL file. Anything # else is treated like an error/anomaly files = [] errors = [] for file_name in file_names: # Ignore temporary files if file_name.endswith(".tmp"): continue if xlog.is_any_xlog_file(file_name) and os.path.isfile(file_name): files.append(file_name) else: errors.append(file_name) # Build the list of WalFileInfo wal_files = [ WalFileInfo.from_file(f, self.backup_manager.compression_manager) for f in files ] return WalArchiverQueue(wal_files, batch_size=batch_size, errors=errors) def check(self, check_strategy): """ Perform additional checks for FileWalArchiver - invoked by server.check_postgres :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("archive_mode") remote_status = self.get_remote_status() # If archive_mode is None, there are issues connecting to PostgreSQL if remote_status["archive_mode"] is None: return # Check archive_mode parameter: must be on if remote_status["archive_mode"] in ("on", "always"): check_strategy.result(self.config.name, True) else: msg = "please set it to 'on'" if self.server.postgres.server_version >= 90500: msg += " or 'always'" check_strategy.result(self.config.name, False, hint=msg) check_strategy.init_check("archive_command") if ( remote_status["archive_command"] and remote_status["archive_command"] != "(disabled)" ): check_strategy.result(self.config.name, True, check="archive_command") # Report if the archiving process works without issues. 
# Skip if the archive_command check fails # It can be None if PostgreSQL is older than 9.4 if remote_status.get("is_archiving") is not None: check_strategy.result( self.config.name, remote_status["is_archiving"], check="continuous archiving", ) else: check_strategy.result( self.config.name, False, hint="please set it accordingly to documentation", ) def status(self): """ Set additional status info - invoked by Server.status() """ # We need to get full info here from the server remote_status = self.server.get_remote_status() # If archive_mode is None, there are issues connecting to PostgreSQL if remote_status["archive_mode"] is None: return output.result( "status", self.config.name, "archive_command", "PostgreSQL 'archive_command' setting", remote_status["archive_command"] or "FAILED (please set it accordingly to documentation)", ) last_wal = remote_status.get("last_archived_wal") # If PostgreSQL is >= 9.4 we have the last_archived_time if last_wal and remote_status.get("last_archived_time"): last_wal += ", at %s" % (remote_status["last_archived_time"].ctime()) output.result( "status", self.config.name, "last_archived_wal", "Last archived WAL", last_wal or "No WAL segment shipped yet", ) # Set output for WAL archive failures (PostgreSQL >= 9.4) if remote_status.get("failed_count") is not None: remote_fail = str(remote_status["failed_count"]) if int(remote_status["failed_count"]) > 0: remote_fail += " (%s at %s)" % ( remote_status["last_failed_wal"], remote_status["last_failed_time"].ctime(), ) output.result( "status", self.config.name, "failed_count", "Failures of WAL archiver", remote_fail, ) # Add hourly archive rate if available (PostgreSQL >= 9.4) and > 0 if remote_status.get("current_archived_wals_per_second"): output.result( "status", self.config.name, "server_archived_wals_per_hour", "Server WAL archiving rate", "%0.2f/hour" % (3600 * remote_status["current_archived_wals_per_second"]), ) class StreamingWalArchiver(WalArchiver): """ Object used for the management of streaming WAL archive operation. """ def __init__(self, backup_manager): super(StreamingWalArchiver, self).__init__(backup_manager, "streaming") def fetch_remote_status(self): """ Execute checks for replication-based wal archiving This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. 
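The resulting dictionary reports, through the `pg_receivexlog_*` keys, whether a suitable `pg_receivewal`/`pg_receivexlog` binary is installed, its path and version, and whether it supports replication slots and synchronous mode with the connected server.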
:rtype: dict[str, None|str] """ remote_status = dict.fromkeys( ( "pg_receivexlog_compatible", "pg_receivexlog_installed", "pg_receivexlog_path", "pg_receivexlog_supports_slots", "pg_receivexlog_synchronous", "pg_receivexlog_version", ), None, ) # Test pg_receivexlog existence version_info = PgReceiveXlog.get_version_info(self.server.path) if version_info["full_path"]: remote_status["pg_receivexlog_installed"] = True remote_status["pg_receivexlog_path"] = version_info["full_path"] remote_status["pg_receivexlog_version"] = version_info["full_version"] pgreceivexlog_version = version_info["major_version"] else: remote_status["pg_receivexlog_installed"] = False return remote_status # Retrieve the PostgreSQL version pg_version = None if self.server.streaming is not None: pg_version = self.server.streaming.server_major_version # If one of the version is unknown we cannot compare them if pgreceivexlog_version is None or pg_version is None: return remote_status # pg_version is not None so transform into a Version object # for easier comparison between versions pg_version = Version(pg_version) # Set conservative default values (False) for modern features remote_status["pg_receivexlog_compatible"] = False remote_status["pg_receivexlog_supports_slots"] = False remote_status["pg_receivexlog_synchronous"] = False # pg_receivexlog 9.2 is compatible only with PostgreSQL 9.2. if "9.2" == pg_version == pgreceivexlog_version: remote_status["pg_receivexlog_compatible"] = True # other versions are compatible with lesser versions of PostgreSQL # WARNING: The development versions of `pg_receivexlog` are considered # higher than the stable versions here, but this is not an issue # because it accepts everything that is less than # the `pg_receivexlog` version(e.g. '9.6' is less than '9.6devel') elif "9.2" < pg_version <= pgreceivexlog_version: # At least PostgreSQL 9.3 is required here remote_status["pg_receivexlog_compatible"] = True # replication slots are supported starting from version 9.4 if "9.4" <= pg_version <= pgreceivexlog_version: remote_status["pg_receivexlog_supports_slots"] = True # Synchronous WAL streaming requires replication slots # and pg_receivexlog >= 9.5 if "9.4" <= pg_version and "9.5" <= pgreceivexlog_version: remote_status["pg_receivexlog_synchronous"] = self._is_synchronous() return remote_status def receive_wal(self, reset=False): """ Creates a PgReceiveXlog object and issues the pg_receivexlog command for a specific server :param bool reset: When set reset the status of receive-wal :raise ArchiverFailure: when something goes wrong """ # Ensure the presence of the destination directory mkpath(self.config.streaming_wals_directory) # Execute basic sanity checks on PostgreSQL connection streaming_status = self.server.streaming.get_remote_status() if streaming_status["streaming_supported"] is None: raise ArchiverFailure( "failed opening the PostgreSQL streaming connection " "for server %s" % (self.config.name) ) elif not streaming_status["streaming_supported"]: raise ArchiverFailure( "PostgreSQL version too old (%s < 9.2)" % self.server.streaming.server_txt_version ) # Execute basic sanity checks on pg_receivexlog command = "pg_receivewal" if self.server.streaming.server_version < 100000: command = "pg_receivexlog" remote_status = self.get_remote_status() if not remote_status["pg_receivexlog_installed"]: raise ArchiverFailure("%s not present in $PATH" % command) if not remote_status["pg_receivexlog_compatible"]: raise ArchiverFailure( "%s version not compatible with PostgreSQL server 
version" % command ) # Execute sanity check on replication slot usage postgres_status = self.server.postgres.get_remote_status() if self.config.slot_name: # Check if slots are supported if not remote_status["pg_receivexlog_supports_slots"]: raise ArchiverFailure( "Physical replication slot not supported by %s " "(9.4 or higher is required)" % self.server.streaming.server_txt_version ) # Check if the required slot exists if postgres_status["replication_slot"] is None: if self.config.create_slot == "auto": if not reset: output.info( "Creating replication slot '%s'", self.config.slot_name ) self.server.create_physical_repslot() else: raise ArchiverFailure( "replication slot '%s' doesn't exist. " "Please execute " "'barman receive-wal --create-slot %s'" % (self.config.slot_name, self.config.name) ) # Check if the required slot is available elif postgres_status["replication_slot"].active: raise ArchiverFailure( "replication slot '%s' is already in use" % (self.config.slot_name,) ) # Check if is a reset request if reset: self._reset_streaming_status(postgres_status, streaming_status) return # Check the size of the .partial WAL file and truncate it if needed self._truncate_partial_file_if_needed(postgres_status["xlog_segment_size"]) # Make sure we are not wasting precious PostgreSQL resources self.server.close() _logger.info("Activating WAL archiving through streaming protocol") try: output_handler = PgReceiveXlog.make_output_handler(self.config.name + ": ") receive = PgReceiveXlog( connection=self.server.streaming, destination=self.config.streaming_wals_directory, command=remote_status["pg_receivexlog_path"], version=remote_status["pg_receivexlog_version"], app_name=self.config.streaming_archiver_name, path=self.server.path, slot_name=self.config.slot_name, synchronous=remote_status["pg_receivexlog_synchronous"], out_handler=output_handler, err_handler=output_handler, ) # Finally execute the pg_receivexlog process receive.execute() except CommandFailedException as e: # Retrieve the return code from the exception ret_code = e.args[0]["ret"] if ret_code < 0: # If the return code is negative, then pg_receivexlog # was terminated by a signal msg = "%s terminated by signal: %s" % (command, abs(ret_code)) else: # Otherwise terminated with an error msg = "%s terminated with error code: %s" % (command, ret_code) raise ArchiverFailure(msg) except KeyboardInterrupt: # This is a normal termination, so there is nothing to do beside # informing the user. output.info("SIGINT received. 
Terminate gracefully.") def _reset_streaming_status(self, postgres_status, streaming_status): """ Reset the status of receive-wal by removing the .partial file that is marking the current position and creating one that is current with the PostgreSQL insert location """ current_wal = xlog.location_to_xlogfile_name_offset( postgres_status["current_lsn"], streaming_status["timeline"], postgres_status["xlog_segment_size"], )["file_name"] restart_wal = current_wal if ( postgres_status["replication_slot"] and postgres_status["replication_slot"].restart_lsn ): restart_wal = xlog.location_to_xlogfile_name_offset( postgres_status["replication_slot"].restart_lsn, streaming_status["timeline"], postgres_status["xlog_segment_size"], )["file_name"] restart_path = os.path.join(self.config.streaming_wals_directory, restart_wal) restart_partial_path = restart_path + ".partial" wal_files = sorted( glob(os.path.join(self.config.streaming_wals_directory, "*")), reverse=True ) # Pick the newer file last = None for last in wal_files: if xlog.is_wal_file(last) or xlog.is_partial_file(last): break # Check if the status is already up-to-date if not last or last == restart_partial_path or last == restart_path: output.info("Nothing to do. Position of receive-wal is aligned.") return if os.path.basename(last) > current_wal: output.error( "The receive-wal position is ahead of PostgreSQL " "current WAL lsn (%s > %s)", os.path.basename(last), postgres_status["current_xlog"], ) return output.info("Resetting receive-wal directory status") if xlog.is_partial_file(last): output.info("Removing status file %s" % last) os.unlink(last) output.info("Creating status file %s" % restart_partial_path) open(restart_partial_path, "w").close() def _truncate_partial_file_if_needed(self, xlog_segment_size): """ Truncate .partial WAL file if size is not 0 or xlog_segment_size :param int xlog_segment_size: """ # Retrieve the partial list (only one is expected) partial_files = glob( os.path.join(self.config.streaming_wals_directory, "*.partial") ) # Take the last partial file, ignoring wrongly formatted file names last_partial = None for partial in partial_files: if not is_partial_file(partial): continue if not last_partial or partial > last_partial: last_partial = partial # Skip further work if there is no good partial file if not last_partial: return # If size is either 0 or wal_segment_size everything is fine... partial_size = os.path.getsize(last_partial) if partial_size == 0 or partial_size == xlog_segment_size: return # otherwise truncate the file to be empty. This is safe because # pg_receivewal pads the file to the full size before start writing. output.info( "Truncating partial file %s that has wrong size %s " "while %s was expected." % (last_partial, partial_size, xlog_segment_size) ) open(last_partial, "wb").close() def get_next_batch(self): """ Returns the next batch of WAL files that have been archived via streaming replication (in the 'streaming' directory) This method always leaves one file in the "streaming" directory, because the 'pg_receivexlog' process needs at least one file to detect the current streaming position after a restart. :return: WalArchiverQueue: list of WAL files """ # Get the batch size from configuration (0 = unlimited) batch_size = self.config.streaming_archiver_batch_size # List and sort all files in the incoming directory. # IMPORTANT: the list is sorted, and this allows us to know that the # WAL stream we have is monotonically increasing. 
That allows us to # verify that a backup has all the WALs required for the restore. file_names = glob(os.path.join(self.config.streaming_wals_directory, "*")) file_names.sort() # Process anything that looks like a valid WAL file, # including partial ones and history files. # Anything else is treated like an error/anomaly files = [] skip = [] errors = [] for file_name in file_names: # Ignore temporary files if file_name.endswith(".tmp"): continue # If the file doesn't exist, it has been renamed/removed while # we were reading the directory. Ignore it. if not os.path.exists(file_name): continue if not os.path.isfile(file_name): errors.append(file_name) elif xlog.is_partial_file(file_name): skip.append(file_name) elif xlog.is_any_xlog_file(file_name): files.append(file_name) else: errors.append(file_name) # In case of more than a partial file, keep the last # and treat the rest as normal files if len(skip) > 1: partials = skip[:-1] _logger.info( "Archiving partial files for server %s: %s" % (self.config.name, ", ".join([os.path.basename(f) for f in partials])) ) files.extend(partials) skip = skip[-1:] # Keep the last full WAL file in case no partial file is present elif len(skip) == 0 and files: skip.append(files.pop()) # Build the list of WalFileInfo wal_files = [WalFileInfo.from_file(f, compression=None) for f in files] return WalArchiverQueue( wal_files, batch_size=batch_size, errors=errors, skip=skip ) def check(self, check_strategy): """ Perform additional checks for StreamingWalArchiver - invoked by server.check_postgres :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("pg_receivexlog") # Check the version of pg_receivexlog remote_status = self.get_remote_status() check_strategy.result( self.config.name, remote_status["pg_receivexlog_installed"] ) hint = None check_strategy.init_check("pg_receivexlog compatible") if not remote_status["pg_receivexlog_compatible"]: pg_version = "Unknown" if self.server.streaming is not None: pg_version = self.server.streaming.server_txt_version hint = "PostgreSQL version: %s, pg_receivexlog version: %s" % ( pg_version, remote_status["pg_receivexlog_version"], ) check_strategy.result( self.config.name, remote_status["pg_receivexlog_compatible"], hint=hint ) # Check if pg_receivexlog is running, by retrieving a list # of running 'receive-wal' processes from the process manager. 
receiver_list = self.server.process_manager.list("receive-wal") # If there's at least one 'receive-wal' process running for this # server, the test is passed check_strategy.init_check("receive-wal running") if receiver_list: check_strategy.result(self.config.name, True) else: check_strategy.result( self.config.name, False, hint="See the Barman log file for more details" ) def _is_synchronous(self): """ Check if receive-wal process is eligible for synchronous replication The receive-wal process is eligible for synchronous replication if `synchronous_standby_names` is configured and contains the value of `streaming_archiver_name` :rtype: bool """ # Nothing to do if postgres connection is not working postgres = self.server.postgres if postgres is None or postgres.server_txt_version is None: return None # Check if synchronous WAL streaming can be enabled # by peeking 'synchronous_standby_names' postgres_status = postgres.get_remote_status() syncnames = postgres_status["synchronous_standby_names"] _logger.debug( "Look for '%s' in 'synchronous_standby_names': %s", self.config.streaming_archiver_name, syncnames, ) # The receive-wal process is eligible for synchronous replication # if `synchronous_standby_names` is configured and contains # the value of `streaming_archiver_name` streaming_archiver_name = self.config.streaming_archiver_name synchronous = syncnames and ( "*" in syncnames or streaming_archiver_name in syncnames ) _logger.debug( "Synchronous WAL streaming for %s: %s", streaming_archiver_name, synchronous ) return synchronous def status(self): """ Set additional status info - invoked by Server.status() """ # TODO: Add status information for WAL streaming barman-2.18/barman/fs.py0000644000621200062120000003527314172556763013334 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging import re from barman.command_wrappers import Command, full_command_quote from barman.exceptions import FsOperationFailed _logger = logging.getLogger(__name__) class UnixLocalCommand(object): """ This class is a wrapper for local calls for file system operations """ def __init__(self, path=None): # initialize a shell self.internal_cmd = Command(cmd="sh", args=["-c"], path=path) def cmd(self, cmd_name, args=[]): """ Execute a command string, escaping it, if necessary """ return self.internal_cmd(full_command_quote(cmd_name, args)) def get_last_output(self): """ Return the output and the error strings from the last executed command :rtype: tuple[str,str] """ return self.internal_cmd.out, self.internal_cmd.err def create_dir_if_not_exists(self, dir_path): """ This method recursively creates a directory if not exists If the path exists and is not a directory raise an exception. 
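Returns True if the directory has been created, False if it already existed.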
:param str dir_path: full path for the directory """ _logger.debug("Create directory %s if it does not exists" % dir_path) exists = self.exists(dir_path) if exists: is_dir = self.cmd("test", args=["-d", dir_path]) if is_dir != 0: raise FsOperationFailed("A file with the same name already exists") else: return False else: # Make parent directories if needed mkdir_ret = self.cmd("mkdir", args=["-p", dir_path]) if mkdir_ret == 0: return True else: raise FsOperationFailed("mkdir execution failed") def delete_if_exists(self, path): """ This method check for the existence of a path. If it exists, then is removed using a rm -fr command, and returns True. If the command fails an exception is raised. If the path does not exists returns False :param path the full path for the directory """ _logger.debug("Delete path %s if exists" % path) exists = self.exists(path, False) if exists: rm_ret = self.cmd("rm", args=["-fr", path]) if rm_ret == 0: return True else: raise FsOperationFailed("rm execution failed") else: return False def check_directory_exists(self, dir_path): """ Check for the existence of a directory in path. if the directory exists returns true. if the directory does not exists returns false. if exists a file and is not a directory raises an exception :param dir_path full path for the directory """ _logger.debug("Check if directory %s exists" % dir_path) exists = self.exists(dir_path) if exists: is_dir = self.cmd("test", args=["-d", dir_path]) if is_dir != 0: raise FsOperationFailed( "A file with the same name exists, but is not a directory" ) else: return True else: return False def check_write_permission(self, dir_path): """ check write permission for barman on a given path. Creates a hidden file using touch, then remove the file. returns true if the file is written and removed without problems raise exception if the creation fails. raise exception if the removal fails. :param dir_path full dir_path for the directory to check """ _logger.debug("Check if directory %s is writable" % dir_path) exists = self.exists(dir_path) if exists: is_dir = self.cmd("test", args=["-d", dir_path]) if is_dir == 0: can_write = self.cmd( "touch", args=["%s/.barman_write_check" % dir_path] ) if can_write == 0: can_remove = self.cmd( "rm", args=["%s/.barman_write_check" % dir_path] ) if can_remove == 0: return True else: raise FsOperationFailed("Unable to remove file") else: raise FsOperationFailed("Unable to create write check file") else: raise FsOperationFailed("%s is not a directory" % dir_path) else: raise FsOperationFailed("%s does not exists" % dir_path) def create_symbolic_link(self, src, dst): """ Create a symlink pointing to src named dst. Check src exists, if so, checks that destination does not exists. if src is an invalid folder, raises an exception. if dst already exists, raises an exception. if ln -s command fails raises an exception :param src full path to the source of the symlink :param dst full path for the destination of the symlink """ _logger.debug("Create symbolic link %s -> %s" % (dst, src)) exists = self.exists(src) if exists: exists_dst = self.exists(dst) if not exists_dst: link = self.cmd("ln", args=["-s", src, dst]) if link == 0: return True else: raise FsOperationFailed("ln command failed") else: raise FsOperationFailed("ln destination already exists") else: raise FsOperationFailed("ln source does not exists") def get_system_info(self): """ Gather important system information for 'barman diagnose' command """ result = {} # self.internal_cmd.out can be None. 
The str() call will ensure it # will be translated to a literal 'None' release = "" if self.cmd("lsb_release", args=["-a"]) == 0: release = self.internal_cmd.out.rstrip() elif self.exists("/etc/lsb-release"): self.cmd("cat", args=["/etc/lsb-release"]) release = "Ubuntu Linux %s" % self.internal_cmd.out.rstrip() elif self.exists("/etc/debian_version"): self.cmd("cat", args=["/etc/debian_version"]) release = "Debian GNU/Linux %s" % self.internal_cmd.out.rstrip() elif self.exists("/etc/redhat-release"): self.cmd("cat", args=["/etc/redhat-release"]) release = "RedHat Linux %s" % self.internal_cmd.out.rstrip() elif self.cmd("sw_vers") == 0: release = self.internal_cmd.out.rstrip() result["release"] = release self.cmd("uname", args=["-a"]) result["kernel_ver"] = self.internal_cmd.out.rstrip() self.cmd("python", args=["--version", "2>&1"]) result["python_ver"] = self.internal_cmd.out.rstrip() self.cmd("rsync", args=["--version", "2>&1"]) try: result["rsync_ver"] = self.internal_cmd.out.splitlines(True)[0].rstrip() except IndexError: result["rsync_ver"] = "" self.cmd("ssh", args=["-V", "2>&1"]) result["ssh_ver"] = self.internal_cmd.out.rstrip() return result def get_file_content(self, path): """ Retrieve the content of a file If the file doesn't exist or isn't readable, it raises an exception. :param str path: full path to the file to read """ _logger.debug("Reading content of file %s" % path) result = self.exists(path) if not result: raise FsOperationFailed("The %s file does not exist" % path) result = self.cmd("test", args=["-r", path]) if result != 0: raise FsOperationFailed("The %s file is not readable" % path) result = self.cmd("cat", args=[path]) if result != 0: raise FsOperationFailed("Failed to execute \"cat '%s'\"" % path) return self.internal_cmd.out def exists(self, path, dereference=True): """ Check for the existence of a path. :param str path: full path to check :param bool dereference: whether dereference symlinks, defaults to True :return bool: if the file exists or not. """ _logger.debug("check for existence of: %s" % path) options = ["-e", path] if not dereference: options += ["-o", "-L", path] result = self.cmd("test", args=options) return result == 0 def ping(self): """ 'Ping' the server executing the `true` command. :return int: the true cmd result """ _logger.debug("execute the true command") result = self.cmd("true") return result def list_dir_content(self, dir_path, options=[]): """ List the contents of a given directory. 
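        A short sketch of the intended usage (path and options are
        illustrative):

            cmd = UnixLocalCommand()
            listing = cmd.list_dir_content("/var/lib/barman", options=["-l", "-a"])
            # listing holds the raw output of "ls -l -a /var/lib/barman"
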
:param str dir_path: the path where we want the ls to be executed :param list[str] options: a string containing the options for the ls command :return str: the ls cmd output """ _logger.debug("list the content of a directory") ls_options = [] if options: ls_options += options ls_options.append(dir_path) self.cmd("ls", args=ls_options) return self.internal_cmd.out class UnixRemoteCommand(UnixLocalCommand): """ This class is a wrapper for remote calls for file system operations """ # noinspection PyMissingConstructor def __init__(self, ssh_command, ssh_options=None, path=None): """ Uses the same commands as the UnixLocalCommand but the constructor is overridden and a remote shell is initialized using the ssh_command provided by the user :param str ssh_command: the ssh command provided by the user :param list[str] ssh_options: the options to be passed to SSH :param str path: the path to be used if provided, otherwise the PATH environment variable will be used """ # Ensure that ssh_option is iterable if ssh_options is None: ssh_options = [] if ssh_command is None: raise FsOperationFailed("No ssh command provided") self.internal_cmd = Command( ssh_command, args=ssh_options, path=path, shell=True ) try: ret = self.cmd("true") except OSError: raise FsOperationFailed("Unable to execute %s" % ssh_command) if ret != 0: raise FsOperationFailed( "Connection failed using '%s %s' return code %s" % (ssh_command, " ".join(ssh_options), ret) ) def path_allowed(exclude, include, path, is_dir): """ Filter files based on include/exclude lists. The rules are evaluated in steps: 1. if there are include rules and the proposed path match them, it is immediately accepted. 2. if there are exclude rules and the proposed path match them, it is immediately rejected. 3. the path is accepted. Look at the documentation for the "evaluate_path_matching_rules" function for more information about the syntax of the rules. :param list[str]|None exclude: The list of rules composing the exclude list :param list[str]|None include: The list of rules composing the include list :param str path: The patch to patch :param bool is_dir: True is the passed path is a directory :return bool: True is the patch is accepted, False otherwise """ if include and _match_path(include, path, is_dir): return True if exclude and _match_path(exclude, path, is_dir): return False return True def _match_path(rules, path, is_dir): """ Determine if a certain list of rules match a filesystem entry. The rule-checking algorithm also handles rsync-like anchoring of rules prefixed with '/'. If the rule is not anchored then it match every file whose suffix matches the rule. That means that a rule like 'a/b', will match 'a/b' and 'x/a/b' too. A rule like '/a/b' will match 'a/b' but not 'x/a/b'. If a rule ends with a slash (i.e. 'a/b/') if will be used only if the passed path is a directory. This function implements the basic wildcards. For more information about that, consult the documentation of the "translate_to_regexp" function. :param list[str] rules: match :param path: the path of the entity to match :param is_dir: True if the entity is a directory :return bool: """ for rule in rules: if rule[-1] == "/": if not is_dir: continue rule = rule[:-1] anchored = False if rule[0] == "/": rule = rule[1:] anchored = True if _wildcard_match_path(path, rule): return True if not anchored and _wildcard_match_path(path, "**/" + rule): return True return False def _wildcard_match_path(path, pattern): """ Check if the proposed shell pattern match the path passed. 
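    For instance (paths and patterns are illustrative):

        _wildcard_match_path("a/b/data/PG_VERSION", "**/PG_VERSION")
        # True: "**" also spans "/" separators
        _wildcard_match_path("a/b/data/PG_VERSION", "*/PG_VERSION")
        # False: "*" never crosses a "/"
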
:param str path: :param str pattern: :rtype bool: True if it match, False otherwise """ regexp = re.compile(_translate_to_regexp(pattern)) return regexp.match(path) is not None def _translate_to_regexp(pattern): """ Translate a shell PATTERN to a regular expression. These wildcard characters you to use: - "?" to match every character - "*" to match zero or more characters, excluding "/" - "**" to match zero or more characters, including "/" There is no way to quote meta-characters. This implementation is based on the one in the Python fnmatch module :param str pattern: A string containing wildcards """ i, n = 0, len(pattern) res = "" while i < n: c = pattern[i] i = i + 1 if pattern[i - 1 :].startswith("**"): res = res + ".*" i = i + 1 elif c == "*": res = res + "[^/]*" elif c == "?": res = res + "." else: res = res + re.escape(c) return r"(?s)%s\Z" % res barman-2.18/barman/annotations.py0000644000621200062120000003072714172556763015260 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import errno import io import os from abc import ABCMeta, abstractmethod from barman.exceptions import ArchivalBackupException from barman.utils import with_metaclass class AnnotationManager(with_metaclass(ABCMeta)): """ This abstract base class defines the AnnotationManager interface which provides methods for read, write and delete of annotations for a given backup. """ @abstractmethod def put_annotation(self, backup_id, key, value): """Add an annotation""" @abstractmethod def get_annotation(self, backup_id, key): """Get the value of an annotation""" @abstractmethod def delete_annotation(self, backup_id, key): """Delete an annotation""" class AnnotationManagerFile(AnnotationManager): def __init__(self, path): """ Constructor for the file-based annotation manager. Should be initialised with the path to the barman base backup directory. """ self.path = path def _get_annotation_path(self, backup_id, key): """ Builds the annotation path for the specified backup_id and annotation key. """ return "%s/%s/annotations/%s" % (self.path, backup_id, key) def delete_annotation(self, backup_id, key): """ Deletes an annotation from the filesystem for the specified backup_id and annotation key. 
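        A sketch of the whole annotation lifecycle (backup id and base
        directory are illustrative):

            manager = AnnotationManagerFile("/var/lib/barman/main/base")
            manager.put_annotation("20220101T000000", "keep", "standalone")
            manager.get_annotation("20220101T000000", "keep")   # -> "standalone"
            manager.delete_annotation("20220101T000000", "keep")
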
""" annotation_path = self._get_annotation_path(backup_id, key) try: os.remove(annotation_path) except EnvironmentError as e: # For Python 2 compatibility we must check the error code directly # If the annotation doesn't exist then the failure to delete it is not an # error condition and we should not proceed to remove the annotations # directory if e.errno == errno.ENOENT: return else: raise try: os.rmdir(os.path.dirname(annotation_path)) except EnvironmentError as e: # For Python 2 compatibility we must check the error code directly # If we couldn't remove the directory because it wasn't empty then we # do not consider it an error condition if e.errno != errno.ENOTEMPTY: raise def get_annotation(self, backup_id, key): """ Reads the annotation `key` for the specified backup_id from the filesystem and returns the value. """ annotation_path = self._get_annotation_path(backup_id, key) try: with open(annotation_path, "r") as annotation_file: return annotation_file.read() except EnvironmentError as e: # For Python 2 compatibility we must check the error code directly # If the annotation doesn't exist then return None if e.errno != errno.ENOENT: raise def put_annotation(self, backup_id, key, value): """ Writes the specified value for annotation `key` for the specified backup_id to the filesystem. """ annotation_path = self._get_annotation_path(backup_id, key) try: os.makedirs(os.path.dirname(annotation_path)) except EnvironmentError as e: # For Python 2 compatibility we must check the error code directly # If the directory already exists then it is not an error condition if e.errno != errno.EEXIST: raise with open(annotation_path, "w") as annotation_file: if value: annotation_file.write(value) class AnnotationManagerCloud(AnnotationManager): def __init__(self, cloud_interface, server_name): """ Constructor for the cloud-based annotation manager. Should be initialised with the CloudInterface and name of the server which was used to create the backups. """ self.cloud_interface = cloud_interface self.server_name = server_name self.annotation_cache = None def _get_base_path(self): """ Returns the base path to the cloud storage, accounting for the fact that CloudInterface.path may be None. """ return self.cloud_interface.path and "%s/" % self.cloud_interface.path or "" def _get_annotation_path(self, backup_id, key): """ Builds the full key to the annotation in cloud storage for the specified backup_id and annotation key. """ return "%s%s/base/%s/annotations/%s" % ( self._get_base_path(), self.server_name, backup_id, key, ) def _populate_annotation_cache(self): """ Build a cache of which annotations actually exist by walking the bucket. This allows us to optimize get_annotation by just checking a (backup_id,key) tuple here which is cheaper (in time and money) than going to the cloud every time. """ self.annotation_cache = {} for object_key in self.cloud_interface.list_bucket( os.path.join(self._get_base_path(), self.server_name, "base") + "/", delimiter="", ): key_parts = object_key.split("/") if len(key_parts) > 3: if key_parts[-2] == "annotations": backup_id = key_parts[-3] annotation_key = key_parts[-1] self.annotation_cache[(backup_id, annotation_key)] = True def delete_annotation(self, backup_id, key): """ Deletes an annotation from cloud storage for the specified backup_id and annotation key. 
""" annotation_path = self._get_annotation_path(backup_id, key) self.cloud_interface.delete_objects([annotation_path]) def get_annotation(self, backup_id, key, use_cache=True): """ Reads the annotation `key` for the specified backup_id from cloud storage and returns the value. The default behaviour is that, when it is first run, it populates a cache of the annotations which exist for each backup by walking the bucket. Subsequent operations can check that cache and avoid having to call remote_open if an annotation is not found in the cache. This optimises for the case where annotations are sparse and assumes the cost of walking the bucket is less than the cost of the remote_open calls which would not return a value. In cases where we do not want to walk the bucket up front then the caching can be disabled. """ # Optimize for the most common case where there is no annotation if use_cache: if self.annotation_cache is None: self._populate_annotation_cache() if ( self.annotation_cache is not None and (backup_id, key) not in self.annotation_cache ): return None # We either know there's an annotation or we havn't used the cache so read # it from the cloud annotation_path = self._get_annotation_path(backup_id, key) annotation_fileobj = self.cloud_interface.remote_open(annotation_path) if annotation_fileobj: with annotation_fileobj: annotation_bytes = annotation_fileobj.readline() return annotation_bytes.decode("utf-8") else: # We intentionally return None if remote_open found nothing return None def put_annotation(self, backup_id, key, value): """ Writes the specified value for annotation `key` for the specified backup_id to cloud storage. """ annotation_path = self._get_annotation_path(backup_id, key) self.cloud_interface.upload_fileobj( io.BytesIO(value.encode("utf-8")), annotation_path ) class KeepManager(with_metaclass(ABCMeta, object)): """Abstract base class which defines the KeepManager interface""" ANNOTATION_KEY = "keep" TARGET_FULL = "full" TARGET_STANDALONE = "standalone" supported_targets = (TARGET_FULL, TARGET_STANDALONE) @abstractmethod def should_keep_backup(self, backup_id): pass @abstractmethod def keep_backup(self, backup_id, target): pass @abstractmethod def get_keep_target(self, backup_id): pass @abstractmethod def release_keep(self, backup_id): pass class KeepManagerMixin(KeepManager): """ A Mixin which adds KeepManager functionality to its subclasses. Keep management is built on top of annotations and consists of the following functionality: - Determine whether a given backup is intended to be kept beyond its retention period. - Determine the intended recovery target for the archival backup. - Add and remove the keep annotation. The functionality is implemented as a Mixin so that it can be used to add keep management to the backup management class in barman (BackupManager) as well as its closest analog in barman-cloud (CloudBackupCatalog). """ def __init__(self, *args, **kwargs): """ Base constructor (Mixin pattern). kwargs must contain *either*: - A barman.server.Server object with the key `server`, *or*: - A CloudInterface object and a server name, keys `cloud_interface` and `server_name` respectively. 
""" if "server" in kwargs: server = kwargs.pop("server") self.annotation_manager = AnnotationManagerFile( server.config.basebackups_directory ) elif "cloud_interface" in kwargs: self.annotation_manager = AnnotationManagerCloud( kwargs.pop("cloud_interface"), kwargs.pop("server_name") ) super(KeepManagerMixin, self).__init__(*args, **kwargs) def should_keep_backup(self, backup_id): """ Returns True if the specified backup_id for this server has a keep annotation. False otherwise. """ return ( self.annotation_manager.get_annotation(backup_id, type(self).ANNOTATION_KEY) is not None ) def keep_backup(self, backup_id, target): """ Add a keep annotation for backup with ID backup_id with the specified recovery target. """ if target not in KeepManagerMixin.supported_targets: raise ArchivalBackupException("Unsupported recovery target: %s" % target) self.annotation_manager.put_annotation( backup_id, type(self).ANNOTATION_KEY, target ) def get_keep_target(self, backup_id): """Retrieve the intended recovery target""" return self.annotation_manager.get_annotation( backup_id, type(self).ANNOTATION_KEY ) def release_keep(self, backup_id): """Release the keep annotation""" self.annotation_manager.delete_annotation(backup_id, type(self).ANNOTATION_KEY) class KeepManagerMixinCloud(KeepManagerMixin): """ A specialised KeepManager which allows the annotation caching optimization in the AnnotationManagerCloud backend to be optionally disabled. """ def should_keep_backup(self, backup_id, use_cache=True): """ Like KeepManagerMixinCloud.should_keep_backup but with the use_cache option. """ return ( self.annotation_manager.get_annotation( backup_id, type(self).ANNOTATION_KEY, use_cache=use_cache ) is not None ) def get_keep_target(self, backup_id, use_cache=True): """ Like KeepManagerMixinCloud.get_keep_target but with the use_cache option. """ return self.annotation_manager.get_annotation( backup_id, type(self).ANNOTATION_KEY, use_cache=use_cache ) barman-2.18/barman/command_wrappers.py0000644000621200062120000012263514172556763016264 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains a wrapper for shell commands """ from __future__ import print_function import errno import inspect import logging import os import re import select import signal import subprocess import sys import time from distutils.version import LooseVersion as Version import barman.utils from barman.exceptions import CommandFailedException, CommandMaxRetryExceeded _logger = logging.getLogger(__name__) class StreamLineProcessor(object): """ Class deputed to reading lines from a file object, using a buffered read. NOTE: This class never call os.read() twice in a row. And is designed to work with the select.select() method. 
""" def __init__(self, fobject, handler): """ :param file fobject: The file that is being read :param callable handler: The function (taking only one unicode string argument) which will be called for every line """ self._file = fobject self._handler = handler self._buf = "" def fileno(self): """ Method used by select.select() to get the underlying file descriptor. :rtype: the underlying file descriptor """ return self._file.fileno() def process(self): """ Read the ready data from the stream and for each line found invoke the handler. :return bool: True when End Of File has been reached """ data = os.read(self._file.fileno(), 4096) # If nothing has been read, we reached the EOF if not data: self._file.close() # Handle the last line (always incomplete, maybe empty) self._handler(self._buf) return True self._buf += data.decode("utf-8", "replace") # If no '\n' is present, we just read a part of a very long line. # Nothing to do at the moment. if "\n" not in self._buf: return False tmp = self._buf.split("\n") # Leave the remainder in self._buf self._buf = tmp[-1] # Call the handler for each complete line. lines = tmp[:-1] for line in lines: self._handler(line) return False class Command(object): """ Wrapper for a system command """ def __init__( self, cmd, args=None, env_append=None, path=None, shell=False, check=False, allowed_retval=(0,), close_fds=True, out_handler=None, err_handler=None, retry_times=0, retry_sleep=0, retry_handler=None, ): """ If the `args` argument is specified the arguments will be always added to the ones eventually passed with the actual invocation. If the `env_append` argument is present its content will be appended to the environment of every invocation. The subprocess output and error stream will be processed through the output and error handler, respectively defined through the `out_handler` and `err_handler` arguments. If not provided every line will be sent to the log respectively at INFO and WARNING level. The `out_handler` and the `err_handler` functions will be invoked with one single argument, which is a string containing the line that is being processed. If the `close_fds` argument is True, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. If the `check` argument is True, the exit code will be checked against the `allowed_retval` list, raising a CommandFailedException if not in the list. If `retry_times` is greater than 0, when the execution of a command terminates with an error, it will be retried for a maximum of `retry_times` times, waiting for `retry_sleep` seconds between every attempt. Every time a command is retried the `retry_handler` is executed before running the command again. The retry_handler must be a callable that accepts the following fields: * the Command object * the arguments list * the keyword arguments dictionary * the number of the failed attempt * the exception containing the error An example of such a function is: > def retry_handler(command, args, kwargs, attempt, exc): > print("Failed command!") Some of the keyword arguments can be specified both in the class constructor and during the method call. If specified in both places, the method arguments will take the precedence over the constructor arguments. 
:param str cmd: The command to execute :param list[str]|None args: List of additional arguments to append :param dict[str.str]|None env_append: additional environment variables :param str path: PATH to be used while searching for `cmd` :param bool shell: If true, use the shell instead of an "execve" call :param bool check: Raise a CommandFailedException if the exit code is not present in `allowed_retval` :param list[int] allowed_retval: List of exit codes considered as a successful termination. :param bool close_fds: If set, close all the extra file descriptors :param callable out_handler: handler for lines sent on stdout :param callable err_handler: handler for lines sent on stderr :param int retry_times: number of allowed retry attempts :param int retry_sleep: wait seconds between every retry :param callable retry_handler: handler invoked during a command retry """ self.pipe = None self.cmd = cmd self.args = args if args is not None else [] self.shell = shell self.close_fds = close_fds self.check = check self.allowed_retval = allowed_retval self.retry_times = retry_times self.retry_sleep = retry_sleep self.retry_handler = retry_handler self.path = path self.ret = None self.out = None self.err = None # If env_append has been provided use it or replace with an empty dict env_append = env_append or {} # If path has been provided, replace it in the environment if path: env_append["PATH"] = path # Find the absolute path to the command to execute if not self.shell: full_path = barman.utils.which(self.cmd, self.path) if not full_path: raise CommandFailedException("%s not in PATH" % self.cmd) self.cmd = full_path # If env_append contains anything, build an env dict to be used during # subprocess call, otherwise set it to None and let the subprocesses # inherit the parent environment if env_append: self.env = os.environ.copy() self.env.update(env_append) else: self.env = None # If an output handler has been provided use it, otherwise log the # stdout as INFO if out_handler: self.out_handler = out_handler else: self.out_handler = self.make_logging_handler(logging.INFO) # If an error handler has been provided use it, otherwise log the # stderr as WARNING if err_handler: self.err_handler = err_handler else: self.err_handler = self.make_logging_handler(logging.WARNING) @staticmethod def _restore_sigpipe(): """restore default signal handler (http://bugs.python.org/issue1652)""" signal.signal(signal.SIGPIPE, signal.SIG_DFL) # pragma: no cover def __call__(self, *args, **kwargs): """ Run the command and return the exit code. The output and error strings are not returned, but they can be accessed as attributes of the Command object, as well as the exit code. If `stdin` argument is specified, its content will be passed to the executed command through the standard input descriptor. If the `close_fds` argument is True, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. If the `check` argument is True, the exit code will be checked against the `allowed_retval` list, raising a CommandFailedException if not in the list. Every keyword argument can be specified both in the class constructor and during the method call. If specified in both places, the method arguments will take the precedence over the constructor arguments. :rtype: int :raise: CommandFailedException :raise: CommandMaxRetryExceeded """ self.get_output(*args, **kwargs) return self.ret def get_output(self, *args, **kwargs): """ Run the command and return the output and the error as a tuple. 
The return code is not returned, but it can be accessed as an attribute of the Command object, as well as the output and the error strings. If `stdin` argument is specified, its content will be passed to the executed command through the standard input descriptor. If the `close_fds` argument is True, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. If the `check` argument is True, the exit code will be checked against the `allowed_retval` list, raising a CommandFailedException if not in the list. Every keyword argument can be specified both in the class constructor and during the method call. If specified in both places, the method arguments will take the precedence over the constructor arguments. :rtype: tuple[str, str] :raise: CommandFailedException :raise: CommandMaxRetryExceeded """ attempt = 0 while True: try: return self._get_output_once(*args, **kwargs) except CommandFailedException as exc: # Try again if retry number is lower than the retry limit if attempt < self.retry_times: # If a retry_handler is defined, invoke it passing the # Command instance and the exception if self.retry_handler: self.retry_handler(self, args, kwargs, attempt, exc) # Sleep for configured time, then try again time.sleep(self.retry_sleep) attempt += 1 else: if attempt == 0: # No retry requested by the user # Raise the original exception raise else: # If the max number of attempts is reached and # there is still an error, exit raising # a CommandMaxRetryExceeded exception and wrap the # original one raise CommandMaxRetryExceeded(*exc.args) def _get_output_once(self, *args, **kwargs): """ Run the command and return the output and the error as a tuple. The return code is not returned, but it can be accessed as an attribute of the Command object, as well as the output and the error strings. If `stdin` argument is specified, its content will be passed to the executed command through the standard input descriptor. If the `close_fds` argument is True, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. If the `check` argument is True, the exit code will be checked against the `allowed_retval` list, raising a CommandFailedException if not in the list. Every keyword argument can be specified both in the class constructor and during the method call. If specified in both places, the method arguments will take the precedence over the constructor arguments. 
:rtype: tuple[str, str] :raises: CommandFailedException """ out = [] err = [] # If check is true, it must be handled here check = kwargs.pop("check", self.check) allowed_retval = kwargs.pop("allowed_retval", self.allowed_retval) self.execute( out_handler=out.append, err_handler=err.append, check=False, *args, **kwargs ) self.out = "\n".join(out) self.err = "\n".join(err) _logger.debug("Command stdout: %s", self.out) _logger.debug("Command stderr: %s", self.err) # Raise if check and the return code is not in the allowed list if check: self.check_return_value(allowed_retval) return self.out, self.err def check_return_value(self, allowed_retval): """ Check the current return code and raise CommandFailedException when it's not in the allowed_retval list :param list[int] allowed_retval: list of return values considered success :raises: CommandFailedException """ if self.ret not in allowed_retval: raise CommandFailedException(dict(ret=self.ret, out=self.out, err=self.err)) def execute(self, *args, **kwargs): """ Execute the command and pass the output to the configured handlers If `stdin` argument is specified, its content will be passed to the executed command through the standard input descriptor. The subprocess output and error stream will be processed through the output and error handler, respectively defined through the `out_handler` and `err_handler` arguments. If not provided every line will be sent to the log respectively at INFO and WARNING level. If the `close_fds` argument is True, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. If the `check` argument is True, the exit code will be checked against the `allowed_retval` list, raising a CommandFailedException if not in the list. Every keyword argument can be specified both in the class constructor and during the method call. If specified in both places, the method arguments will take the precedence over the constructor arguments. 
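        A small sketch (command and input are illustrative; note that stdin
        must be passed as bytes):

            collected = []
            cmd = Command("wc", args=["-c"])
            cmd.execute(stdin=b"hello", out_handler=collected.append)
            # collected now holds the lines printed by "wc -c"
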
:rtype: int :raise: CommandFailedException """ # Check keyword arguments stdin = kwargs.pop("stdin", None) check = kwargs.pop("check", self.check) allowed_retval = kwargs.pop("allowed_retval", self.allowed_retval) close_fds = kwargs.pop("close_fds", self.close_fds) out_handler = kwargs.pop("out_handler", self.out_handler) err_handler = kwargs.pop("err_handler", self.err_handler) if len(kwargs): raise TypeError( "%s() got an unexpected keyword argument %r" % (inspect.stack()[1][3], kwargs.popitem()[0]) ) # Reset status self.ret = None self.out = None self.err = None # Create the subprocess and save it in the current object to be usable # by signal handlers pipe = self._build_pipe(args, close_fds) self.pipe = pipe # Send the provided input and close the stdin descriptor if stdin: pipe.stdin.write(stdin) pipe.stdin.close() # Prepare the list of processors processors = [ StreamLineProcessor(pipe.stdout, out_handler), StreamLineProcessor(pipe.stderr, err_handler), ] # Read the streams until the subprocess exits self.pipe_processor_loop(processors) # Reap the zombie and read the exit code pipe.wait() self.ret = pipe.returncode # Remove the closed pipe from the object self.pipe = None _logger.debug("Command return code: %s", self.ret) # Raise if check and the return code is not in the allowed list if check: self.check_return_value(allowed_retval) return self.ret def _build_pipe(self, args, close_fds): """ Build the Pipe object used by the Command The resulting command will be composed by: self.cmd + self.args + args :param args: extra arguments for the subprocess :param close_fds: if True all file descriptors except 0, 1 and 2 will be closed before the child process is executed. :rtype: subprocess.Popen """ # Append the argument provided to this method of the base argument list args = self.args + list(args) # If shell is True, properly quote the command if self.shell: cmd = full_command_quote(self.cmd, args) else: cmd = [self.cmd] + args # Log the command we are about to execute _logger.debug("Command: %r", cmd) return subprocess.Popen( cmd, shell=self.shell, env=self.env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=self._restore_sigpipe, close_fds=close_fds, ) @staticmethod def pipe_processor_loop(processors): """ Process the output received through the pipe until all the provided StreamLineProcessor reach the EOF. :param list[StreamLineProcessor] processors: a list of StreamLineProcessor """ # Loop until all the streams reaches the EOF while processors: try: ready = select.select(processors, [], [])[0] except select.error as e: # If the select call has been interrupted by a signal # just retry if e.args[0] == errno.EINTR: continue raise # For each ready StreamLineProcessor invoke the process() method for stream in ready: eof = stream.process() # Got EOF on this stream if eof: # Remove the stream from the list of valid processors processors.remove(stream) @classmethod def make_logging_handler(cls, level, prefix=None): """ Build a handler function that logs every line it receives. The resulting function logs its input at the specified level with an optional prefix. 
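        For example (the prefix is illustrative):

            handler = Command.make_logging_handler(logging.DEBUG, prefix="rsync: ")
            cmd = Command("rsync", args=["--version"], out_handler=handler)
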
:param level: The log level to use :param prefix: An optional prefix to prepend to the line :return: handler function """ class_logger = logging.getLogger(cls.__name__) def handler(line): if line: if prefix: class_logger.log(level, "%s%s", prefix, line) else: class_logger.log(level, "%s", line) return handler @staticmethod def make_output_handler(prefix=None): """ Build a handler function which prints every line it receives. The resulting function prints (and log it at INFO level) its input with an optional prefix. :param prefix: An optional prefix to prepend to the line :return: handler function """ # Import the output module inside the function to avoid circular # dependency from barman import output def handler(line): if line: if prefix: output.info("%s%s", prefix, line) else: output.info("%s", line) return handler def enable_signal_forwarding(self, signal_id): """ Enable signal forwarding to the subprocess for a specified signal_id :param signal_id: The signal id to be forwarded """ # Get the current signal handler old_handler = signal.getsignal(signal_id) def _handler(sig, frame): """ This signal handler forward the signal to the subprocess then execute the original handler. """ # Forward the signal to the subprocess if self.pipe: self.pipe.send_signal(signal_id) # If the old handler is callable if callable(old_handler): old_handler(sig, frame) # If we have got a SIGTERM, we must exit elif old_handler == signal.SIG_DFL and signal_id == signal.SIGTERM: sys.exit(128 + signal_id) # Set the signal handler signal.signal(signal_id, _handler) class Rsync(Command): """ This class is a wrapper for the rsync system command, which is used vastly by barman """ def __init__( self, rsync="rsync", args=None, ssh=None, ssh_options=None, bwlimit=None, exclude=None, exclude_and_protect=None, include=None, network_compression=None, path=None, **kwargs ): """ :param str rsync: rsync executable name :param list[str]|None args: List of additional argument to always append :param str ssh: the ssh executable to be used when building the `-e` argument :param list[str] ssh_options: the ssh options to be used when building the `-e` argument :param str bwlimit: optional bandwidth limit :param list[str] exclude: list of file to be excluded from the copy :param list[str] exclude_and_protect: list of file to be excluded from the copy, preserving the destination if exists :param list[str] include: list of files to be included in the copy even if excluded. :param bool network_compression: enable the network compression :param str path: PATH to be used while searching for `cmd` :param bool check: Raise a CommandFailedException if the exit code is not present in `allowed_retval` :param list[int] allowed_retval: List of exit codes considered as a successful termination. """ options = [] if ssh: options += ["-e", full_command_quote(ssh, ssh_options)] if network_compression: options += ["-z"] # Include patterns must be before the exclude ones, because the exclude # patterns actually short-circuit the directory traversal stage # when rsync finds the files to send. 
if include: for pattern in include: options += ["--include=%s" % (pattern,)] if exclude: for pattern in exclude: options += ["--exclude=%s" % (pattern,)] if exclude_and_protect: for pattern in exclude_and_protect: options += ["--exclude=%s" % (pattern,), "--filter=P_%s" % (pattern,)] if args: options += self._args_for_suse(args) if bwlimit is not None and bwlimit > 0: options += ["--bwlimit=%s" % bwlimit] # By default check is on and the allowed exit code are 0 and 24 if "check" not in kwargs: kwargs["check"] = True if "allowed_retval" not in kwargs: kwargs["allowed_retval"] = (0, 24) Command.__init__(self, rsync, args=options, path=path, **kwargs) def _args_for_suse(self, args): """ Mangle args for SUSE compatibility See https://bugzilla.opensuse.org/show_bug.cgi?id=898513 """ # Prepend any argument starting with ':' with a space # Workaround for SUSE rsync issue return [" " + a if a.startswith(":") else a for a in args] def get_output(self, *args, **kwargs): """ Run the command and return the output and the error (if present) """ # Prepares args for SUSE args = self._args_for_suse(args) # Invoke the base class method return super(Rsync, self).get_output(*args, **kwargs) def from_file_list(self, filelist, src, dst, *args, **kwargs): """ This method copies filelist from src to dst. Returns the return code of the rsync command """ if "stdin" in kwargs: raise TypeError("from_file_list() doesn't support 'stdin' keyword argument") input_string = ("\n".join(filelist)).encode("UTF-8") _logger.debug("from_file_list: %r", filelist) kwargs["stdin"] = input_string self.get_output("--files-from=-", src, dst, *args, **kwargs) return self.ret class RsyncPgData(Rsync): """ This class is a wrapper for rsync, specialised in sync-ing the Postgres data directory """ def __init__(self, rsync="rsync", args=None, **kwargs): """ Constructor :param str rsync: command to run """ options = ["-rLKpts", "--delete-excluded", "--inplace"] if args: options += args Rsync.__init__(self, rsync, args=options, **kwargs) class PostgreSQLClient(Command): """ Superclass of all the PostgreSQL client commands. """ COMMAND_ALTERNATIVES = None """ Sometimes the name of a command has been changed during the PostgreSQL evolution. I.e. that happened with pg_receivexlog, that has been renamed to pg_receivewal. In that case, we should try using pg_receivewal (the newer auternative) and, if that command doesn't exist, we should try using `pg_receivexlog`. This is a list of command names to be used to find the installed command. """ def __init__( self, connection, command, version=None, app_name=None, path=None, **kwargs ): """ Constructor :param PostgreSQL connection: an object representing a database connection :param str command: the command to use :param Version version: the command version :param str app_name: the application name to use for the connection :param str path: additional path for executable retrieval """ Command.__init__(self, command, path=path, **kwargs) if not connection: self.enable_signal_forwarding(signal.SIGINT) self.enable_signal_forwarding(signal.SIGTERM) return if version and version >= Version("9.3"): # If version of the client is >= 9.3 we use the connection # string because allows the user to use all the parameters # supported by the libpq library to create a connection conn_string = connection.get_connection_string(app_name) self.args.append("--dbname=%s" % conn_string) else: # 9.2 version doesn't support # connection strings so the 'split' version of the conninfo # option is used instead. 
conn_params = connection.conn_parameters self.args.append("--host=%s" % conn_params.get("host", None)) self.args.append("--port=%s" % conn_params.get("port", None)) self.args.append("--username=%s" % conn_params.get("user", None)) self.enable_signal_forwarding(signal.SIGINT) self.enable_signal_forwarding(signal.SIGTERM) @classmethod def find_command(cls, path=None): """ Find the active command, given all the alternatives as set in the property named `COMMAND_ALTERNATIVES` in this class. :param str path: The path to use while searching for the command :rtype: Command """ # TODO: Unit tests of this one # To search for an available command, testing if the command # exists in PATH is not sufficient. Debian will install wrappers for # all commands, even if the real command doesn't work. # # I.e. we may have a wrapper for `pg_receivewal` even it PostgreSQL # 10 isn't installed. # # This is an example of what can happen in this case: # # ``` # $ pg_receivewal --version; echo $? # Error: pg_wrapper: pg_receivewal was not found in # /usr/lib/postgresql/9.6/bin # 1 # $ pg_receivexlog --version; echo $? # pg_receivexlog (PostgreSQL) 9.6.3 # 0 # ``` # # That means we should not only ensure the existence of the command, # but we also need to invoke the command to see if it is a shim # or not. # Get the system path if needed if path is None: path = os.getenv("PATH") # If the path is None at this point we have nothing to search if path is None: path = "" # Search the requested executable in every directory present # in path and return a Command object first occurrence that exists, # is executable and runs without errors. for path_entry in path.split(os.path.pathsep): for cmd in cls.COMMAND_ALTERNATIVES: full_path = barman.utils.which(cmd, path_entry) # It doesn't exist try another if not full_path: continue # It exists, let's try invoking it with `--version` to check if # it's real or not. try: command = Command(full_path, path=path, check=True) command("--version") return command except CommandFailedException: # It's only a inactive shim continue # We don't have such a command raise CommandFailedException( "command not in PATH, tried: %s" % " ".join(cls.COMMAND_ALTERNATIVES) ) @classmethod def get_version_info(cls, path=None): """ Return a dictionary containing all the info about the version of the PostgreSQL client :param str path: the PATH env """ if cls.COMMAND_ALTERNATIVES is None: raise NotImplementedError( "get_version_info cannot be invoked on %s" % cls.__name__ ) version_info = dict.fromkeys( ("full_path", "full_version", "major_version"), None ) # Get the version string try: command = cls.find_command(path) except CommandFailedException as e: _logger.debug("Error invoking %s: %s", cls.__name__, e) return version_info version_info["full_path"] = command.cmd # Parse the full text version try: full_version = command.out.strip() # Remove values inside parenthesis, they # carries additional information we do not need. 
full_version = re.sub(r"\s*\([^)]*\)", "", full_version) full_version = full_version.split()[1] except IndexError: _logger.debug("Error parsing %s version output", version_info["full_path"]) return version_info if not re.match(r"(\d+)(\.(\d+)|devel|beta|alpha|rc).*", full_version): _logger.debug("Error parsing %s version output", version_info["full_path"]) return version_info # Extract the major version version_info["full_version"] = Version(full_version) version_info["major_version"] = Version( barman.utils.simplify_version(full_version) ) return version_info class PgBaseBackup(PostgreSQLClient): """ Wrapper class for the pg_basebackup system command """ COMMAND_ALTERNATIVES = ["pg_basebackup"] def __init__( self, connection, destination, command, version=None, app_name=None, bwlimit=None, tbs_mapping=None, immediate=False, check=True, args=None, **kwargs ): """ Constructor :param PostgreSQL connection: an object representing a database connection :param str destination: destination directory path :param str command: the command to use :param Version version: the command version :param str app_name: the application name to use for the connection :param str bwlimit: bandwidth limit for pg_basebackup :param Dict[str, str] tbs_mapping: used for tablespace :param bool immediate: fast checkpoint identifier for pg_basebackup :param bool check: check if the return value is in the list of allowed values of the Command obj :param List[str] args: additional arguments """ PostgreSQLClient.__init__( self, connection=connection, command=command, version=version, app_name=app_name, check=check, **kwargs ) # Set the backup destination self.args += ["-v", "--no-password", "--pgdata=%s" % destination] if version and version >= Version("10"): # If version of the client is >= 10 it would use # a temporary replication slot by default to keep WALs. # We don't need it because Barman already stores the full # WAL stream, so we disable this feature to avoid wasting one slot. 
self.args += ["--no-slot"] # We also need to specify that we do not want to fetch any WAL file self.args += ["--wal-method=none"] # The tablespace mapping option is repeated once for each tablespace if tbs_mapping: for (tbs_source, tbs_destination) in tbs_mapping.items(): self.args.append( "--tablespace-mapping=%s=%s" % (tbs_source, tbs_destination) ) # Only global bandwidth limit is supported if bwlimit is not None and bwlimit > 0: self.args.append("--max-rate=%s" % bwlimit) # Immediate checkpoint if immediate: self.args.append("--checkpoint=fast") # Manage additional args if args: self.args += args class PgReceiveXlog(PostgreSQLClient): """ Wrapper class for pg_receivexlog """ COMMAND_ALTERNATIVES = ["pg_receivewal", "pg_receivexlog"] def __init__( self, connection, destination, command, version=None, app_name=None, synchronous=False, check=True, slot_name=None, args=None, **kwargs ): """ Constructor :param PostgreSQL connection: an object representing a database connection :param str destination: destination directory path :param str command: the command to use :param Version version: the command version :param str app_name: the application name to use for the connection :param bool synchronous: request synchronous WAL streaming :param bool check: check if the return value is in the list of allowed values of the Command obj :param str slot_name: the replication slot name to use for the connection :param List[str] args: additional arguments """ PostgreSQLClient.__init__( self, connection=connection, command=command, version=version, app_name=app_name, check=check, **kwargs ) self.args += [ "--verbose", "--no-loop", "--no-password", "--directory=%s" % destination, ] # Add the replication slot name if set in the configuration. if slot_name is not None: self.args.append("--slot=%s" % slot_name) # Request synchronous mode if synchronous: self.args.append("--synchronous") # Manage additional args if args: self.args += args class PgVerifyBackup(PostgreSQLClient): """ Wrapper class for the pg_verify system command """ COMMAND_ALTERNATIVES = ["pg_verifybackup"] def __init__( self, data_path, command, connection=None, version=None, app_name=None, check=True, args=None, **kwargs ): """ Constructor :param str data_path: backup data directory :param str command: the command to use :param PostgreSQL connection: an object representing a database connection :param Version version: the command version :param str app_name: the application name to use for the connection :param bool check: check if the return value is in the list of allowed values of the Command obj :param List[str] args: additional arguments """ PostgreSQLClient.__init__( self, connection=connection, command=command, version=version, app_name=app_name, check=check, **kwargs ) self.args = ["-n", data_path] if args: self.args += args class BarmanSubProcess(object): """ Wrapper class for barman sub instances """ def __init__( self, command=sys.argv[0], subcommand=None, config=None, args=None, keep_descriptors=False, ): """ Build a specific wrapper for all the barman sub-commands, providing a unified interface. :param str command: path to barman :param str subcommand: the barman sub-command :param str config: path to the barman configuration file. :param list[str] args: a list containing the sub-command args like the target server name :param bool keep_descriptors: whether to keep the subprocess stdin, stdout, stderr descriptors attached. 
Defaults to False """ # The config argument is needed when the user explicitly # passes a configuration file, as the child process # must know the configuration file to use. # # The configuration file must always be propagated, # even in case of the default one. if not config: raise CommandFailedException( "No configuration file passed to barman subprocess" ) # Build the sub-command: # * be sure to run it with the right python interpreter # * pass the current configuration file with -c # * set it quiet with -q self.command = [sys.executable, command, "-c", config, "-q", subcommand] self.keep_descriptors = keep_descriptors # Handle args for the sub-command (like the server name) if args: self.command += args def execute(self): """ Execute the command and pass the output to the configured handlers """ _logger.debug("BarmanSubProcess: %r", self.command) # Redirect all descriptors to /dev/null devnull = open(os.devnull, "a+") additional_arguments = {} if not self.keep_descriptors: additional_arguments = {"stdout": devnull, "stderr": devnull} proc = subprocess.Popen( self.command, preexec_fn=os.setsid, close_fds=True, stdin=devnull, **additional_arguments ) _logger.debug("BarmanSubProcess: subprocess started. pid: %s", proc.pid) def shell_quote(arg): """ Quote a string argument to be safely included in a shell command line. :param str arg: The script argument :return: The argument quoted """ # This is an excerpt of the Bash manual page, and the same applies for # every Posix compliant shell: # # A non-quoted backslash (\) is the escape character. It preserves # the literal value of the next character that follows, with the # exception of . If a \ pair appears, and the # backslash is not itself quoted, the \ is treated as a # line continuation (that is, it is removed from the input # stream and effectively ignored). # # Enclosing characters in single quotes preserves the literal value # of each character within the quotes. A single quote may not occur # between single quotes, even when pre-ceded by a backslash. # # This means that, as long as the original string doesn't contain any # apostrophe character, it can be safely included between single quotes. # # If a single quote is contained in the string, we must terminate the # string with a quote, insert an apostrophe character escaping it with # a backslash, and then start another string using a quote character. assert arg is not None return "'%s'" % arg.replace("'", "'\\''") def full_command_quote(command, args=None): """ Produce a command with quoted arguments :param str command: the command to be executed :param list[str] args: the command arguments :rtype: str """ if args is not None and len(args) > 0: return "%s %s" % (command, " ".join([shell_quote(arg) for arg in args])) else: return command barman-2.18/barman/output.py0000644000621200062120000020666414172556763014270 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module control how the output of Barman will be rendered """ from __future__ import print_function import datetime import inspect import json import logging import sys from barman.infofile import BackupInfo from barman.utils import ( BarmanEncoder, force_str, human_readable_timedelta, pretty_size, redact_passwords, ) from barman.xlog import diff_lsn __all__ = [ "error_occurred", "debug", "info", "warning", "error", "exception", "result", "close_and_exit", "close", "set_output_writer", "AVAILABLE_WRITERS", "DEFAULT_WRITER", "ConsoleOutputWriter", "NagiosOutputWriter", "JsonOutputWriter", ] #: True if error or exception methods have been called error_occurred = False #: Exit code if error occurred error_exit_code = 1 #: Enable colors in the output ansi_colors_enabled = False def _ansi_color(command): """ Return the ansi sequence for the provided color """ return "\033[%sm" % command def _colored(message, color): """ Return a string formatted with the provided color. """ if ansi_colors_enabled: return _ansi_color(color) + message + _ansi_color("0") else: return message def _red(message): """ Format a red string """ return _colored(message, "31") def _green(message): """ Format a green string """ return _colored(message, "32") def _yellow(message): """ Format a yellow string """ return _colored(message, "33") def _format_message(message, args): """ Format a message using the args list. The result will be equivalent to message % args If args list contains a dictionary as its only element the result will be message % args[0] :param str message: the template string to be formatted :param tuple args: a list of arguments :return: the formatted message :rtype: str """ if len(args) == 1 and isinstance(args[0], dict): return message % args[0] elif len(args) > 0: return message % args else: return message def _put(level, message, *args, **kwargs): """ Send the message with all the remaining positional arguments to the configured output manager with the right output level. The message will be sent also to the logger unless explicitly disabled with log=False No checks are performed on level parameter as this method is meant to be called only by this module. 
If level == 'exception' the stack trace will be also logged :param str level: :param str message: the template string to be formatted :param tuple args: all remaining arguments are passed to the log formatter :key bool log: whether to log the message :key bool is_error: treat this message as an error """ # handle keyword-only parameters log = kwargs.pop("log", True) is_error = kwargs.pop("is_error", False) global error_exit_code error_exit_code = kwargs.pop("exit_code", error_exit_code) if len(kwargs): raise TypeError( "%s() got an unexpected keyword argument %r" % (inspect.stack()[1][3], kwargs.popitem()[0]) ) if is_error: global error_occurred error_occurred = True _writer.error_occurred() # Make sure the message is an unicode string if message: message = force_str(message) # dispatch the call to the output handler getattr(_writer, level)(message, *args) # log the message as originating from caller's caller module if log: exc_info = False if level == "exception": level = "error" exc_info = True frm = inspect.stack()[2] mod = inspect.getmodule(frm[0]) logger = logging.getLogger(mod.__name__) log_level = logging.getLevelName(level.upper()) logger.log(log_level, message, *args, **{"exc_info": exc_info}) def _dispatch(obj, prefix, name, *args, **kwargs): """ Dispatch the call to the %(prefix)s_%(name) method of the obj object :param obj: the target object :param str prefix: prefix of the method to be called :param str name: name of the method to be called :param tuple args: all remaining positional arguments will be sent to target :param dict kwargs: all remaining keyword arguments will be sent to target :return: the result of the invoked method :raise ValueError: if the target method is not present """ method_name = "%s_%s" % (prefix, name) handler = getattr(obj, method_name, None) if callable(handler): return handler(*args, **kwargs) else: raise ValueError( "The object %r does not have the %r method" % (obj, method_name) ) def is_quiet(): """ Calls the "is_quiet" method, accessing the protected parameter _quiet of the instanced OutputWriter :return bool: the _quiet parameter value """ return _writer.is_quiet() def is_debug(): """ Calls the "is_debug" method, accessing the protected parameter _debug of the instanced OutputWriter :return bool: the _debug parameter value """ return _writer.is_debug() def debug(message, *args, **kwargs): """ Output a message with severity 'DEBUG' :key bool log: whether to log the message """ _put("debug", message, *args, **kwargs) def info(message, *args, **kwargs): """ Output a message with severity 'INFO' :key bool log: whether to log the message """ _put("info", message, *args, **kwargs) def warning(message, *args, **kwargs): """ Output a message with severity 'INFO' :key bool log: whether to log the message """ _put("warning", message, *args, **kwargs) def error(message, *args, **kwargs): """ Output a message with severity 'ERROR'. Also records that an error has occurred unless the ignore parameter is True. 
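    For example (message and arguments are illustrative):

        error("Backup %s failed", backup_id)
        # reported, and the exit status becomes non-zero
        error("Backup %s failed", backup_id, ignore=True)
        # reported, but the exit status is left untouched
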
:key bool ignore: avoid setting an error exit status (default False) :key bool log: whether to log the message """ # ignore is a keyword-only parameter ignore = kwargs.pop("ignore", False) if not ignore: kwargs.setdefault("is_error", True) _put("error", message, *args, **kwargs) def exception(message, *args, **kwargs): """ Output a message with severity 'EXCEPTION' If raise_exception parameter doesn't evaluate to false raise and exception: - if raise_exception is callable raise the result of raise_exception() - if raise_exception is an exception raise it - else raise the last exception again :key bool ignore: avoid setting an error exit status :key raise_exception: raise an exception after the message has been processed :key bool log: whether to log the message """ # ignore and raise_exception are keyword-only parameters ignore = kwargs.pop("ignore", False) # noinspection PyNoneFunctionAssignment raise_exception = kwargs.pop("raise_exception", None) if not ignore: kwargs.setdefault("is_error", True) _put("exception", message, *args, **kwargs) if raise_exception: if callable(raise_exception): # noinspection PyCallingNonCallable raise raise_exception(message) elif isinstance(raise_exception, BaseException): raise raise_exception else: raise def init(command, *args, **kwargs): """ Initialize the output writer for a given command. :param str command: name of the command are being executed :param tuple args: all remaining positional arguments will be sent to the output processor :param dict kwargs: all keyword arguments will be sent to the output processor """ try: _dispatch(_writer, "init", command, *args, **kwargs) except ValueError: exception( 'The %s writer does not support the "%s" command', _writer.__class__.__name__, command, ) close_and_exit() def result(command, *args, **kwargs): """ Output the result of an operation. :param str command: name of the command are being executed :param tuple args: all remaining positional arguments will be sent to the output processor :param dict kwargs: all keyword arguments will be sent to the output processor """ try: _dispatch(_writer, "result", command, *args, **kwargs) except ValueError: exception( 'The %s writer does not support the "%s" command', _writer.__class__.__name__, command, ) close_and_exit() def close_and_exit(): """ Close the output writer and terminate the program. If an error has been emitted the program will report a non zero return value. """ close() if error_occurred: sys.exit(error_exit_code) else: sys.exit(0) def close(): """ Close the output writer. """ _writer.close() def set_output_writer(new_writer, *args, **kwargs): """ Replace the current output writer with a new one. The new_writer parameter can be a symbolic name or an OutputWriter object :param new_writer: the OutputWriter name or the actual OutputWriter :type: string or an OutputWriter :param tuple args: all remaining positional arguments will be passed to the OutputWriter constructor :param dict kwargs: all remaining keyword arguments will be passed to the OutputWriter constructor """ global _writer _writer.close() if new_writer in AVAILABLE_WRITERS: _writer = AVAILABLE_WRITERS[new_writer](*args, **kwargs) else: _writer = new_writer class ConsoleOutputWriter(object): def __init__(self, debug=False, quiet=False): """ Default output writer that output everything on console. 
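Example (sketch): set_output_writer("console", debug=True) installs a fresh instance of this writer through the module-level registry.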
:param bool debug: print debug messages on standard error :param bool quiet: don't print info messages """ self._debug = debug self._quiet = quiet #: Used in check command to hold the check results self.result_check_list = [] #: The minimal flag. If set the command must output a single list of #: values. self.minimal = False #: The server is active self.active = True def _print(self, message, args, stream): """ Print an encoded message on the given output stream """ # Make sure to add a newline at the end of the message if message is None: message = "\n" else: message += "\n" # Format and encode the message, redacting eventual passwords encoded_msg = redact_passwords(_format_message(message, args)).encode("utf-8") try: # Python 3.x stream.buffer.write(encoded_msg) except AttributeError: # Python 2.x stream.write(encoded_msg) stream.flush() def _out(self, message, args): """ Print a message on standard output """ self._print(message, args, sys.stdout) def _err(self, message, args): """ Print a message on standard error """ self._print(message, args, sys.stderr) def is_quiet(self): """ Access the quiet property of the OutputWriter instance :return bool: if the writer is quiet or not """ return self._quiet def is_debug(self): """ Access the debug property of the OutputWriter instance :return bool: if the writer is in debug mode or not """ return self._debug def debug(self, message, *args): """ Emit debug. """ if self._debug: self._err("DEBUG: %s" % message, args) def info(self, message, *args): """ Normal messages are sent to standard output """ if not self._quiet: self._out(message, args) def warning(self, message, *args): """ Warning messages are sent to standard error """ self._err(_yellow("WARNING: %s" % message), args) def error(self, message, *args): """ Error messages are sent to standard error """ self._err(_red("ERROR: %s" % message), args) def exception(self, message, *args): """ Warning messages are sent to standard error """ self._err(_red("EXCEPTION: %s" % message), args) def error_occurred(self): """ Called immediately before any message method when the originating call has is_error=True """ def close(self): """ Close the output channel. Nothing to do for console. """ def result_backup(self, backup_info): """ Render the result of a backup. Nothing to do for console. """ # TODO: evaluate to display something useful here def result_recovery(self, results): """ Render the result of a recovery. """ if len(results["changes"]) > 0: self.info("") self.info("IMPORTANT") self.info("These settings have been modified to prevent data losses") self.info("") for assertion in results["changes"]: self.info( "%s line %s: %s = %s", assertion.filename, assertion.line, assertion.key, assertion.value, ) if len(results["warnings"]) > 0: self.info("") self.info("WARNING") self.info( "You are required to review the following options" " as potentially dangerous" ) self.info("") for assertion in results["warnings"]: self.info( "%s line %s: %s = %s", assertion.filename, assertion.line, assertion.key, assertion.value, ) if results["missing_files"]: # At least one file is missing, warn the user self.info("") self.info("WARNING") self.info( "The following configuration files have not been " "saved during backup, hence they have not been " "restored." 
) self.info( "You need to manually restore them " "in order to start the recovered PostgreSQL instance:" ) self.info("") for file_name in results["missing_files"]: self.info(" %s" % file_name) if results["delete_barman_wal"]: self.info("") self.info( "After the recovery, please remember to remove the " '"barman_wal" directory' ) self.info("inside the PostgreSQL data directory.") if results["get_wal"]: self.info("") self.info("WARNING: 'get-wal' is in the specified 'recovery_options'.") self.info( "Before you start up the PostgreSQL server, please " "review the %s file", results["recovery_configuration_file"], ) self.info( "inside the target directory. Make sure that " "'restore_command' can be executed by " "the PostgreSQL user." ) self.info("") self.info( "Recovery completed (start time: %s, elapsed time: %s)", results["recovery_start_time"], human_readable_timedelta( datetime.datetime.now() - results["recovery_start_time"] ), ) self.info("") self.info("Your PostgreSQL server has been successfully prepared for recovery!") def _record_check(self, server_name, check, status, hint, perfdata): """ Record the check line in result_check_map attribute This method is for subclass use :param str server_name: the server is being checked :param str check: the check name :param bool status: True if succeeded :param str,None hint: hint to print if not None :param str,None perfdata: additional performance data to print if not None """ self.result_check_list.append( dict( server_name=server_name, check=check, status=status, hint=hint, perfdata=perfdata, ) ) if not status and self.active: global error_occurred error_occurred = True def init_check(self, server_name, active, disabled): """ Init the check command :param str server_name: the server we are start listing :param boolean active: The server is active :param boolean disabled: The server is disabled """ display_name = server_name # If the server has been manually disabled if not active: display_name += " (inactive)" # If server has configuration errors elif disabled: display_name += " (WARNING: disabled)" self.info("Server %s:" % display_name) self.active = active def result_check(self, server_name, check, status, hint=None, perfdata=None): """ Record a server result of a server check and output it as INFO :param str server_name: the server is being checked :param str check: the check name :param bool status: True if succeeded :param str,None hint: hint to print if not None :param str,None perfdata: additional performance data to print if not None """ self._record_check(server_name, check, status, hint, perfdata) if hint: self.info( "\t%s: %s (%s)" % (check, _green("OK") if status else _red("FAILED"), hint) ) else: self.info("\t%s: %s" % (check, _green("OK") if status else _red("FAILED"))) def init_list_backup(self, server_name, minimal=False): """ Init the list-backups command :param str server_name: the server we are start listing :param bool minimal: if true output only a list of backup id """ self.minimal = minimal def result_list_backup(self, backup_info, backup_size, wal_size, retention_status): """ Output a single backup in the list-backups command :param BackupInfo backup_info: backup we are displaying :param backup_size: size of base backup (with the required WAL files) :param wal_size: size of WAL files belonging to this backup (without the required WAL files) :param retention_status: retention policy status """ # If minimal is set only output the backup id if self.minimal: self.info(backup_info.backup_id) return out_list = ["%s %s - " % 
(backup_info.server_name, backup_info.backup_id)] if backup_info.status in BackupInfo.STATUS_COPY_DONE: end_time = backup_info.end_time.ctime() out_list.append( "%s - Size: %s - WAL Size: %s" % (end_time, pretty_size(backup_size), pretty_size(wal_size)) ) if backup_info.tablespaces: tablespaces = [ ("%s:%s" % (tablespace.name, tablespace.location)) for tablespace in backup_info.tablespaces ] out_list.append(" (tablespaces: %s)" % ", ".join(tablespaces)) if backup_info.status == BackupInfo.WAITING_FOR_WALS: out_list.append(" - %s" % BackupInfo.WAITING_FOR_WALS) if retention_status and retention_status != BackupInfo.NONE: out_list.append(" - %s" % retention_status) else: out_list.append(backup_info.status) self.info("".join(out_list)) def result_show_backup(self, backup_ext_info): """ Output all available information about a backup in show-backup command The argument has to be the result of a Server.get_backup_ext_info() call :param dict backup_ext_info: a dictionary containing the info to display """ data = dict(backup_ext_info) self.info("Backup %s:", data["backup_id"]) self.info(" Server Name : %s", data["server_name"]) if data["systemid"]: self.info(" System Id : %s", data["systemid"]) self.info(" Status : %s", data["status"]) if data["status"] in BackupInfo.STATUS_COPY_DONE: self.info(" PostgreSQL Version : %s", data["version"]) self.info(" PGDATA directory : %s", data["pgdata"]) if data["tablespaces"]: self.info(" Tablespaces:") for item in data["tablespaces"]: self.info( " %s: %s (oid: %s)", item.name, item.location, item.oid ) self.info("") self.info(" Base backup information:") self.info( " Disk usage : %s (%s with WALs)", pretty_size(data["size"]), pretty_size(data["size"] + data["wal_size"]), ) if data["deduplicated_size"] is not None and data["size"] > 0: deduplication_ratio = 1 - ( float(data["deduplicated_size"]) / data["size"] ) self.info( " Incremental size : %s (-%s)", pretty_size(data["deduplicated_size"]), "{percent:.2%}".format(percent=deduplication_ratio), ) self.info(" Timeline : %s", data["timeline"]) self.info(" Begin WAL : %s", data["begin_wal"]) self.info(" End WAL : %s", data["end_wal"]) self.info(" WAL number : %s", data["wal_num"]) # Output WAL compression ratio for basebackup WAL files if data["wal_compression_ratio"] > 0: self.info( " WAL compression ratio: %s", "{percent:.2%}".format(percent=data["wal_compression_ratio"]), ) self.info(" Begin time : %s", data["begin_time"]) self.info(" End time : %s", data["end_time"]) # If copy statistics are available print a summary copy_stats = data.get("copy_stats") if copy_stats: copy_time = copy_stats.get("copy_time") if copy_time: value = human_readable_timedelta( datetime.timedelta(seconds=copy_time) ) # Show analysis time if it is more than a second analysis_time = copy_stats.get("analysis_time") if analysis_time is not None and analysis_time >= 1: value += " + %s startup" % ( human_readable_timedelta( datetime.timedelta(seconds=analysis_time) ) ) self.info(" Copy time : %s", value) size = data["deduplicated_size"] or data["size"] value = "%s/s" % pretty_size(size / copy_time) number_of_workers = copy_stats.get("number_of_workers", 1) if number_of_workers > 1: value += " (%s jobs)" % number_of_workers self.info(" Estimated throughput : %s", value) self.info(" Begin Offset : %s", data["begin_offset"]) self.info(" End Offset : %s", data["end_offset"]) self.info(" Begin LSN : %s", data["begin_xlog"]) self.info(" End LSN : %s", data["end_xlog"]) self.info("") self.info(" WAL information:") self.info(" No of files : %s", 
data["wal_until_next_num"]) self.info( " Disk usage : %s", pretty_size(data["wal_until_next_size"]), ) # Output WAL rate if data["wals_per_second"] > 0: self.info( " WAL rate : %0.2f/hour", data["wals_per_second"] * 3600, ) # Output WAL compression ratio for archived WAL files if data["wal_until_next_compression_ratio"] > 0: self.info( " Compression ratio : %s", "{percent:.2%}".format( percent=data["wal_until_next_compression_ratio"] ), ) self.info(" Last available : %s", data["wal_last"]) if data["children_timelines"]: timelines = data["children_timelines"] self.info( " Reachable timelines : %s", ", ".join([str(history.tli) for history in timelines]), ) self.info("") self.info(" Catalog information:") self.info( " Retention Policy : %s", data["retention_policy_status"] or "not enforced", ) previous_backup_id = data.setdefault("previous_backup_id", "not available") self.info( " Previous Backup : %s", previous_backup_id or "- (this is the oldest base backup)", ) next_backup_id = data.setdefault("next_backup_id", "not available") self.info( " Next Backup : %s", next_backup_id or "- (this is the latest base backup)", ) if data["children_timelines"]: self.info("") self.info( "WARNING: WAL information is inaccurate due to " "multiple timelines interacting with this backup" ) else: if data["error"]: self.info(" Error: : %s", data["error"]) def init_status(self, server_name): """ Init the status command :param str server_name: the server we are start listing """ self.info("Server %s:", server_name) def result_status(self, server_name, status, description, message): """ Record a result line of a server status command and output it as INFO :param str server_name: the server is being checked :param str status: the returned status code :param str description: the returned status description :param str,object message: status message. It will be converted to str """ self.info("\t%s: %s", description, str(message)) def init_replication_status(self, server_name, minimal=False): """ Init the 'standby-status' command :param str server_name: the server we are start listing :param str minimal: minimal output """ self.minimal = minimal def result_replication_status(self, server_name, target, server_lsn, standby_info): """ Record a result line of a server status command and output it as INFO :param str server_name: the replication server :param str target: all|hot-standby|wal-streamer :param str server_lsn: server's current lsn :param StatReplication standby_info: status info of a standby """ if target == "hot-standby": title = "hot standby servers" elif target == "wal-streamer": title = "WAL streamers" else: title = "streaming clients" if self.minimal: # Minimal output if server_lsn: # current lsn from the master self.info( "%s for master '%s' (LSN @ %s):", title.capitalize(), server_name, server_lsn, ) else: # We are connected to a standby self.info("%s for slave '%s':", title.capitalize(), server_name) else: # Full output self.info("Status of %s for server '%s':", title, server_name) # current lsn from the master if server_lsn: self.info(" Current LSN on master: %s", server_lsn) if standby_info is not None and not len(standby_info): self.info(" No %s attached", title) return # Minimal output if self.minimal: n = 1 for standby in standby_info: if not standby.replay_lsn: # WAL streamer self.info( " %s. W) %s@%s S:%s W:%s P:%s AN:%s", n, standby.usename, standby.client_addr or "socket", standby.sent_lsn, standby.write_lsn, standby.sync_priority, standby.application_name, ) else: # Standby self.info( " %s. 
%s) %s@%s S:%s F:%s R:%s P:%s AN:%s", n, standby.sync_state[0].upper(), standby.usename, standby.client_addr or "socket", standby.sent_lsn, standby.flush_lsn, standby.replay_lsn, standby.sync_priority, standby.application_name, ) n += 1 else: n = 1 self.info(" Number of %s: %s", title, len(standby_info)) for standby in standby_info: self.info("") # Calculate differences in bytes sent_diff = diff_lsn(standby.sent_lsn, standby.current_lsn) write_diff = diff_lsn(standby.write_lsn, standby.current_lsn) flush_diff = diff_lsn(standby.flush_lsn, standby.current_lsn) replay_diff = diff_lsn(standby.replay_lsn, standby.current_lsn) # Determine the sync stage of the client sync_stage = None if not standby.replay_lsn: client_type = "WAL streamer" max_level = 3 else: client_type = "standby" max_level = 5 # Only standby can replay WAL info if replay_diff == 0: sync_stage = "5/5 Hot standby (max)" elif flush_diff == 0: sync_stage = "4/5 2-safe" # remote flush # If not yet done, set the sync stage if not sync_stage: if write_diff == 0: sync_stage = "3/%s Remote write" % max_level elif sent_diff == 0: sync_stage = "2/%s WAL Sent (min)" % max_level else: sync_stage = "1/%s 1-safe" % max_level # Synchronous standby if getattr(standby, "sync_priority", None) > 0: self.info( " %s. #%s %s %s", n, standby.sync_priority, standby.sync_state.capitalize(), client_type, ) # Asynchronous standby else: self.info( " %s. %s %s", n, standby.sync_state.capitalize(), client_type ) self.info(" Application name: %s", standby.application_name) self.info(" Sync stage : %s", sync_stage) if getattr(standby, "client_addr", None): self.info(" Communication : TCP/IP") self.info( " IP Address : %s / Port: %s / Host: %s", standby.client_addr, standby.client_port, standby.client_hostname or "-", ) else: self.info(" Communication : Unix domain socket") self.info(" User name : %s", standby.usename) self.info( " Current state : %s (%s)", standby.state, standby.sync_state ) if getattr(standby, "slot_name", None): self.info(" Replication slot: %s", standby.slot_name) self.info(" WAL sender PID : %s", standby.pid) self.info(" Started at : %s", standby.backend_start) if getattr(standby, "backend_xmin", None): self.info(" Standby's xmin : %s", standby.backend_xmin or "-") if getattr(standby, "sent_lsn", None): self.info( " Sent LSN : %s (diff: %s)", standby.sent_lsn, pretty_size(sent_diff), ) if getattr(standby, "write_lsn", None): self.info( " Write LSN : %s (diff: %s)", standby.write_lsn, pretty_size(write_diff), ) if getattr(standby, "flush_lsn", None): self.info( " Flush LSN : %s (diff: %s)", standby.flush_lsn, pretty_size(flush_diff), ) if getattr(standby, "replay_lsn", None): self.info( " Replay LSN : %s (diff: %s)", standby.replay_lsn, pretty_size(replay_diff), ) n += 1 def init_list_server(self, server_name, minimal=False): """ Init the list-servers command :param str server_name: the server we are start listing """ self.minimal = minimal def result_list_server(self, server_name, description=None): """ Output a result line of a list-servers command :param str server_name: the server is being checked :param str,None description: server description if applicable """ if self.minimal or not description: self.info("%s", server_name) else: self.info("%s - %s", server_name, description) def init_show_server(self, server_name): """ Init the show-servers command output method :param str server_name: the server we are displaying """ self.info("Server %s:" % server_name) def result_show_server(self, server_name, server_info): """ Output the 
results of the show-servers command :param str server_name: the server we are displaying :param dict server_info: a dictionary containing the info to display """ for status, message in sorted(server_info.items()): self.info("\t%s: %s", status, message) def init_check_wal_archive(self, server_name): """ Init the check-wal-archive command output method :param str server_name: the server we are displaying """ self.info("Server %s:" % server_name) def result_check_wal_archive(self, server_name): """ Output the results of the check-wal-archive command :param str server_name: the server we are displaying """ self.info(" - WAL archive check for server %s passed" % server_name) class JsonOutputWriter(ConsoleOutputWriter): def __init__(self, *args, **kwargs): """ Output writer that writes on standard output using JSON. When closed, it dumps all the collected results as a JSON object. """ super(JsonOutputWriter, self).__init__(*args, **kwargs) #: Store JSON data self.json_output = {} def _mangle_key(self, value): """ Mangle a generic description to be used as dict key :type value: str :rtype: str """ return value.lower().replace(" ", "_").replace("-", "_").replace(".", "") def _out_to_field(self, field, message, *args): """ Store a message in the required field """ if field not in self.json_output: self.json_output[field] = [] message = _format_message(message, args) self.json_output[field].append(message) def debug(self, message, *args): """ Add debug messages in _DEBUG list """ if not self._debug: return self._out_to_field("_DEBUG", message, *args) def info(self, message, *args): """ Add normal messages in _INFO list """ self._out_to_field("_INFO", message, *args) def warning(self, message, *args): """ Add warning messages in _WARNING list """ self._out_to_field("_WARNING", message, *args) def error(self, message, *args): """ Add error messages in _ERROR list """ self._out_to_field("_ERROR", message, *args) def exception(self, message, *args): """ Add exception messages in _EXCEPTION list """ self._out_to_field("_EXCEPTION", message, *args) def close(self): """ Close the output channel. Print JSON output """ if not self._quiet: json.dump(self.json_output, sys.stdout, sort_keys=True, cls=BarmanEncoder) self.json_output = {} def result_backup(self, backup_info): """ Save the result of a backup. """ self.json_output.update(backup_info.to_dict()) def result_recovery(self, results): """ Render the result of a recovery. """ changes_count = len(results["changes"]) self.json_output["changes_count"] = changes_count self.json_output["changes"] = results["changes"] if changes_count > 0: self.warning( "IMPORTANT! Some settings have been modified " "to prevent data losses. See 'changes' key." ) warnings_count = len(results["warnings"]) self.json_output["warnings_count"] = warnings_count self.json_output["warnings"] = results["warnings"] if warnings_count > 0: self.warning( "WARNING! You are required to review the options " "as potentially dangerous. See 'warnings' key." ) missing_files_count = len(results["missing_files"]) self.json_output["missing_files"] = results["missing_files"] if missing_files_count > 0: # At least one file is missing, warn the user self.warning( "WARNING! Some configuration files have not been " "saved during backup, hence they have not been " "restored. See 'missing_files' key." ) if results["delete_barman_wal"]: self.warning( "After the recovery, please remember to remove the " "'barman_wal' directory inside the PostgreSQL " "data directory." 
) if results["get_wal"]: self.warning( "WARNING: 'get-wal' is in the specified " "'recovery_options'. Before you start up the " "PostgreSQL server, please review the recovery " "configuration inside the target directory. " "Make sure that 'restore_command' can be " "executed by the PostgreSQL user." ) self.json_output.update( { "recovery_start_time": results["recovery_start_time"].isoformat(" "), "recovery_start_time_timestamp": results[ "recovery_start_time" ].strftime("%s"), "recovery_elapsed_time": human_readable_timedelta( datetime.datetime.now() - results["recovery_start_time"] ), "recovery_elapsed_time_seconds": ( datetime.datetime.now() - results["recovery_start_time"] ).total_seconds(), } ) def init_check(self, server_name, active, disabled): """ Init the check command :param str server_name: the server we are start listing :param boolean active: The server is active :param boolean disabled: The server is disabled """ self.json_output[server_name] = {} self.active = active def result_check(self, server_name, check, status, hint=None, perfdata=None): """ Record a server result of a server check and output it as INFO :param str server_name: the server is being checked :param str check: the check name :param bool status: True if succeeded :param str,None hint: hint to print if not None :param str,None perfdata: additional performance data to print if not None """ self._record_check(server_name, check, status, hint, perfdata) check_key = self._mangle_key(check) self.json_output[server_name][check_key] = dict( status="OK" if status else "FAILED", hint=hint or "" ) def init_list_backup(self, server_name, minimal=False): """ Init the list-backups command :param str server_name: the server we are listing :param bool minimal: if true output only a list of backup id """ self.minimal = minimal self.json_output[server_name] = [] def result_list_backup(self, backup_info, backup_size, wal_size, retention_status): """ Output a single backup in the list-backups command :param BackupInfo backup_info: backup we are displaying :param backup_size: size of base backup (with the required WAL files) :param wal_size: size of WAL files belonging to this backup (without the required WAL files) :param retention_status: retention policy status """ server_name = backup_info.server_name # If minimal is set only output the backup id if self.minimal: self.json_output[server_name].append(backup_info.backup_id) return output = dict( backup_id=backup_info.backup_id, ) if backup_info.status in BackupInfo.STATUS_COPY_DONE: output.update( dict( end_time_timestamp=backup_info.end_time.strftime("%s"), end_time=backup_info.end_time.ctime(), size_bytes=backup_size, wal_size_bytes=wal_size, size=pretty_size(backup_size), wal_size=pretty_size(wal_size), status=backup_info.status, retention_status=retention_status or BackupInfo.NONE, ) ) output["tablespaces"] = [] if backup_info.tablespaces: for tablespace in backup_info.tablespaces: output["tablespaces"].append( dict(name=tablespace.name, location=tablespace.location) ) else: output.update(dict(status=backup_info.status)) self.json_output[server_name].append(output) def result_show_backup(self, backup_ext_info): """ Output all available information about a backup in show-backup command The argument has to be the result of a Server.get_backup_ext_info() call :param dict backup_ext_info: a dictionary containing the info to display """ data = dict(backup_ext_info) server_name = data["server_name"] output = self.json_output[server_name] = dict( backup_id=data["backup_id"], 
status=data["status"] ) if data["status"] in BackupInfo.STATUS_COPY_DONE: output.update( dict( postgresql_version=data["version"], pgdata_directory=data["pgdata"], tablespaces=[], ) ) if data["tablespaces"]: for item in data["tablespaces"]: output["tablespaces"].append( dict(name=item.name, location=item.location, oid=item.oid) ) output["base_backup_information"] = dict( disk_usage=pretty_size(data["size"]), disk_usage_bytes=data["size"], disk_usage_with_wals=pretty_size(data["size"] + data["wal_size"]), disk_usage_with_wals_bytes=data["size"] + data["wal_size"], ) if data["deduplicated_size"] is not None and data["size"] > 0: deduplication_ratio = 1 - ( float(data["deduplicated_size"]) / data["size"] ) output["base_backup_information"].update( dict( incremental_size=pretty_size(data["deduplicated_size"]), incremental_size_bytes=data["deduplicated_size"], incremental_size_ratio="-{percent:.2%}".format( percent=deduplication_ratio ), ) ) output["base_backup_information"].update( dict( timeline=data["timeline"], begin_wal=data["begin_wal"], end_wal=data["end_wal"], ) ) if data["wal_compression_ratio"] > 0: output["base_backup_information"].update( dict( wal_compression_ratio="{percent:.2%}".format( percent=data["wal_compression_ratio"] ) ) ) output["base_backup_information"].update( dict( begin_time_timestamp=data["begin_time"].strftime("%s"), begin_time=data["begin_time"].isoformat(sep=" "), end_time_timestamp=data["end_time"].strftime("%s"), end_time=data["end_time"].isoformat(sep=" "), ) ) copy_stats = data.get("copy_stats") if copy_stats: copy_time = copy_stats.get("copy_time") analysis_time = copy_stats.get("analysis_time", 0) if copy_time: output["base_backup_information"].update( dict( copy_time=human_readable_timedelta( datetime.timedelta(seconds=copy_time) ), copy_time_seconds=copy_time, analysis_time=human_readable_timedelta( datetime.timedelta(seconds=analysis_time) ), analysis_time_seconds=analysis_time, ) ) size = data["deduplicated_size"] or data["size"] output["base_backup_information"].update( dict( throughput="%s/s" % pretty_size(size / copy_time), throughput_bytes=size / copy_time, number_of_workers=copy_stats.get("number_of_workers", 1), ) ) output["base_backup_information"].update( dict( begin_offset=data["begin_offset"], end_offset=data["end_offset"], begin_lsn=data["begin_xlog"], end_lsn=data["end_xlog"], ) ) wal_output = output["wal_information"] = dict( no_of_files=data["wal_until_next_num"], disk_usage=pretty_size(data["wal_until_next_size"]), disk_usage_bytes=data["wal_until_next_size"], wal_rate=0, wal_rate_per_second=0, compression_ratio=0, last_available=data["wal_last"], timelines=[], ) # TODO: move the following calculations in a separate function # or upstream (backup_ext_info?) so that they are shared with # console output. 
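# The block below mirrors the console writer: wals_per_second is reported
# as an hourly rate (value * 3600, so e.g. 0.0025 WALs/s is shown as
# "9.00/hour") and the archived-WAL compression ratio as a percentage string.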
if data["wals_per_second"] > 0: wal_output["wal_rate"] = "%0.2f/hour" % (data["wals_per_second"] * 3600) wal_output["wal_rate_per_second"] = data["wals_per_second"] if data["wal_until_next_compression_ratio"] > 0: wal_output["compression_ratio"] = "{percent:.2%}".format( percent=data["wal_until_next_compression_ratio"] ) if data["children_timelines"]: wal_output[ "_WARNING" ] = "WAL information is inaccurate \ due to multiple timelines interacting with \ this backup" for history in data["children_timelines"]: wal_output["timelines"].append(str(history.tli)) previous_backup_id = data.setdefault("previous_backup_id", "not available") next_backup_id = data.setdefault("next_backup_id", "not available") output["catalog_information"] = { "retention_policy": data["retention_policy_status"] or "not enforced", "previous_backup": previous_backup_id or "- (this is the oldest base backup)", "next_backup": next_backup_id or "- (this is the latest base backup)", } else: if data["error"]: output["error"] = data["error"] def init_status(self, server_name): """ Init the status command :param str server_name: the server we are start listing """ if not hasattr(self, "json_output"): self.json_output = {} self.json_output[server_name] = {} def result_status(self, server_name, status, description, message): """ Record a result line of a server status command and output it as INFO :param str server_name: the server is being checked :param str status: the returned status code :param str description: the returned status description :param str,object message: status message. It will be converted to str """ self.json_output[server_name][status] = dict( description=description, message=str(message) ) def init_replication_status(self, server_name, minimal=False): """ Init the 'standby-status' command :param str server_name: the server we are start listing :param str minimal: minimal output """ if not hasattr(self, "json_output"): self.json_output = {} self.json_output[server_name] = {} self.minimal = minimal def result_replication_status(self, server_name, target, server_lsn, standby_info): """ Record a result line of a server status command and output it as INFO :param str server_name: the replication server :param str target: all|hot-standby|wal-streamer :param str server_lsn: server's current lsn :param StatReplication standby_info: status info of a standby """ if target == "hot-standby": title = "hot standby servers" elif target == "wal-streamer": title = "WAL streamers" else: title = "streaming clients" title_key = self._mangle_key(title) if title_key not in self.json_output[server_name]: self.json_output[server_name][title_key] = [] self.json_output[server_name]["server_lsn"] = server_lsn if server_lsn else None if standby_info is not None and not len(standby_info): self.json_output[server_name]["standby_info"] = "No %s attached" % title return self.json_output[server_name][title_key] = [] # Minimal output if self.minimal: for idx, standby in enumerate(standby_info): if not standby.replay_lsn: # WAL streamer self.json_output[server_name][title_key].append( dict( user_name=standby.usename, client_addr=standby.client_addr or "socket", sent_lsn=standby.sent_lsn, write_lsn=standby.write_lsn, sync_priority=standby.sync_priority, application_name=standby.application_name, ) ) else: # Standby self.json_output[server_name][title_key].append( dict( sync_state=standby.sync_state[0].upper(), user_name=standby.usename, client_addr=standby.client_addr or "socket", sent_lsn=standby.sent_lsn, flush_lsn=standby.flush_lsn, 
replay_lsn=standby.replay_lsn, sync_priority=standby.sync_priority, application_name=standby.application_name, ) ) else: for idx, standby in enumerate(standby_info): self.json_output[server_name][title_key].append({}) json_output = self.json_output[server_name][title_key][idx] # Calculate differences in bytes lsn_diff = dict( sent=diff_lsn(standby.sent_lsn, standby.current_lsn), write=diff_lsn(standby.write_lsn, standby.current_lsn), flush=diff_lsn(standby.flush_lsn, standby.current_lsn), replay=diff_lsn(standby.replay_lsn, standby.current_lsn), ) # Determine the sync stage of the client sync_stage = None if not standby.replay_lsn: client_type = "WAL streamer" max_level = 3 else: client_type = "standby" max_level = 5 # Only standby can replay WAL info if lsn_diff["replay"] == 0: sync_stage = "5/5 Hot standby (max)" elif lsn_diff["flush"] == 0: sync_stage = "4/5 2-safe" # remote flush # If not yet done, set the sync stage if not sync_stage: if lsn_diff["write"] == 0: sync_stage = "3/%s Remote write" % max_level elif lsn_diff["sent"] == 0: sync_stage = "2/%s WAL Sent (min)" % max_level else: sync_stage = "1/%s 1-safe" % max_level # Synchronous standby if getattr(standby, "sync_priority", None) > 0: json_output["name"] = "#%s %s %s" % ( standby.sync_priority, standby.sync_state.capitalize(), client_type, ) # Asynchronous standby else: json_output["name"] = "%s %s" % ( standby.sync_state.capitalize(), client_type, ) json_output["application_name"] = standby.application_name json_output["sync_stage"] = sync_stage if getattr(standby, "client_addr", None): json_output.update( dict( communication="TCP/IP", ip_address=standby.client_addr, port=standby.client_port, host=standby.client_hostname or None, ) ) else: json_output["communication"] = "Unix domain socket" json_output.update( dict( user_name=standby.usename, current_state=standby.state, current_sync_state=standby.sync_state, ) ) if getattr(standby, "slot_name", None): json_output["replication_slot"] = standby.slot_name json_output.update( dict( wal_sender_pid=standby.pid, started_at=standby.backend_start.isoformat(sep=" "), ) ) if getattr(standby, "backend_xmin", None): json_output["standbys_xmin"] = standby.backend_xmin or None for lsn in lsn_diff.keys(): standby_key = lsn + "_lsn" if getattr(standby, standby_key, None): json_output.update( { lsn + "_lsn": getattr(standby, standby_key), lsn + "_lsn_diff": pretty_size(lsn_diff[lsn]), lsn + "_lsn_diff_bytes": lsn_diff[lsn], } ) def init_list_server(self, server_name, minimal=False): """ Init the list-servers command :param str server_name: the server we are listing """ self.json_output[server_name] = {} self.minimal = minimal def result_list_server(self, server_name, description=None): """ Output a result line of a list-servers command :param str server_name: the server is being checked :param str,None description: server description if applicable """ self.json_output[server_name] = dict(description=description) def init_show_server(self, server_name): """ Init the show-servers command output method :param str server_name: the server we are displaying """ self.json_output[server_name] = {} def result_show_server(self, server_name, server_info): """ Output the results of the show-servers command :param str server_name: the server we are displaying :param dict server_info: a dictionary containing the info to display """ for status, message in sorted(server_info.items()): if not isinstance(message, (int, str, bool, list, dict, type(None))): message = str(message) 
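# Values outside the basic JSON types (e.g. datetime or other objects)
# have just been coerced to str above, so the value stored below is always
# serialisable when close() dumps json_output.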
self.json_output[server_name][status] = message def init_check_wal_archive(self, server_name): """ Init the check-wal-archive command output method :param str server_name: the server we are displaying """ self.json_output[server_name] = {} def result_check_wal_archive(self, server_name): """ Output the results of the check-wal-archive command :param str server_name: the server we are displaying """ self.json_output[server_name] = ( "WAL archive check for server %s passed" % server_name ) class NagiosOutputWriter(ConsoleOutputWriter): """ Nagios output writer. This writer doesn't output anything to console. On close it writes a nagios-plugin compatible status """ def _out(self, message, args): """ Do not print anything on standard output """ def _err(self, message, args): """ Do not print anything on standard error """ def _parse_check_results(self): """ Parse the check results and return the servers checked and any issues. :return tuple: a tuple containing a list of checked servers, a list of all issues found and a list of additional performance detail. """ # List of all servers that have been checked servers = [] # List of servers reporting issues issues = [] # Nagios performance data perf_detail = [] for item in self.result_check_list: # Keep track of all the checked servers if item["server_name"] not in servers: servers.append(item["server_name"]) # Keep track of the servers with issues if not item["status"] and item["server_name"] not in issues: issues.append(item["server_name"]) # Build the performance data list if item["check"] == "backup minimum size": perf_detail.append( "%s=%dB" % (item["server_name"], int(item["perfdata"])) ) if item["check"] == "wal size": perf_detail.append( "%s_wals=%dB" % (item["server_name"], int(item["perfdata"])) ) return servers, issues, perf_detail def _summarise_server_issues(self, issues): """ Converts the supplied list of issues into a printable summary. :return tuple: A tuple where the first element is a string summarising each server with issues and the second element is a string containing the details of all failures for each server. """ fail_summary = [] details = [] for server in issues: # Join all the issues for a server. Output format is in the # form: # " FAILED: , ... " # All strings will be concatenated into the $SERVICEOUTPUT$ # macro of the Nagios output server_fail = "%s FAILED: %s" % ( server, ", ".join( [ item["check"] for item in self.result_check_list if item["server_name"] == server and not item["status"] ] ), ) fail_summary.append(server_fail) # Prepare an array with the detailed output for # the $LONGSERVICEOUTPUT$ macro of the Nagios output # line format: # .: FAILED # .: FAILED (Hint if present) # : FAILED # ..... 
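# For instance (hypothetical server and check names), the loop below would
# produce detail lines such as:
#   pg.backup maximum age: FAILED (interval provided: 1 day, latest backup age: 2 days)
#   pg.wal archive: FAILED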
for issue in self.result_check_list: if issue["server_name"] == server and not issue["status"]: fail_detail = "%s.%s: FAILED" % (server, issue["check"]) if issue["hint"]: fail_detail += " (%s)" % issue["hint"] details.append(fail_detail) return fail_summary, details def _print_check_failure(self, servers, issues, perf_detail): """Prints the output for a failed check.""" # Generate the performance data message - blank string if no perf detail perf_detail_message = perf_detail and "|%s" % " ".join(perf_detail) or "" fail_summary, details = self._summarise_server_issues(issues) # Append the summary of failures to the first line of the output # using * as delimiter if len(servers) == 1: print( "BARMAN CRITICAL - server %s has issues * %s%s" % (servers[0], " * ".join(fail_summary), perf_detail_message) ) else: print( "BARMAN CRITICAL - %d server out of %d have issues * " "%s%s" % ( len(issues), len(servers), " * ".join(fail_summary), perf_detail_message, ) ) # add the detailed list to the output for issue in details: print(issue) def _print_check_success(self, servers, issues=None, perf_detail=None): """Prints the output for a successful check.""" if issues is None: issues = [] # Generate the issues message - blank string if no issues issues_message = "".join([" * IGNORING: %s" % issue for issue in issues]) # Generate the performance data message - blank string if no perf detail perf_detail_message = perf_detail and "|%s" % " ".join(perf_detail) or "" # Some issues, but only in skipped server good = [item for item in servers if item not in issues] # Display the output message for a single server check if len(good) == 0: print("BARMAN OK - No server configured%s" % issues_message) elif len(good) == 1: print( "BARMAN OK - Ready to serve the Espresso backup " "for %s%s%s" % (good[0], issues_message, perf_detail_message) ) else: # Display the output message for several servers, using # '*' as delimiter print( "BARMAN OK - Ready to serve the Espresso backup " "for %d servers * %s%s%s" % (len(good), " * ".join(good), issues_message, perf_detail_message) ) def close(self): """ Display the result of a check run as expected by Nagios. Also set the exit code as 2 (CRITICAL) in case of errors """ global error_occurred, error_exit_code servers, issues, perf_detail = self._parse_check_results() # Global error (detected at configuration level) if len(issues) == 0 and error_occurred: print("BARMAN CRITICAL - Global configuration errors") error_exit_code = 2 return if len(issues) > 0 and error_occurred: self._print_check_failure(servers, issues, perf_detail) error_exit_code = 2 else: self._print_check_success(servers, issues, perf_detail) #: This dictionary acts as a registry of available OutputWriters AVAILABLE_WRITERS = { "console": ConsoleOutputWriter, "json": JsonOutputWriter, # nagios is not registered as it isn't a general purpose output writer # 'nagios': NagiosOutputWriter, } #: The default OutputWriter DEFAULT_WRITER = "console" #: the current active writer. Initialized according DEFAULT_WRITER on load _writer = AVAILABLE_WRITERS[DEFAULT_WRITER]() barman-2.18/barman/clients/0000755000621200062120000000000014172556766014004 5ustar 00000000000000barman-2.18/barman/clients/cloud_backup.py0000755000621200062120000002306214172556763017014 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. 
# # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging import os import re import tempfile from contextlib import closing from shutil import rmtree from barman.clients.cloud_cli import ( add_tag_argument, create_argument_parser, GeneralErrorExit, NetworkErrorExit, OperationErrorExit, UrlArgumentType, ) from barman.cloud import ( CloudBackupUploaderBarman, CloudBackupUploaderPostgres, configure_logging, ) from barman.cloud_providers import get_cloud_interface from barman.exceptions import ( BarmanException, PostgresConnectionError, UnrecoverableHookScriptError, ) from barman.postgres import PostgreSQLConnection from barman.utils import check_positive, check_size, force_str _find_space = re.compile(r"[\s]").search def __is_hook_script(): """Check the environment and determine if we are running as a hook script""" if "BARMAN_HOOK" in os.environ and "BARMAN_PHASE" in os.environ: if ( os.getenv("BARMAN_HOOK") in ("backup_script", "backup_retry_script") and os.getenv("BARMAN_PHASE") == "post" ): return True else: raise BarmanException( "barman-cloud-backup called as unsupported hook script: %s_%s" % (os.getenv("BARMAN_PHASE"), os.getenv("BARMAN_HOOK")) ) else: return False def quote_conninfo(value): """ Quote a connection info parameter :param str value: :rtype: str """ if not value: return "''" if not _find_space(value): return value return "'%s'" % value.replace("\\", "\\\\").replace("'", "\\'") def build_conninfo(config): """ Build a DSN to connect to postgres using command-line arguments """ conn_parts = [] # If -d specified a conninfo string, just return it if config.dbname is not None: if config.dbname == "" or "=" in config.dbname: return config.dbname if config.host: conn_parts.append("host=%s" % quote_conninfo(config.host)) if config.port: conn_parts.append("port=%s" % quote_conninfo(config.port)) if config.user: conn_parts.append("user=%s" % quote_conninfo(config.user)) if config.dbname: conn_parts.append("dbname=%s" % quote_conninfo(config.dbname)) return " ".join(conn_parts) def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) tempdir = tempfile.mkdtemp(prefix="barman-cloud-backup-") try: # Create any temporary file in the `tempdir` subdirectory tempfile.tempdir = tempdir cloud_interface = get_cloud_interface(config) if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) with closing(cloud_interface): # TODO: Should the setup be optional? 
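# setup_bucket() is expected to check whether the destination bucket or
# container already exists and to create it when it does not, so a first
# upload against an empty account does not fail.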
cloud_interface.setup_bucket() # Perform the backup uploader_kwargs = { "server_name": config.server_name, "compression": config.compression, "max_archive_size": config.max_archive_size, "cloud_interface": cloud_interface, } if __is_hook_script(): if "BARMAN_BACKUP_DIR" not in os.environ: raise BarmanException( "BARMAN_BACKUP_DIR environment variable not set" ) if "BARMAN_BACKUP_ID" not in os.environ: raise BarmanException( "BARMAN_BACKUP_ID environment variable not set" ) if os.getenv("BARMAN_STATUS") != "DONE": raise UnrecoverableHookScriptError( "backup in '%s' has status '%s' (status should be: DONE)" % (os.getenv("BARMAN_BACKUP_DIR"), os.getenv("BARMAN_STATUS")) ) uploader = CloudBackupUploaderBarman( backup_dir=os.getenv("BARMAN_BACKUP_DIR"), backup_id=os.getenv("BARMAN_BACKUP_ID"), **uploader_kwargs ) uploader.backup() else: conninfo = build_conninfo(config) postgres = PostgreSQLConnection( conninfo, config.immediate_checkpoint, application_name="barman_cloud_backup", ) try: postgres.connect() except PostgresConnectionError as exc: logging.error("Cannot connect to postgres: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise OperationErrorExit() with closing(postgres): uploader = CloudBackupUploaderPostgres( postgres=postgres, **uploader_kwargs ) uploader.backup() except KeyboardInterrupt as exc: logging.error("Barman cloud backup was interrupted by the user") logging.debug("Exception details:", exc_info=exc) raise OperationErrorExit() except UnrecoverableHookScriptError as exc: logging.error("Barman cloud backup exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise SystemExit(63) except Exception as exc: logging.error("Barman cloud backup exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() finally: # Remove the temporary directory and all the contained files rmtree(tempdir, ignore_errors=True) def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, s3_arguments, azure_arguments = create_argument_parser( description="This script can be used to perform a backup " "of a local PostgreSQL instance and ship " "the resulting tarball(s) to the Cloud. 
" "Currently AWS S3 and Azure Blob Storage are supported.", source_or_destination=UrlArgumentType.destination, ) compression = parser.add_mutually_exclusive_group() compression.add_argument( "-z", "--gzip", help="gzip-compress the WAL while uploading to the cloud", action="store_const", const="gz", dest="compression", ) compression.add_argument( "-j", "--bzip2", help="bzip2-compress the WAL while uploading to the cloud", action="store_const", const="bz2", dest="compression", ) compression.add_argument( "--snappy", help="snappy-compress the WAL while uploading to the cloud ", action="store_const", const="snappy", dest="compression", ) parser.add_argument( "-h", "--host", help="host or Unix socket for PostgreSQL connection " "(default: libpq settings)", ) parser.add_argument( "-p", "--port", help="port for PostgreSQL connection (default: libpq settings)", ) parser.add_argument( "-U", "--user", help="user name for PostgreSQL connection (default: libpq settings)", ) parser.add_argument( "--immediate-checkpoint", help="forces the initial checkpoint to be done as quickly as possible", action="store_true", ) parser.add_argument( "-J", "--jobs", type=check_positive, help="number of subprocesses to upload data to cloud storage (default: 2)", default=2, ) parser.add_argument( "-S", "--max-archive-size", type=check_size, help="maximum size of an archive when uploading to cloud storage " "(default: 100GB)", default="100GB", ) parser.add_argument( "-d", "--dbname", help="Database name or conninfo string for Postgres connection (default: postgres)", default="postgres", ) add_tag_argument( parser, name="tags", help="Tags to be added to all uploaded files in cloud storage", ) s3_arguments.add_argument( "-e", "--encryption", help="The encryption algorithm used when storing the uploaded data in S3. " "Allowed values: 'AES256'|'aws:kms'.", choices=["AES256", "aws:kms"], ) azure_arguments.add_argument( "--encryption-scope", help="The name of an encryption scope defined in the Azure Blob Storage " "service which is to be used to encrypt the data in Azure", ) return parser.parse_args(args=args) if __name__ == "__main__": main() barman-2.18/barman/clients/__init__.py0000644000621200062120000000132414172556763016112 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2019-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . barman-2.18/barman/clients/cloud_backup_keep.py0000644000621200062120000001030314172556763020007 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging from contextlib import closing from barman.annotations import KeepManager from barman.clients.cloud_cli import ( create_argument_parser, GeneralErrorExit, NetworkErrorExit, OperationErrorExit, ) from barman.cloud import CloudBackupCatalog, configure_logging from barman.cloud_providers import get_cloud_interface from barman.infofile import BackupInfo from barman.utils import force_str def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) if not cloud_interface.bucket_exists: logging.error("Bucket %s does not exist", cloud_interface.bucket_name) raise OperationErrorExit() catalog = CloudBackupCatalog(cloud_interface, config.server_name) if config.release: catalog.release_keep(config.backup_id) elif config.status: target = catalog.get_keep_target(config.backup_id) if target: print("Keep: %s" % target) else: print("Keep: nokeep") else: backup_info = catalog.get_backup_info(config.backup_id) if backup_info.status == BackupInfo.DONE: catalog.keep_backup(config.backup_id, config.target) else: logging.error( "Cannot add keep to backup %s because it has status %s. " "Only backups with status DONE can be kept.", config.backup_id, backup_info.status, ) raise OperationErrorExit() except Exception as exc: logging.error("Barman cloud keep exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, _, _ = create_argument_parser( description="This script can be used to tag backups in cloud storage as " "archival backups such that they will not be deleted. " "Currently AWS S3 and Azure Blob Storage are supported.", ) parser.add_argument( "backup_id", help="the backup ID of the backup to be kept", ) keep_options = parser.add_mutually_exclusive_group(required=True) keep_options.add_argument( "-r", "--release", help="If specified, the command will remove the keep annotation and the " "backup will be eligible for deletion", action="store_true", ) keep_options.add_argument( "-s", "--status", help="Print the keep status of the backup", action="store_true", ) keep_options.add_argument( "--target", help="Specify the recovery target for this backup", choices=[KeepManager.TARGET_FULL, KeepManager.TARGET_STANDALONE], ) return parser.parse_args(args=args) if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_cli.py0000644000621200062120000001342314172556763016313 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. 
# # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import argparse import csv import logging from enum import Enum import barman from barman.utils import force_str class OperationErrorExit(SystemExit): """ Dedicated exit code for errors where connectivity to the cloud provider was ok but the operation still failed. """ def __init__(self): super(OperationErrorExit, self).__init__(1) class NetworkErrorExit(SystemExit): """Dedicated exit code for network related errors.""" def __init__(self): super(NetworkErrorExit, self).__init__(2) class CLIErrorExit(SystemExit): """Dedicated exit code for CLI level errors.""" def __init__(self): super(CLIErrorExit, self).__init__(3) class GeneralErrorExit(SystemExit): """Dedicated exit code for general barman cloud errors.""" def __init__(self): super(GeneralErrorExit, self).__init__(4) class UrlArgumentType(Enum): source = "source" destination = "destination" def __parse_tag(tag): """Parse key,value tag with csv reader""" try: rows = list(csv.reader([tag], delimiter=",")) except csv.Error as exc: logging.error( "Error parsing tag %s: %s", tag, force_str(exc), ) raise CLIErrorExit() if len(rows) != 1 or len(rows[0]) != 2: logging.error( "Invalid tag format: %s", tag, ) raise CLIErrorExit() return tuple(rows[0]) def add_tag_argument(parser, name, help): parser.add_argument( "--%s" % name, type=__parse_tag, nargs="*", help=help, ) class CloudArgumentParser(argparse.ArgumentParser): """ArgumentParser which exits with CLIErrorExit on errors.""" def error(self, message): try: super(CloudArgumentParser, self).error(message) except SystemExit: raise CLIErrorExit() def create_argument_parser(description, source_or_destination=UrlArgumentType.source): """ Create a barman-cloud argument parser with the given description. Returns an `argparse.ArgumentParser` object which parses the core arguments and options for barman-cloud commands. """ parser = CloudArgumentParser( description=description, add_help=False, ) parser.add_argument( "%s_url" % source_or_destination.value, help=( "URL of the cloud %s, such as a bucket in AWS S3." " For example: `s3://bucket/path/to/folder`." ) % source_or_destination.value, ) parser.add_argument( "server_name", help="the name of the server as configured in Barman." 
) parser.add_argument( "-V", "--version", action="version", version="%%(prog)s %s" % barman.__version__ ) parser.add_argument("--help", action="help", help="show this help message and exit") verbosity = parser.add_mutually_exclusive_group() verbosity.add_argument( "-v", "--verbose", action="count", default=0, help="increase output verbosity (e.g., -vv is more than -v)", ) verbosity.add_argument( "-q", "--quiet", action="count", default=0, help="decrease output verbosity (e.g., -qq is less than -q)", ) parser.add_argument( "-t", "--test", help="Test cloud connectivity and exit", action="store_true", default=False, ) parser.add_argument( "--cloud-provider", help="The cloud provider to use as a storage backend", choices=["aws-s3", "azure-blob-storage"], default="aws-s3", ) s3_arguments = parser.add_argument_group( "Extra options for the aws-s3 cloud provider" ) s3_arguments.add_argument( "--endpoint-url", help="Override default S3 endpoint URL with the given one", ) s3_arguments.add_argument( "-P", "--profile", help="profile name (e.g. INI section in AWS credentials file)", ) azure_arguments = parser.add_argument_group( "Extra options for the azure-blob-storage cloud provider" ) azure_arguments.add_argument( "--credential", choices=["azure-cli", "managed-identity"], help="Optionally specify the type of credential to use when " "authenticating with Azure Blob Storage. If omitted then " "the credential will be obtained from the environment. If no " "credentials can be found in the environment then the default " "Azure authentication flow will be used", ) return parser, s3_arguments, azure_arguments azure = [ ( "--credential", { "choices": ["azure-cli", "managed-identity"], "help": ( "Optionally specify the type of credential to use when " "authenticating with Azure Blob Storage. If omitted then " "the credential will be obtained from the environment. If no " "credentials can be found in the environment then the default " "Azure authentication flow will be used" ), }, ), ] barman-2.18/barman/clients/cloud_compression.py0000644000621200062120000001423614172556763020110 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import bz2 import gzip import shutil from abc import ABCMeta, abstractmethod from io import BytesIO from barman.utils import with_metaclass def _try_import_snappy(): try: import snappy except ImportError: raise SystemExit("Missing required python module: python-snappy") return snappy class ChunkedCompressor(with_metaclass(ABCMeta, object)): """ Base class for all ChunkedCompressors """ @abstractmethod def add_chunk(self, data): """ Compresses the supplied data and returns all the compressed bytes. :param bytes data: The chunk of data to be compressed :return: The compressed data :rtype: bytes """ @abstractmethod def decompress(self, data): """ Decompresses the supplied chunk of data and returns at least part of the uncompressed data. 
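# --- Usage sketch (not part of Barman itself) -------------------------------
# An illustrative sketch of how a barman-cloud command builds its CLI on top of
# create_argument_parser and add_tag_argument from barman.clients.cloud_cli.
# The command description, bucket URL, server name and tag values are invented
# for the example only.
from barman.clients.cloud_cli import add_tag_argument, create_argument_parser

example_parser, example_s3_args, example_azure_args = create_argument_parser(
    description="Hypothetical barman-cloud command used only for this example."
)
# Attach a key,value tag option parsed by __parse_tag above
add_tag_argument(example_parser, name="tags", help="Tags to attach to uploaded objects")

example_config = example_parser.parse_args(
    ["s3://bucket/path", "pg", "--cloud-provider", "aws-s3",
     "--tags", "env,prod", "team,dba"]
)
assert example_config.source_url == "s3://bucket/path"
assert example_config.server_name == "pg"
assert example_config.tags == [("env", "prod"), ("team", "dba")]
# -----------------------------------------------------------------------------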
:param bytes data: The chunk of data to be decompressed :return: The decompressed data :rtype: bytes """ class SnappyCompressor(ChunkedCompressor): """ A ChunkedCompressor implementation based on python-snappy """ def __init__(self): snappy = _try_import_snappy() self.compressor = snappy.StreamCompressor() self.decompressor = snappy.StreamDecompressor() def add_chunk(self, data): """ Compresses the supplied data and returns all the compressed bytes. :param bytes data: The chunk of data to be compressed :return: The compressed data :rtype: bytes """ return self.compressor.add_chunk(data) def decompress(self, data): """ Decompresses the supplied chunk of data and returns at least part of the uncompressed data. :param bytes data: The chunk of data to be decompressed :return: The decompressed data :rtype: bytes """ return self.decompressor.decompress(data) def get_compressor(compression): """ Helper function which returns a ChunkedCompressor for the specified compression algorithm. Currently only snappy is supported. The other compression algorithms supported by barman cloud use the decompression built into TarFile. :param str compression: The compression algorithm to use. Can be set to snappy or any compression supported by the TarFile mode string. :return: A ChunkedCompressor capable of compressing and decompressing using the specified compression. :rtype: ChunkedCompressor """ if compression == "snappy": return SnappyCompressor() return None def compress(wal_file, compression): """ Compresses the supplied wal_file and returns a file-like object containing the compressed data. :param IOBase wal_file: A file-like object containing the WAL file data. :param str compression: The compression algorithm to apply. Can be one of: bzip2, gzip, snappy. :return: The compressed data :rtype: BytesIO """ if compression == "snappy": in_mem_snappy = BytesIO() snappy = _try_import_snappy() snappy.stream_compress(wal_file, in_mem_snappy) in_mem_snappy.seek(0) return in_mem_snappy elif compression == "gzip": # Create a BytesIO for in memory compression in_mem_gzip = BytesIO() with gzip.GzipFile(fileobj=in_mem_gzip, mode="wb") as gz: # copy the gzipped data in memory shutil.copyfileobj(wal_file, gz) in_mem_gzip.seek(0) return in_mem_gzip elif compression == "bzip2": # Create a BytesIO for in memory compression in_mem_bz2 = BytesIO(bz2.compress(wal_file.read())) in_mem_bz2.seek(0) return in_mem_bz2 else: raise ValueError("Unknown compression type: %s" % compression) def get_streaming_tar_mode(mode, compression): """ Helper function used in streaming uploads and downloads which appends the supplied compression to the raw filemode (either r or w) and returns the result. Any compression algorithms supported by barman-cloud but not Python TarFile are ignored so that barman-cloud can apply them itself. :param str mode: The file mode to use, either r or w. :param str compression: The compression algorithm to use. Can be set to snappy or any compression supported by the TarFile mode string. :return: The full filemode for a streaming tar file :rtype: str """ if compression == "snappy" or compression is None: return "%s|" % mode else: return "%s|%s" % (mode, compression) def decompress_to_file(blob, dest_file, compression): """ Decompresses the supplied blob of data into the dest_file file-like object using the specified compression. :param IOBase blob: A file-like object containing the compressed data. :param IOBase dest_file: A file-like object into which the uncompressed data should be written. 
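# --- Usage sketch (not part of Barman itself) -------------------------------
# A small, self-contained sketch of the in-memory compress() helper defined
# above. It gzip-compresses a fake WAL payload and checks the round trip with
# the standard library; the payload is obviously not a real 16MB WAL segment.
import gzip as _example_gzip
from io import BytesIO as _ExampleBytesIO

_example_payload = b"not really WAL data, just an example payload"
_example_compressed = compress(_ExampleBytesIO(_example_payload), "gzip")
assert _example_gzip.decompress(_example_compressed.read()) == _example_payload
# -----------------------------------------------------------------------------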
:param str compression: The compression algorithm to apply. Can be one of: bzip2, gzip, snappy. :rtype: None """ if compression == "snappy": snappy = _try_import_snappy() snappy.stream_decompress(blob, dest_file) return elif compression == "gzip": source_file = gzip.GzipFile(fileobj=blob, mode="rb") elif compression == "bzip2": source_file = bz2.BZ2File(blob, "rb") else: raise ValueError("Unknown compression type: %s" % compression) with source_file: shutil.copyfileobj(source_file, dest_file) barman-2.18/barman/clients/walrestore.py0000755000621200062120000003737314172556763016562 0ustar 00000000000000# -*- coding: utf-8 -*- # walrestore - Remote Barman WAL restore command for PostgreSQL # # This script remotely fetches WAL files from Barman via SSH, on demand. # It is intended to be used in restore_command in recovery configuration files # of PostgreSQL standby servers. Supports parallel fetching and # protects against SSH failures. # # See the help page for usage information. # # © Copyright EnterpriseDB UK Limited 2016-2022 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from __future__ import print_function import argparse import os import shutil import subprocess import sys import time import barman from barman.utils import force_str DEFAULT_USER = "barman" DEFAULT_SPOOL_DIR = "/var/tmp/walrestore" # The string_types list is used to identify strings # in a consistent way between python 2 and 3 if sys.version_info[0] == 3: string_types = (str,) else: string_types = (basestring,) # noqa def main(args=None): """ The main script entry point """ config = parse_arguments(args) # Do connectivity test if requested if config.test: connectivity_test(config) return # never reached # Check WAL destination is not a directory if os.path.isdir(config.wal_dest): exit_with_error( "WAL_DEST cannot be a directory: %s" % config.wal_dest, status=3 ) # Open the destination file try: dest_file = open(config.wal_dest, "wb") except EnvironmentError as e: exit_with_error( "Cannot open '%s' (WAL_DEST) for writing: %s" % (config.wal_dest, e), status=3, ) return # never reached # If the file is present in SPOOL_DIR use it and terminate try_deliver_from_spool(config, dest_file) # If required load the list of files to download in parallel additional_files = peek_additional_files(config) try: # Execute barman get-wal through the ssh connection ssh_process = RemoteGetWal(config, config.wal_name, dest_file) except EnvironmentError as e: exit_with_error('Error executing "ssh": %s' % e, sleep=config.sleep) return # never reached # Spawn a process for every additional file parallel_ssh_processes = spawn_additional_process(config, additional_files) # Wait for termination of every subprocess. 
If CTRL+C is pressed, # terminate all of them try: RemoteGetWal.wait_for_all() finally: # Cleanup failed spool files in case of errors for process in parallel_ssh_processes: if process.returncode != 0: os.unlink(process.dest_file) # If the command succeeded exit here if ssh_process.returncode == 0: sys.exit(0) # Report the exit code, remapping ssh failure code (255) to 2 if ssh_process.returncode == 255: exit_with_error("Connection problem with ssh", 2, sleep=config.sleep) else: exit_with_error( "Remote 'barman get-wal' command has failed!", ssh_process.returncode, sleep=config.sleep, ) def spawn_additional_process(config, additional_files): """ Execute additional barman get-wal processes :param argparse.Namespace config: the configuration from command line :param additional_files: A list of WAL file to be downloaded in parallel :return list[subprocess.Popen]: list of created processes """ processes = [] for wal_name in additional_files: spool_file_name = os.path.join(config.spool_dir, wal_name) try: # Spawn a process and write the output in the spool dir process = RemoteGetWal(config, wal_name, spool_file_name) processes.append(process) except EnvironmentError: # If execution has failed make sure the spool file is unlinked try: os.unlink(spool_file_name) except EnvironmentError: # Suppress unlink errors pass return processes def peek_additional_files(config): """ Invoke remote get-wal --peek to receive a list of wal files to copy :param argparse.Namespace config: the configuration from command line :returns set: a set of WAL file names from the peek command """ # If parallel downloading is not required return an empty array if not config.parallel: return [] # Make sure the SPOOL_DIR exists try: if not os.path.exists(config.spool_dir): os.mkdir(config.spool_dir) except EnvironmentError as e: exit_with_error("Cannot create '%s' directory: %s" % (config.spool_dir, e)) # Retrieve the list of files from remote additional_files = execute_peek(config) # Sanity check if len(additional_files) == 0 or additional_files[0] != config.wal_name: exit_with_error("The required file is not available: %s" % config.wal_name) # Remove the first element, as now we know is identical to config.wal_name del additional_files[0] return additional_files def build_ssh_command(config, wal_name, peek=0): """ Prepare an ssh command according to the arguments passed on command line :param argparse.Namespace config: the configuration from command line :param str wal_name: the wal_name get-wal parameter :param int peek: in :return list[str]: the ssh command as list of string """ ssh_command = [ "ssh", "-q", # quiet mode - suppress warnings "-T", # disable pseudo-terminal allocation "%s@%s" % (config.user, config.barman_host), "barman", ] if config.config: ssh_command.append("--config %s" % config.config) options = [] if config.test: options.append("--test") if peek: options.append("--peek '%s'" % peek) if config.compression: options.append("--%s" % config.compression) if config.partial: options.append("--partial") if options: get_wal_command = "get-wal %s '%s' '%s'" % ( " ".join(options), config.server_name, wal_name, ) else: get_wal_command = "get-wal '%s' '%s'" % (config.server_name, wal_name) ssh_command.append(get_wal_command) return ssh_command def execute_peek(config): """ Invoke remote get-wal --peek to receive a list of wal file to copy :param argparse.Namespace config: the configuration from command line :returns set: a set of WAL file names from the peek command """ # Build the peek command ssh_command = 
build_ssh_command(config, config.wal_name, config.parallel) # Issue the command try: output = subprocess.Popen(ssh_command, stdout=subprocess.PIPE).communicate() return list(output[0].decode().splitlines()) except subprocess.CalledProcessError as e: exit_with_error("Impossible to invoke remote get-wal --peek: %s" % e) def try_deliver_from_spool(config, dest_file): """ Search for the requested file in the spool directory. If is already present, then copy it locally and exit, return otherwise. :param argparse.Namespace config: the configuration from command line :param dest_file: The destination file object """ spool_file = os.path.join(config.spool_dir, config.wal_name) # id the file is not present, give up if not os.path.exists(spool_file): return try: shutil.copyfileobj(open(spool_file, "rb"), dest_file) os.unlink(spool_file) sys.exit(0) except IOError as e: exit_with_error( "Failure copying %s to %s: %s" % (spool_file, dest_file.name, e) ) def exit_with_error(message, status=2, sleep=0): """ Print ``message`` and terminate the script with ``status`` :param str message: message to print :param int status: script exit code :param int sleep: second to sleep before exiting """ print("ERROR: %s" % message, file=sys.stderr) # Sleep for config.sleep seconds if required if sleep: print("Sleeping for %d seconds." % sleep, file=sys.stderr) time.sleep(sleep) sys.exit(status) def connectivity_test(config): """ Invoke remote get-wal --test to test the connection with Barman server :param argparse.Namespace config: the configuration from command line """ # Build the peek command ssh_command = build_ssh_command(config, "dummy_wal_name") # Issue the command try: pipe = subprocess.Popen( ssh_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output = pipe.communicate() print(force_str(output[0])) sys.exit(pipe.returncode) except subprocess.CalledProcessError as e: exit_with_error("Impossible to invoke remote get-wal: %s" % e) def parse_arguments(args=None): """ Parse the command line arguments :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] :rtype: argparse.Namespace """ parser = argparse.ArgumentParser( description="This script will be used as a 'restore_command' " "based on the get-wal feature of Barman. " "A ssh connection will be opened to the Barman host.", ) parser.add_argument( "-V", "--version", action="version", version="%%(prog)s %s" % barman.__version__ ) parser.add_argument( "-U", "--user", default=DEFAULT_USER, help="The user used for the ssh connection to the Barman server. " "Defaults to '%(default)s'.", ) parser.add_argument( "-s", "--sleep", default=0, type=int, metavar="SECONDS", help="Sleep for SECONDS after a failure of get-wal request. " "Defaults to 0 (nowait).", ) parser.add_argument( "-p", "--parallel", default=0, type=int, metavar="JOBS", help="Specifies the number of files to peek and transfer " "in parallel. " "Defaults to 0 (disabled).", ) parser.add_argument( "--spool-dir", default=DEFAULT_SPOOL_DIR, metavar="SPOOL_DIR", help="Specifies spool directory for WAL files. 
Defaults to " "'{0}'.".format(DEFAULT_SPOOL_DIR), ) parser.add_argument( "-P", "--partial", help="retrieve also partial WAL files (.partial)", action="store_true", dest="partial", default=False, ) parser.add_argument( "-z", "--gzip", help="Transfer the WAL files compressed with gzip", action="store_const", const="gzip", dest="compression", ) parser.add_argument( "-j", "--bzip2", help="Transfer the WAL files compressed with bzip2", action="store_const", const="bzip2", dest="compression", ) parser.add_argument( "-c", "--config", metavar="CONFIG", help="configuration file on the Barman server", ) parser.add_argument( "-t", "--test", action="store_true", help="test both the connection and the configuration of the " "requested PostgreSQL server in Barman to make sure it is " "ready to receive WAL files. With this option, " "the 'wal_name' and 'wal_dest' mandatory arguments are ignored.", ) parser.add_argument( "barman_host", metavar="BARMAN_HOST", help="The host of the Barman server.", ) parser.add_argument( "server_name", metavar="SERVER_NAME", help="The server name configured in Barman from which WALs are taken.", ) parser.add_argument( "wal_name", metavar="WAL_NAME", help="The value of the '%%f' keyword (according to 'restore_command').", ) parser.add_argument( "wal_dest", metavar="WAL_DEST", help="The value of the '%%p' keyword (according to 'restore_command').", ) return parser.parse_args(args=args) class RemoteGetWal(object): processes = set() """ The list of processes that has been spawned by RemoteGetWal """ def __init__(self, config, wal_name, dest_file): """ Spawn a process that download a WAL from remote. If needed decompress the remote stream on the fly. :param argparse.Namespace config: the configuration from command line :param wal_name: The name of WAL to download :param dest_file: The destination file name or a writable file object """ self.config = config self.wal_name = wal_name self.decompressor = None self.dest_file = None # If a string has been passed, it's the name of the destination file # We convert it in a writable binary file object if isinstance(dest_file, string_types): self.dest_file = dest_file dest_file = open(dest_file, "wb") with dest_file: # If compression has been required, we need to spawn two processes if config.compression: # Spawn a remote get-wal process self.ssh_process = subprocess.Popen( build_ssh_command(config, wal_name), stdout=subprocess.PIPE ) # Spawn the local decompressor self.decompressor = subprocess.Popen( [config.compression, "-d"], stdin=self.ssh_process.stdout, stdout=dest_file, ) # Close the pipe descriptor, letting the decompressor process # to receive the SIGPIPE self.ssh_process.stdout.close() else: # With no compression only the remote get-wal process # is required self.ssh_process = subprocess.Popen( build_ssh_command(config, wal_name), stdout=dest_file ) # Register the spawned processes in the class registry self.processes.add(self.ssh_process) if self.decompressor: self.processes.add(self.decompressor) @classmethod def wait_for_all(cls): """ Wait for the termination of all the registered spawned processes. """ try: while len(cls.processes): time.sleep(0.1) for process in cls.processes.copy(): if process.poll() is not None: cls.processes.remove(process) except KeyboardInterrupt: # If a SIGINT has been received, make sure that every subprocess # terminate for process in cls.processes: process.kill() exit_with_error("SIGINT received! Terminating.") @property def returncode(self): """ Return the exit code of the RemoteGetWal processes. 
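# --- Usage sketch (not part of Barman itself) -------------------------------
# A hedged sketch of the ssh command line produced by build_ssh_command
# (defined above) for a plain, uncompressed get-wal request; the host, user
# and server names are invented for the example.
from argparse import Namespace as _ExampleNamespace

_example_config = _ExampleNamespace(
    user="barman",
    barman_host="backup.example.com",
    config=None,
    test=False,
    compression=None,
    partial=False,
    server_name="pg",
)
_example_cmd = build_ssh_command(_example_config, "000000010000000000000001")
assert _example_cmd == [
    "ssh", "-q", "-T", "barman@backup.example.com", "barman",
    "get-wal 'pg' '000000010000000000000001'",
]
# -----------------------------------------------------------------------------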
A remote get-wal process return code is 0 only if both the remote get-wal process and the eventual decompressor return 0 :return: exit code of the RemoteGetWal processes """ if self.ssh_process.returncode != 0: return self.ssh_process.returncode if self.decompressor: if self.decompressor.returncode != 0: return self.decompressor.returncode return 0 if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_backup_delete.py0000644000621200062120000003165514172556763020342 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging import os from contextlib import closing from operator import attrgetter from barman.backup import BackupManager from barman.clients.cloud_cli import ( create_argument_parser, CLIErrorExit, GeneralErrorExit, NetworkErrorExit, OperationErrorExit, ) from barman.cloud import CloudBackupCatalog, configure_logging from barman.cloud_providers import get_cloud_interface from barman.exceptions import InvalidRetentionPolicy from barman.retention_policies import RetentionPolicyFactory from barman.utils import force_str from barman import xlog def _get_files_for_backup(catalog, backup_info): backup_files = [] # Sort the files by OID so that we always get a stable order. The PGDATA dir # has no OID so we use a -1 for sorting purposes, such that it always sorts # ahead of the tablespaces. for oid, backup_file in sorted( catalog.get_backup_files(backup_info, allow_missing=True).items(), key=lambda x: x[0] if x[0] else -1, ): key = oid or "PGDATA" for file_info in [backup_file] + sorted( backup_file.additional_files, key=attrgetter("path") ): # Silently skip files which could not be found - if they don't exist # then not being able to delete them is not an error condition here if file_info.path is not None: logging.debug( "Will delete archive for %s at %s" % (key, file_info.path) ) backup_files.append(file_info.path) return backup_files def _remove_wals_for_backup( cloud_interface, catalog, deleted_backup, dry_run, skip_wal_cleanup_if_standalone=True, ): # An implementation of BackupManager.remove_wal_before_backup which does not # use xlogdb, since xlogdb is not available to barman-cloud should_remove_wals, wal_ranges_to_protect = BackupManager.should_remove_wals( deleted_backup, catalog.get_backup_list(), keep_manager=catalog, skip_wal_cleanup_if_standalone=skip_wal_cleanup_if_standalone, ) next_backup = BackupManager.find_next_backup_in( catalog.get_backup_list(), deleted_backup.backup_id ) wals_to_delete = {} if should_remove_wals: # There is no previous backup or all previous backups are archival # standalone backups, so we can remove unused WALs (those WALs not # required by standalone archival backups). # If there is a next backup then all unused WALs up to the begin_wal # of the next backup can be removed. 
# If there is no next backup then there are no remaining backups, # because we must assume non-exclusive backups are taken, we can only # safely delete unused WALs up to begin_wal of the deleted backup. # See comments in barman.backup.BackupManager.delete_backup. if next_backup: remove_until = next_backup else: remove_until = deleted_backup # A WAL is only a candidate for deletion if it is on the same timeline so we # use BackupManager to get a set of all other timelines with backups so that # we can preserve all WALs on other timelines. timelines_to_protect = BackupManager.get_timelines_to_protect( remove_until=remove_until, deleted_backup=deleted_backup, available_backups=catalog.get_backup_list(), ) try: wal_paths = catalog.get_wal_paths() except Exception as exc: logging.error( "Cannot clean up WALs for backup %s because an error occurred listing WALs: %s", deleted_backup.backup_id, force_str(exc), ) return for wal_name, wal in wal_paths.items(): if xlog.is_history_file(wal_name): continue if timelines_to_protect: tli, _, _ = xlog.decode_segment_name(wal_name) if tli in timelines_to_protect: continue # Check if the WAL is in a protected range, required by an archival # standalone backup - so do not delete it if xlog.is_backup_file(wal_name): # If we have a backup file, truncate the name for the range check range_check_wal_name = wal_name[:24] else: range_check_wal_name = wal_name if any( range_check_wal_name >= begin_wal and range_check_wal_name <= end_wal for begin_wal, end_wal in wal_ranges_to_protect ): continue if wal_name < remove_until.begin_wal: wals_to_delete[wal_name] = wal # Explicitly sort because dicts are not ordered in python < 3.6 wal_paths_to_delete = sorted(wals_to_delete.values()) if len(wal_paths_to_delete) > 0: if not dry_run: try: cloud_interface.delete_objects(wal_paths_to_delete) except Exception as exc: logging.error( "Could not delete the following WALs for backup %s: %s, Reason: %s", deleted_backup.backup_id, wal_paths_to_delete, force_str(exc), ) # Return early so that we leave the WALs in the local cache so they # can be cleaned up should there be a subsequent backup deletion. 
return else: print( "Skipping deletion of objects %s due to --dry-run option" % wal_paths_to_delete ) for wal_name in wals_to_delete.keys(): catalog.remove_wal_from_cache(wal_name) def _delete_backup( cloud_interface, catalog, backup_id, dry_run=True, skip_wal_cleanup_if_standalone=True, ): backup_info = catalog.get_backup_info(backup_id) if not backup_info: logging.warning("Backup %s does not exist", backup_id) return objects_to_delete = _get_files_for_backup(catalog, backup_info) backup_info_path = os.path.join( catalog.prefix, backup_info.backup_id, "backup.info" ) logging.debug("Will delete backup.info file at %s" % backup_info_path) if not dry_run: try: cloud_interface.delete_objects(objects_to_delete) # Do not try to delete backup.info until we have successfully deleted # everything else so that it is possible to retry the operation should # we fail to delete any backup file cloud_interface.delete_objects([backup_info_path]) except Exception as exc: logging.error("Could not delete backup %s: %s", backup_id, force_str(exc)) raise OperationErrorExit() else: print( "Skipping deletion of objects %s due to --dry-run option" % (objects_to_delete + [backup_info_path]) ) _remove_wals_for_backup( cloud_interface, catalog, backup_info, dry_run, skip_wal_cleanup_if_standalone ) # It is important that the backup is removed from the catalog after cleaning # up the WALs because the code in _remove_wals_for_backup depends on the # deleted backup existing in the backup catalog catalog.remove_backup_from_cache(backup_id) def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) if not cloud_interface.bucket_exists: logging.error("Bucket %s does not exist", cloud_interface.bucket_name) raise OperationErrorExit() catalog = CloudBackupCatalog( cloud_interface=cloud_interface, server_name=config.server_name ) # Call catalog.get_backup_list now so we know we can read the whole catalog # (the results are cached so this does not result in extra calls to cloud # storage) catalog.get_backup_list() if len(catalog.unreadable_backups) > 0: logging.error( "Cannot read the following backups: %s\n" "Unsafe to proceed with deletion due to failure reading backup catalog" % catalog.unreadable_backups ) raise OperationErrorExit() if config.backup_id: # Because we only care about one backup, skip the annotation cache # because it is only helpful when dealing with multiple backups if catalog.should_keep_backup(config.backup_id, use_cache=False): logging.error( "Skipping delete of backup %s for server %s " "as it has a current keep request. 
If you really " "want to delete this backup please remove the keep " "and try again.", config.backup_id, config.server_name, ) raise OperationErrorExit() _delete_backup( cloud_interface, catalog, config.backup_id, config.dry_run ) elif config.retention_policy: try: retention_policy = RetentionPolicyFactory.create( "retention_policy", config.retention_policy, server_name=config.server_name, catalog=catalog, ) except InvalidRetentionPolicy as exc: logging.error( "Could not create retention policy %s: %s", config.retention_policy, force_str(exc), ) raise CLIErrorExit() # Sort to ensure that we delete the backups in ascending order, that is # from oldest to newest. This ensures that the relevant WALs will be cleaned # up after each backup is deleted. backups_to_delete = sorted( [ backup_id for backup_id, status in retention_policy.report().items() if status == "OBSOLETE" ] ) for backup_id in backups_to_delete: _delete_backup( cloud_interface, catalog, backup_id, config.dry_run, skip_wal_cleanup_if_standalone=False, ) except Exception as exc: logging.error("Barman cloud backup delete exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, _, _ = create_argument_parser( description="This script can be used to delete backups " "made with barman-cloud-backup command. " "Currently AWS S3 and Azure Blob Storage are supported.", ) delete_arguments = parser.add_mutually_exclusive_group(required=True) delete_arguments.add_argument( "-b", "--backup-id", help="Backup ID of the backup to be deleted", ) delete_arguments.add_argument( "-r", "--retention-policy", help="If specified, delete all backups eligible for deletion according to the " "supplied retention policy. Syntax: REDUNDANCY value | RECOVERY WINDOW OF " "value {DAYS | WEEKS | MONTHS}", ) parser.add_argument( "--dry-run", action="store_true", help="Find the objects which need to be deleted but do not delete them", ) return parser.parse_args(args=args) if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_walarchive.py0000755000621200062120000002473614172556763017705 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . 
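# --- Usage sketch (not part of Barman itself) -------------------------------
# An illustrative sketch of the delete client's two mutually exclusive modes,
# using the parse_arguments function defined above. The bucket URL, server
# name, backup ID and policy value are example assumptions only.
#
#     from barman.clients import cloud_backup_delete
#
#     # Dry-run deletion of every backup made OBSOLETE by a redundancy policy.
#     config = cloud_backup_delete.parse_arguments(
#         ["s3://bucket/path", "pg", "--retention-policy", "REDUNDANCY 2", "--dry-run"]
#     )
#     assert config.retention_policy == "REDUNDANCY 2" and config.dry_run
#
#     # Deletion of a single backup by ID (without --dry-run objects really
#     # are removed from cloud storage).
#     config = cloud_backup_delete.parse_arguments(
#         ["s3://bucket/path", "pg", "--backup-id", "20220101T000000"]
#     )
#     assert config.backup_id == "20220101T000000" and not config.dry_run
# -----------------------------------------------------------------------------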
import logging import os import os.path from contextlib import closing from barman.clients.cloud_cli import ( add_tag_argument, create_argument_parser, CLIErrorExit, GeneralErrorExit, NetworkErrorExit, UrlArgumentType, ) from barman.cloud import configure_logging from barman.clients.cloud_compression import compress from barman.cloud_providers import get_cloud_interface from barman.exceptions import BarmanException from barman.utils import check_positive, check_size, force_str from barman.xlog import hash_dir, is_any_xlog_file, is_history_file def __is_hook_script(): """Check the environment and determine if we are running as a hook script""" if "BARMAN_HOOK" in os.environ and "BARMAN_PHASE" in os.environ: if ( os.getenv("BARMAN_HOOK") in ("archive_script", "archive_retry_script") and os.getenv("BARMAN_PHASE") == "pre" ): return True else: raise BarmanException( "barman-cloud-wal-archive called as unsupported hook script: %s_%s" % (os.getenv("BARMAN_PHASE"), os.getenv("BARMAN_HOOK")) ) else: return False def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) # Read wal_path from environment if we're a hook script if __is_hook_script(): if "BARMAN_FILE" not in os.environ: raise BarmanException("Expected environment variable BARMAN_FILE not set") config.wal_path = os.getenv("BARMAN_FILE") else: if config.wal_path is None: raise BarmanException("the following arguments are required: wal_path") # Validate the WAL file name before uploading it if not is_any_xlog_file(config.wal_path): logging.error("%s is an invalid name for a WAL file" % config.wal_path) raise CLIErrorExit() try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): uploader = CloudWalUploader( cloud_interface=cloud_interface, server_name=config.server_name, compression=config.compression, ) if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) # TODO: Should the setup be optional? cloud_interface.setup_bucket() upload_kwargs = {} if is_history_file(config.wal_path): upload_kwargs["override_tags"] = config.history_tags uploader.upload_wal(config.wal_path, **upload_kwargs) except Exception as exc: logging.error("Barman cloud WAL archiver exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, s3_arguments, azure_arguments = create_argument_parser( description="This script can be used in the `archive_command` " "of a PostgreSQL server to ship WAL files to the Cloud. 
" "Currently AWS S3 and Azure Blob Storage are supported.", source_or_destination=UrlArgumentType.destination, ) parser.add_argument( "wal_path", nargs="?", help="the value of the '%%p' keyword (according to 'archive_command').", default=None, ) compression = parser.add_mutually_exclusive_group() compression.add_argument( "-z", "--gzip", help="gzip-compress the WAL while uploading to the cloud " "(should not be used with python < 3.2)", action="store_const", const="gzip", dest="compression", ) compression.add_argument( "-j", "--bzip2", help="bzip2-compress the WAL while uploading to the cloud " "(should not be used with python < 3.3)", action="store_const", const="bzip2", dest="compression", ) compression.add_argument( "--snappy", help="snappy-compress the WAL while uploading to the cloud " "(requires optional python-snappy library)", action="store_const", const="snappy", dest="compression", ) add_tag_argument( parser, name="tags", help="Tags to be added to archived WAL files in cloud storage", ) add_tag_argument( parser, name="history-tags", help="Tags to be added to archived history files in cloud storage", ) s3_arguments.add_argument( "-e", "--encryption", help="The encryption algorithm used when storing the uploaded data in S3. " "Allowed values: 'AES256'|'aws:kms'.", choices=["AES256", "aws:kms"], metavar="ENCRYPTION", ) azure_arguments.add_argument( "--encryption-scope", help="The name of an encryption scope defined in the Azure Blob Storage " "service which is to be used to encrypt the data in Azure", ) azure_arguments.add_argument( "--max-block-size", help="The chunk size to be used when uploading an object via the " "concurrent chunk method (default: 4MB).", type=check_size, default="4MB", ) azure_arguments.add_argument( "--max-concurrency", help="The maximum number of chunks to be uploaded concurrently (default: 1).", type=check_positive, default=1, ) azure_arguments.add_argument( "--max-single-put-size", help="Maximum size for which the Azure client will upload an object in a " "single request (default: 64MB). If this is set lower than the PostgreSQL " "WAL segment size after any applied compression then the concurrent chunk " "upload method for WAL archiving will be used.", default="64MB", type=check_size, ) return parser.parse_args(args=args) class CloudWalUploader(object): """ Cloud storage upload client """ def __init__(self, cloud_interface, server_name, compression=None): """ Object responsible for handling interactions with cloud storage :param CloudInterface cloud_interface: The interface to use to upload the backup :param str server_name: The name of the server as configured in Barman :param str compression: Compression algorithm to use """ self.cloud_interface = cloud_interface self.compression = compression self.server_name = server_name def upload_wal(self, wal_path, override_tags=None): """ Upload a WAL file from postgres to cloud storage :param str wal_path: Full path of the WAL file :param List[tuple] override_tags: List of k,v tuples which should override any tags already defined in the cloud interface """ # Extract the WAL file wal_name = self.retrieve_wal_name(wal_path) # Use the correct file object for the upload (simple|gzip|bz2) file_object = self.retrieve_file_obj(wal_path) # Correctly format the destination path destination = os.path.join( self.cloud_interface.path, self.server_name, "wals", hash_dir(wal_path), wal_name, ) # Put the file in the correct bucket. 
# The put method will handle automatically multipart upload self.cloud_interface.upload_fileobj( fileobj=file_object, key=destination, override_tags=override_tags ) def retrieve_file_obj(self, wal_path): """ Create the correct type of file object necessary for the file transfer. If no compression is required a simple File object is returned. In case of compression, a BytesIO object is returned, containing the result of the compression. NOTE: the Wal files are actually compressed straight into memory, thanks to the usual small dimension of the WAL. This could change in the future because the WAL files dimension could be more than 16MB on some postgres install. TODO: Evaluate using tempfile if the WAL is bigger than 16MB :param str wal_path: :return File: simple or compressed file object """ # Read the wal_file in binary mode wal_file = open(wal_path, "rb") # return the opened file if is uncompressed if not self.compression: return wal_file return compress(wal_file, self.compression) def retrieve_wal_name(self, wal_path): """ Extract the name of the WAL file from the complete path. If no compression is specified, then the simple file name is returned. In case of compression, the correct file extension is applied to the WAL file name. :param str wal_path: the WAL file complete path :return str: WAL file name """ # Extract the WAL name wal_name = os.path.basename(wal_path) # return the plain file name if no compression is specified if not self.compression: return wal_name if self.compression == "gzip": # add gz extension return "%s.gz" % wal_name elif self.compression == "bzip2": # add bz2 extension return "%s.bz2" % wal_name elif self.compression == "snappy": # add snappy extension return "%s.snappy" % wal_name else: raise ValueError("Unknown compression type: %s" % self.compression) if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_check_wal_archive.py0000644000621200062120000000571714172556763021174 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging from barman.clients.cloud_cli import ( create_argument_parser, GeneralErrorExit, OperationErrorExit, NetworkErrorExit, UrlArgumentType, ) from barman.cloud import configure_logging, CloudBackupCatalog from barman.cloud_providers import get_cloud_interface from barman.exceptions import WalArchiveContentError from barman.utils import force_str, check_positive from barman.xlog import check_archive_usable def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. 
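# --- Usage sketch (not part of Barman itself) -------------------------------
# A short sketch of the object naming used by upload_wal above: the archive
# key is <prefix>/<server>/wals/<hash_dir(wal)>/<wal name + extension>, where
# retrieve_wal_name adds the compression suffix. Passing cloud_interface=None
# is acceptable here only because retrieve_wal_name never touches it; the
# server name and WAL path are example values.
from barman.clients.cloud_walarchive import CloudWalUploader as _ExampleUploader

_example_uploader = _ExampleUploader(
    cloud_interface=None, server_name="pg", compression="gzip"
)
assert (
    _example_uploader.retrieve_wal_name("/pgdata/pg_wal/000000010000000000000002")
    == "000000010000000000000002.gz"
)
# -----------------------------------------------------------------------------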
When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) try: cloud_interface = get_cloud_interface(config) if not cloud_interface.test_connectivity(): # Deliberately raise an error if we cannot connect raise NetworkErrorExit() if not cloud_interface.bucket_exists: # If the bucket does not exist then the check should pass return catalog = CloudBackupCatalog(cloud_interface, config.server_name) wals = list(catalog.get_wal_paths().keys()) check_archive_usable( wals, timeline=config.timeline, ) except WalArchiveContentError as err: logging.error( "WAL archive check failed for server %s: %s", config.server_name, force_str(err), ) raise OperationErrorExit() except Exception as exc: logging.error("Barman cloud WAL archive check exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, _, _ = create_argument_parser( description="Checks that the WAL archive on the specified cloud storage " "can be safely used for a new PostgreSQL server.", source_or_destination=UrlArgumentType.destination, ) parser.add_argument( "--timeline", help="The earliest timeline whose WALs should cause the check to fail", type=check_positive, ) return parser.parse_args(args=args) if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_restore.py0000644000621200062120000002052614172556763017231 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging import os from contextlib import closing from barman.clients.cloud_cli import ( CLIErrorExit, create_argument_parser, GeneralErrorExit, NetworkErrorExit, OperationErrorExit, ) from barman.cloud import CloudBackupCatalog, configure_logging from barman.cloud_providers import get_cloud_interface from barman.utils import force_str def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. 
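# --- Usage sketch (not part of Barman itself) -------------------------------
# A minimal sketch of the WAL archive check client's options, using the
# parse_arguments function defined above; the module path, bucket URL, server
# name and timeline value are example assumptions only.
#
#     from barman.clients import cloud_check_wal_archive
#
#     config = cloud_check_wal_archive.parse_arguments(
#         ["s3://bucket/path", "pg", "--timeline", "2"]
#     )
#     assert config.destination_url == "s3://bucket/path"
#     assert int(config.timeline) == 2
# -----------------------------------------------------------------------------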
When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) # Validate the destination directory before starting recovery if os.path.exists(config.recovery_dir) and os.listdir(config.recovery_dir): logging.error( "Destination %s already exists and it is not empty", config.recovery_dir ) raise OperationErrorExit() try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): downloader = CloudBackupDownloader( cloud_interface=cloud_interface, server_name=config.server_name ) if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) if not cloud_interface.bucket_exists: logging.error("Bucket %s does not exist", cloud_interface.bucket_name) raise OperationErrorExit() downloader.download_backup( config.backup_id, config.recovery_dir, tablespace_map(config.tablespace), ) except KeyboardInterrupt as exc: logging.error("Barman cloud restore was interrupted by the user") logging.debug("Exception details:", exc_info=exc) raise OperationErrorExit() except Exception as exc: logging.error("Barman cloud restore exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, _, _ = create_argument_parser( description="This script can be used to download a backup " "previously made with barman-cloud-backup command." "Currently AWS S3 and Azure Blob Storage are supported.", ) parser.add_argument("backup_id", help="the backup ID") parser.add_argument("recovery_dir", help="the path to a directory for recovery.") parser.add_argument( "--tablespace", help="tablespace relocation rule", metavar="NAME:LOCATION", action="append", default=[], ) return parser.parse_args(args=args) def tablespace_map(rules): """ Return a mapping from tablespace names to locations built from any `--tablespace name:/loc/ation` rules specified. """ tablespaces = {} for rule in rules: try: tablespaces.update([rule.split(":", 1)]) except ValueError: logging.error( "Invalid tablespace relocation rule '%s'\n" "HINT: The valid syntax for a relocation rule is " "NAME:LOCATION", rule, ) raise CLIErrorExit() return tablespaces class CloudBackupDownloader(object): """ Cloud storage download client """ def __init__(self, cloud_interface, server_name): """ Object responsible for handling interactions with cloud storage :param CloudInterface cloud_interface: The interface to use to upload the backup :param str server_name: The name of the server as configured in Barman """ self.cloud_interface = cloud_interface self.server_name = server_name self.catalog = CloudBackupCatalog(cloud_interface, server_name) def download_backup(self, backup_id, destination_dir, tablespaces): """ Download a backup from cloud storage :param str backup_id: The backup id to restore :param str destination_dir: Path to the destination directory """ backup_info = self.catalog.get_backup_info(backup_id) if not backup_info: logging.error( "Backup %s for server %s does not exists", backup_id, self.server_name ) raise OperationErrorExit() backup_files = self.catalog.get_backup_files(backup_info) # We must download and restore a bunch of .tar files that contain PGDATA # and each tablespace. First, we determine a target directory to extract # each tar file into and record these in copy_jobs. 
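# --- Usage sketch (not part of Barman itself) -------------------------------
# A tiny sketch of the --tablespace relocation rules handled by tablespace_map
# above: each NAME:LOCATION rule is split on the first colon and collected in
# a dict. Tablespace names and paths are invented for the example.
_example_relocations = tablespace_map(
    ["tbs_data:/srv/pg/tbs_data", "tbs_index:/srv/pg/tbs_index"]
)
assert _example_relocations == {
    "tbs_data": "/srv/pg/tbs_data",
    "tbs_index": "/srv/pg/tbs_index",
}
# -----------------------------------------------------------------------------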
For each tablespace, # the location may be overriden by `--tablespace name:/new/location` on # the command-line; and we must also add an entry to link_jobs to create # a symlink from $PGDATA/pg_tblspc/oid to the correct location after the # downloads. copy_jobs = [] link_jobs = [] for oid in backup_files: file_info = backup_files[oid] # PGDATA is restored where requested (destination_dir) if oid is None: target_dir = destination_dir else: for tblspc in backup_info.tablespaces: if oid == tblspc.oid: target_dir = tblspc.location if tblspc.name in tablespaces: target_dir = os.path.realpath(tablespaces[tblspc.name]) logging.debug( "Tablespace %s (oid=%s) will be located at %s", tblspc.name, oid, target_dir, ) link_jobs.append( ["%s/pg_tblspc/%s" % (destination_dir, oid), target_dir] ) break else: raise AssertionError( "The backup file oid '%s' must be present " "in backupinfo.tablespaces list" ) # Validate the destination directory before starting recovery if os.path.exists(target_dir) and os.listdir(target_dir): logging.error( "Destination %s already exists and it is not empty", target_dir ) raise OperationErrorExit() copy_jobs.append([file_info, target_dir]) for additional_file in file_info.additional_files: copy_jobs.append([additional_file, target_dir]) # Now it's time to download the files for file_info, target_dir in copy_jobs: # Download the file logging.debug( "Extracting %s to %s (%s)", file_info.path, target_dir, "decompressing " + file_info.compression if file_info.compression else "no compression", ) self.cloud_interface.extract_tar(file_info.path, target_dir) for link, target in link_jobs: os.symlink(target, link) # If we did not restore the pg_wal directory from one of the uploaded # backup files, we must recreate it here. (If pg_wal was originally a # symlink, it would not have been uploaded.) wal_path = os.path.join(destination_dir, backup_info.wal_directory()) if not os.path.exists(wal_path): os.mkdir(wal_path) if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_walrestore.py0000644000621200062120000001503314172556763017732 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging import os import sys from contextlib import closing from barman.clients.cloud_cli import ( create_argument_parser, CLIErrorExit, GeneralErrorExit, NetworkErrorExit, OperationErrorExit, ) from barman.cloud import configure_logging, ALLOWED_COMPRESSIONS from barman.cloud_providers import get_cloud_interface from barman.exceptions import BarmanException from barman.utils import force_str from barman.xlog import hash_dir, is_any_xlog_file, is_backup_file def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. 
When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) # Validate the WAL file name before downloading it if not is_any_xlog_file(config.wal_name): logging.error("%s is an invalid name for a WAL file" % config.wal_name) raise CLIErrorExit() try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): downloader = CloudWalDownloader( cloud_interface=cloud_interface, server_name=config.server_name ) if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) if not cloud_interface.bucket_exists: logging.error("Bucket %s does not exist", cloud_interface.bucket_name) raise OperationErrorExit() downloader.download_wal(config.wal_name, config.wal_dest) except Exception as exc: logging.error("Barman cloud WAL restore exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, _, _ = create_argument_parser( description="This script can be used as a `restore_command` " "to download WAL files previously archived with " "barman-cloud-wal-archive command. " "Currently AWS S3 and Azure Blob Storage are supported.", ) parser.add_argument( "wal_name", help="The value of the '%%f' keyword (according to 'restore_command').", ) parser.add_argument( "wal_dest", help="The value of the '%%p' keyword (according to 'restore_command').", ) return parser.parse_args(args=args) class CloudWalDownloader(object): """ Cloud storage download client """ def __init__(self, cloud_interface, server_name): """ Object responsible for handling interactions with cloud storage :param CloudInterface cloud_interface: The interface to use to upload the backup :param str server_name: The name of the server as configured in Barman """ self.cloud_interface = cloud_interface self.server_name = server_name def download_wal(self, wal_name, wal_dest): """ Download a WAL file from cloud storage :param str wal_name: Name of the WAL file :param str wal_dest: Full path of the destination WAL file """ # Correctly format the source path on s3 source_dir = os.path.join( self.cloud_interface.path, self.server_name, "wals", hash_dir(wal_name) ) # Add a path separator if needed if not source_dir.endswith(os.path.sep): source_dir += os.path.sep wal_path = os.path.join(source_dir, wal_name) remote_name = None # Automatically detect compression based on the file extension compression = None for item in self.cloud_interface.list_bucket(source_dir): # perfect match (uncompressed file) if item == wal_path: remote_name = item # look for compressed files or .partial files elif item.startswith(wal_path): # Detect compression basename = item for e, c in ALLOWED_COMPRESSIONS.items(): if item[-len(e) :] == e: # Strip extension basename = basename[: -len(e)] compression = c break # Check basename is a known xlog file (.partial?) 
if not is_any_xlog_file(basename): logging.warning("Unknown WAL file: %s", item) continue # Exclude backup informative files (not needed in recovery) elif is_backup_file(basename): logging.info("Skipping backup file: %s", item) continue # Found candidate remote_name = item logging.info( "Found WAL %s for server %s as %s", wal_name, self.server_name, remote_name, ) break if not remote_name: logging.info( "WAL file %s for server %s does not exists", wal_name, self.server_name ) raise OperationErrorExit() if compression and sys.version_info < (3, 0, 0): raise BarmanException( "Compressed WALs cannot be restored with Python 2.x - " "please upgrade to a supported version of Python 3" ) # Download the file logging.debug( "Downloading %s to %s (%s)", remote_name, wal_dest, "decompressing " + compression if compression else "no compression", ) self.cloud_interface.download_file(remote_name, wal_dest, compression) if __name__ == "__main__": main() barman-2.18/barman/clients/walarchive.py0000755000621200062120000002543614172556763016515 0ustar 00000000000000# -*- coding: utf-8 -*- # walarchive - Remote Barman WAL archive command for PostgreSQL # # This script remotely sends WAL files to Barman via SSH, on demand. # It is intended to be used as archive_command in PostgreSQL configuration. # # See the help page for usage information. # # © Copyright EnterpriseDB UK Limited 2019-2022 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . from __future__ import print_function import argparse import copy import hashlib import os import subprocess import sys import tarfile import time from contextlib import closing from io import BytesIO import barman DEFAULT_USER = "barman" BUFSIZE = 16 * 1024 def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) # Do connectivity test if requested if config.test: connectivity_test(config) return # never reached # Check WAL destination is not a directory if os.path.isdir(config.wal_path): exit_with_error("WAL_PATH cannot be a directory: %s" % config.wal_path) try: # Execute barman put-wal through the ssh connection ssh_process = RemotePutWal(config, config.wal_path) except EnvironmentError as exc: exit_with_error("Error executing ssh: %s" % exc) return # never reached # Wait for termination of every subprocess. 
If CTRL+C is pressed, # terminate all of them RemotePutWal.wait_for_all() # If the command succeeded exit here if ssh_process.returncode == 0: return # Report the exit code, remapping ssh failure code (255) to 3 if ssh_process.returncode == 255: exit_with_error("Connection problem with ssh", 3) else: exit_with_error( "Remote 'barman put-wal' command has failed!", ssh_process.returncode ) def build_ssh_command(config): """ Prepare an ssh command according to the arguments passed on command line :param argparse.Namespace config: the configuration from command line :return list[str]: the ssh command as list of string """ ssh_command = [ "ssh", "-q", # quiet mode - suppress warnings "-T", # disable pseudo-terminal allocation "%s@%s" % (config.user, config.barman_host), "barman", ] if config.config: ssh_command.append("--config='%s'" % config.config) ssh_command.extend(["put-wal", config.server_name]) if config.test: ssh_command.append("--test") return ssh_command def exit_with_error(message, status=2): """ Print ``message`` and terminate the script with ``status`` :param str message: message to print :param int status: script exit code """ print("ERROR: %s" % message, file=sys.stderr) sys.exit(status) def connectivity_test(config): """ Invoke remote put-wal --test to test the connection with Barman server :param argparse.Namespace config: the configuration from command line """ ssh_command = build_ssh_command(config) try: output = subprocess.Popen(ssh_command, stdout=subprocess.PIPE).communicate() print(output[0].decode("utf-8")) sys.exit(0) except subprocess.CalledProcessError as e: exit_with_error("Impossible to invoke remote put-wal: %s" % e) def parse_arguments(args=None): """ Parse the command line arguments :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] :rtype: argparse.Namespace """ parser = argparse.ArgumentParser( description="This script will be used as an 'archive_command' " "based on the put-wal feature of Barman. " "A ssh connection will be opened to the Barman host.", ) parser.add_argument( "-V", "--version", action="version", version="%%(prog)s %s" % barman.__version__ ) parser.add_argument( "-U", "--user", default=DEFAULT_USER, help="The user used for the ssh connection to the Barman server. " "Defaults to '%(default)s'.", ) parser.add_argument( "-c", "--config", metavar="CONFIG", help="configuration file on the Barman server", ) parser.add_argument( "-t", "--test", action="store_true", help="test both the connection and the configuration of the " "requested PostgreSQL server in Barman for WAL retrieval. " "With this option, the 'wal_name' mandatory argument is " "ignored.", ) parser.add_argument( "barman_host", metavar="BARMAN_HOST", help="The host of the Barman server.", ) parser.add_argument( "server_name", metavar="SERVER_NAME", help="The server name configured in Barman from which WALs are taken.", ) parser.add_argument( "wal_path", metavar="WAL_PATH", help="The value of the '%%p' keyword (according to 'archive_command').", ) return parser.parse_args(args=args) def md5copyfileobj(src, dst, length=None): """ Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. This method is used by the ChecksumTarFile.addfile(). 
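    A minimal usage sketch (illustrative only; the file names are hypothetical,
    not taken from the original source)::

        with open("src.bin", "rb") as src, open("dst.bin", "wb") as dst:
            digest = md5copyfileobj(src, dst)  # copy everything, return the md5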
Returns the md5 checksum """ checksum = hashlib.md5() if length == 0: return checksum.hexdigest() if length is None: while 1: buf = src.read(BUFSIZE) if not buf: break checksum.update(buf) dst.write(buf) return checksum.hexdigest() blocks, remainder = divmod(length, BUFSIZE) for _ in range(blocks): buf = src.read(BUFSIZE) if len(buf) < BUFSIZE: raise IOError("end of file reached") checksum.update(buf) dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise IOError("end of file reached") checksum.update(buf) dst.write(buf) return checksum.hexdigest() class ChecksumTarInfo(tarfile.TarInfo): """ Special TarInfo that can hold a file checksum """ def __init__(self, *args, **kwargs): super(ChecksumTarInfo, self).__init__(*args, **kwargs) self.data_checksum = None class ChecksumTarFile(tarfile.TarFile): """ Custom TarFile class that automatically calculates md5 checksum of each file and appends a file called 'MD5SUMS' to the stream. """ tarinfo = ChecksumTarInfo # The default TarInfo class used by TarFile format = tarfile.PAX_FORMAT # Use PAX format to better preserve metadata MD5SUMS_FILE = "MD5SUMS" def addfile(self, tarinfo, fileobj=None): """ Add the provided fileobj to the tar using md5copyfileobj and saves the file md5 in the provided ChecksumTarInfo object. This method completely replaces TarFile.addfile() """ self._check("aw") tarinfo = copy.copy(tarinfo) buf = tarinfo.tobuf(self.format, self.encoding, self.errors) self.fileobj.write(buf) self.offset += len(buf) # If there's data to follow, append it. if fileobj is not None: tarinfo.data_checksum = md5copyfileobj(fileobj, self.fileobj, tarinfo.size) blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE) if remainder > 0: self.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder)) blocks += 1 self.offset += blocks * tarfile.BLOCKSIZE self.members.append(tarinfo) def close(self): """ Add an MD5SUMS file to the tar just before closing. This method extends TarFile.close(). """ if self.closed: return if self.mode in "aw": with BytesIO() as md5sums: for tarinfo in self.members: line = "%s *%s\n" % (tarinfo.data_checksum, tarinfo.name) md5sums.write(line.encode()) md5sums.seek(0, os.SEEK_END) size = md5sums.tell() md5sums.seek(0, os.SEEK_SET) tarinfo = self.tarinfo(self.MD5SUMS_FILE) tarinfo.size = size self.addfile(tarinfo, md5sums) super(ChecksumTarFile, self).close() class RemotePutWal(object): """ Spawn a process that sends a WAL to a remote Barman server. :param argparse.Namespace config: the configuration from command line :param wal_path: The name of WAL to upload """ processes = set() """ The list of processes that has been spawned by RemotePutWal """ def __init__(self, config, wal_path): self.config = config self.wal_path = wal_path self.dest_file = None # Spawn a remote put-wal process self.ssh_process = subprocess.Popen( build_ssh_command(config), stdin=subprocess.PIPE ) # Register the spawned processes in the class registry self.processes.add(self.ssh_process) # Send the data as a tar file (containing checksums) with self.ssh_process.stdin as dest_file: with closing(ChecksumTarFile.open(mode="w|", fileobj=dest_file)) as tar: tar.add(wal_path, os.path.basename(wal_path)) @classmethod def wait_for_all(cls): """ Wait for the termination of all the registered spawned processes. 
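        Typical call sequence, as used by main() above (shown only as an
        illustrative sketch)::

            ssh_process = RemotePutWal(config, config.wal_path)
            RemotePutWal.wait_for_all()
            if ssh_process.returncode != 0:
                ...  # report the failure back to PostgreSQL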
""" try: while cls.processes: time.sleep(0.1) for process in cls.processes.copy(): if process.poll() is not None: cls.processes.remove(process) except KeyboardInterrupt: # If a SIGINT has been received, make sure that every subprocess # terminate for process in cls.processes: process.kill() exit_with_error("SIGINT received! Terminating.") @property def returncode(self): """ Return the exit code of the RemoteGetWal processes. :return: exit code of the RemoteGetWal processes """ if self.ssh_process.returncode != 0: return self.ssh_process.returncode return 0 if __name__ == "__main__": main() barman-2.18/barman/clients/cloud_backup_list.py0000644000621200062120000001023114172556763020036 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import json import logging from contextlib import closing from barman.clients.cloud_cli import ( create_argument_parser, GeneralErrorExit, NetworkErrorExit, OperationErrorExit, ) from barman.cloud import CloudBackupCatalog, configure_logging from barman.cloud_providers import get_cloud_interface from barman.infofile import BackupInfo from barman.utils import force_str def main(args=None): """ The main script entry point :param list[str] args: the raw arguments list. When not provided it defaults to sys.args[1:] """ config = parse_arguments(args) configure_logging(config) try: cloud_interface = get_cloud_interface(config) with closing(cloud_interface): catalog = CloudBackupCatalog( cloud_interface=cloud_interface, server_name=config.server_name ) if not cloud_interface.test_connectivity(): raise NetworkErrorExit() # If test is requested, just exit after connectivity test elif config.test: raise SystemExit(0) if not cloud_interface.bucket_exists: logging.error("Bucket %s does not exist", cloud_interface.bucket_name) raise OperationErrorExit() backup_list = catalog.get_backup_list() # Output if config.format == "console": COLUMNS = "{:<20}{:<25}{:<30}{:<16}" print( COLUMNS.format( "Backup ID", "End Time", "Begin Wal", "Archival Status" ) ) for backup_id in sorted(backup_list): item = backup_list[backup_id] if item and item.status == BackupInfo.DONE: keep_target = catalog.get_keep_target(item.backup_id) keep_status = ( keep_target and "KEEP:%s" % keep_target.upper() or "" ) print( COLUMNS.format( item.backup_id, item.end_time.strftime("%Y-%m-%d %H:%M:%S"), item.begin_wal, keep_status, ) ) else: print( json.dumps( { "backups_list": [ backup_list[backup_id].to_json() for backup_id in sorted(backup_list) ] } ) ) except Exception as exc: logging.error("Barman cloud backup list exception: %s", force_str(exc)) logging.debug("Exception details:", exc_info=exc) raise GeneralErrorExit() def parse_arguments(args=None): """ Parse command line arguments :return: The options parsed """ parser, _, _ = create_argument_parser( description="This script can be used to list backups " "made with barman-cloud-backup command. 
" "Currently AWS S3 and Azure Blob Storage are supported.", ) parser.add_argument( "--format", default="console", help="Output format (console or json). Default console.", ) return parser.parse_args(args=args) if __name__ == "__main__": main() barman-2.18/barman/storage/0000755000621200062120000000000014172556766014007 5ustar 00000000000000barman-2.18/barman/storage/__init__.py0000644000621200062120000000132414172556763016115 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . barman-2.18/barman/storage/file_manager.py0000644000621200062120000000340714172556763016773 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . from abc import ABCMeta, abstractmethod from barman.utils import with_metaclass class FileManager(with_metaclass(ABCMeta)): @abstractmethod def file_exist(self, file_path): """ Tests if file exists :param file_path: File path :type file_path: string :return: True if file exists False otherwise :rtype: bool """ @abstractmethod def get_file_stats(self, file_path): """ Tests if file exists :param file_path: File path :type file_path: string :return: :rtype: FileStats """ @abstractmethod def get_file_list(self, path): """ List all files within a path, including subdirectories :param path: Path to analyze :type path: string :return: List of file path :rtype: list """ @abstractmethod def get_file_content(self, file_path, file_mode="rb"): """ """ @abstractmethod def save_content_to_file(self, file_path, content, file_mode="wb"): """ """ barman-2.18/barman/storage/local_file_manager.py0000644000621200062120000000453514172556763020150 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import os from .file_manager import FileManager from .file_stats import FileStats class LocalFileManager(FileManager): def file_exist(self, file_path): """ Tests if file exists :param file_path: File path :type file_path: string :return: True if file exists False otherwise :rtype: bool """ return os.path.isfile(file_path) def get_file_stats(self, file_path): """ Tests if file exists :param file_path: File path :type file_path: string :return: :rtype: FileStats """ if not self.file_exist(file_path): raise IOError("Missing file " + file_path) sts = os.stat(file_path) return FileStats(sts.st_size, sts.st_mtime) def get_file_list(self, path): """ List all files within a path, including subdirectories :param path: Path to analyze :type path: string :return: List of file path :rtype: list """ if not os.path.isdir(path): raise NotADirectoryError(path) file_list = [] for root, dirs, files in os.walk(path): file_list.extend( list(map(lambda x, prefix=root: os.path.join(prefix, x), files)) ) return file_list def get_file_content(self, file_path, file_mode="rb"): with open(file_path, file_mode) as reader: content = reader.read() return content def save_content_to_file(self, file_path, content, file_mode="wb"): """ """ with open(file_path, file_mode) as writer: writer.write(content) barman-2.18/barman/storage/file_stats.py0000644000621200062120000000321714172556763016516 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . from datetime import datetime try: from datetime import timezone utc = timezone.utc except ImportError: # python 2.7 compatibility from dateutil import tz utc = tz.tzutc() class FileStats: def __init__(self, size, last_modified): """ Arbitrary timezone set to UTC. There is probably possible improvement here. :param size: file size in bytes :type size: int :param last_modified: Time of last modification in seconds :type last_modified: int """ self.size = size self.last_modified = datetime.fromtimestamp(last_modified, tz=utc) def get_size(self): """ """ return self.size def get_last_modified(self, datetime_format="%Y-%m-%d %H:%M:%S"): """ :param datetime_format: Format to apply on datetime object :type datetime_format: str """ return self.last_modified.strftime(datetime_format) barman-2.18/barman/retention_policies.py0000644000621200062120000004524514172556763016622 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module defines backup retention policies. A backup retention policy in Barman is a user-defined policy for determining how long backups and archived logs (WAL segments) need to be retained for media recovery. You can define a retention policy in terms of backup redundancy or a recovery window. Barman retains the periodical backups required to satisfy the current retention policy, and any archived WAL files required for complete recovery of those backups. """ import logging import re from abc import ABCMeta, abstractmethod from datetime import datetime, timedelta from dateutil import tz from barman.annotations import KeepManager from barman.exceptions import InvalidRetentionPolicy from barman.infofile import BackupInfo from barman.utils import with_metaclass _logger = logging.getLogger(__name__) class RetentionPolicy(with_metaclass(ABCMeta, object)): """Abstract base class for retention policies""" def __init__(self, mode, unit, value, context, server): """Constructor of the retention policy base class""" self.mode = mode self.unit = unit self.value = int(value) self.context = context self.server = server self._first_backup = None self._first_wal = None def report(self, source=None, context=None): """Report obsolete/valid objects according to the retention policy""" if context is None: context = self.context # Overrides the list of available backups if source is None: source = self.server.available_backups if context == "BASE": return self._backup_report(source) elif context == "WAL": return self._wal_report() else: raise ValueError("Invalid context %s", context) def backup_status(self, backup_id): """Report the status of a backup according to the retention policy""" source = self.server.available_backups if self.context == "BASE": return self._backup_report(source)[backup_id] else: return BackupInfo.NONE def first_backup(self): """Returns the first valid backup according to retention policies""" if not self._first_backup: self.report(context="BASE") return self._first_backup def first_wal(self): """Returns the first valid WAL according to retention policies""" if not self._first_wal: self.report(context="WAL") return self._first_wal @abstractmethod def __str__(self): """String representation""" pass @abstractmethod def debug(self): """Debug information""" pass @abstractmethod def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" pass @abstractmethod def _wal_report(self): """Report obsolete/valid WALs according to the retention policy""" pass @classmethod def create(cls, server, option, value): """ If given option and value from the configuration file match, creates the retention policy object for the given server """ # using @abstractclassmethod from python3 would be better here raise NotImplementedError( "The class %s must override the create() class method", cls.__name__ ) def to_json(self): """ Output representation of the obj for JSON serialization """ return "%s %s %s" % (self.mode, self.value, self.unit) class RedundancyRetentionPolicy(RetentionPolicy): """ Retention policy based on redundancy, the setting that determines many periodical backups to keep. 
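    For illustration only (the option string is an assumption based on the
    ``_re`` pattern below and on the factory usage at the end of this module)::

        policy = RedundancyRetentionPolicy.create(server, "BASE", "REDUNDANCY 3")
        # policy.value == 3: keep the three most recent DONE backups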
A redundancy-based retention policy is contrasted with retention policy that uses a recovery window. """ _re = re.compile(r"^\s*redundancy\s+(\d+)\s*$", re.IGNORECASE) def __init__(self, context, value, server): super(RedundancyRetentionPolicy, self).__init__( "redundancy", "b", value, "BASE", server ) assert value >= 0 def __str__(self): return "REDUNDANCY %s" % self.value def debug(self): return "Redundancy: %s (%s)" % (self.value, self.context) def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" report = dict() backups = source # Normalise the redundancy value (according to minimum redundancy) redundancy = self.value if redundancy < self.server.minimum_redundancy: _logger.warning( "Retention policy redundancy (%s) is lower than " "the required minimum redundancy (%s). Enforce %s.", redundancy, self.server.minimum_redundancy, self.server.minimum_redundancy, ) redundancy = self.server.minimum_redundancy # Map the latest 'redundancy' DONE backups as VALID # The remaining DONE backups are classified as OBSOLETE # Non DONE backups are classified as NONE # NOTE: reverse key orders (simulate reverse chronology) i = 0 for bid in sorted(backups.keys(), reverse=True): if backups[bid].status == BackupInfo.DONE: keep_target = self.server.get_keep_target(bid) if keep_target == KeepManager.TARGET_STANDALONE: report[bid] = BackupInfo.KEEP_STANDALONE elif keep_target: # Any other recovery target is treated as KEEP_FULL for safety report[bid] = BackupInfo.KEEP_FULL elif i < redundancy: report[bid] = BackupInfo.VALID self._first_backup = bid else: report[bid] = BackupInfo.OBSOLETE i = i + 1 else: report[bid] = BackupInfo.NONE return report def _wal_report(self): """Report obsolete/valid WALs according to the retention policy""" pass @classmethod def create(cls, server, context, optval): # Detect Redundancy retention type mtch = cls._re.match(optval) if not mtch: return None value = int(mtch.groups()[0]) return cls(context, value, server) class RecoveryWindowRetentionPolicy(RetentionPolicy): """ Retention policy based on recovery window. The DBA specifies a period of time and Barman ensures retention of backups and archived WAL files required for point-in-time recovery to any time during the recovery window. The interval always ends with the current time and extends back in time for the number of days specified by the user. For example, if the retention policy is set for a recovery window of seven days, and the current time is 9:30 AM on Friday, Barman retains the backups required to allow point-in-time recovery back to 9:30 AM on the previous Friday. """ _re = re.compile( r""" ^\s* recovery\s+window\s+of\s+ # recovery window of (\d+)\s+(day|month|week)s? 
# N (day|month|week) with optional 's' \s*$ """, re.IGNORECASE | re.VERBOSE, ) _kw = {"d": "DAYS", "m": "MONTHS", "w": "WEEKS"} def __init__(self, context, value, unit, server): super(RecoveryWindowRetentionPolicy, self).__init__( "window", unit, value, context, server ) assert value >= 0 assert unit == "d" or unit == "m" or unit == "w" assert context == "WAL" or context == "BASE" # Calculates the time delta if unit == "d": self.timedelta = timedelta(days=self.value) elif unit == "w": self.timedelta = timedelta(weeks=self.value) elif unit == "m": self.timedelta = timedelta(days=(31 * self.value)) def __str__(self): return "RECOVERY WINDOW OF %s %s" % (self.value, self._kw[self.unit]) def debug(self): return "Recovery Window: %s %s: %s (%s)" % ( self.value, self.unit, self.context, self._point_of_recoverability(), ) def _point_of_recoverability(self): """ Based on the current time and the window, calculate the point of recoverability, which will be then used to define the first backup or the first WAL """ return datetime.now(tz.tzlocal()) - self.timedelta def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" report = dict() backups = source # Map as VALID all DONE backups having end time lower than # the point of recoverability. The older ones # are classified as OBSOLETE. # Non DONE backups are classified as NONE found = False valid = 0 # NOTE: reverse key orders (simulate reverse chronology) for bid in sorted(backups.keys(), reverse=True): # We are interested in DONE backups only if backups[bid].status == BackupInfo.DONE: keep_target = self.server.get_keep_target(bid) if keep_target == KeepManager.TARGET_STANDALONE: keep_target = BackupInfo.KEEP_STANDALONE elif keep_target: # Any other recovery target is treated as KEEP_FULL for safety keep_target = BackupInfo.KEEP_FULL # By found, we mean "found the first backup outside the recovery # window" if that is the case then this bid is potentially obsolete. 
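                # Illustration (not part of the original source): with a
                # "RECOVERY WINDOW OF 7 DAYS" policy and minimum_redundancy of 1,
                # the newest DONE backup older than the window is still reported
                # VALID below, because it is required to recover to the oldest
                # point inside the window; only DONE backups older than that one
                # become OBSOLETE (or POTENTIALLY_OBSOLETE while the minimum
                # redundancy is not yet satisfied).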
if found: # Check minimum redundancy requirements if valid < self.server.minimum_redundancy: if keep_target: _logger.info( "Keeping obsolete backup %s for server %s " "(older than %s) " "due to keep status: %s", bid, self.server.name, self._point_of_recoverability, keep_target, ) report[bid] = keep_target else: _logger.warning( "Keeping obsolete backup %s for server %s " "(older than %s) " "due to minimum redundancy requirements (%s)", bid, self.server.name, self._point_of_recoverability(), self.server.minimum_redundancy, ) # We mark the backup as potentially obsolete # as we must respect minimum redundancy requirements report[bid] = BackupInfo.POTENTIALLY_OBSOLETE self._first_backup = bid valid = valid + 1 else: if keep_target: _logger.info( "Keeping obsolete backup %s for server %s " "(older than %s) " "due to keep status: %s", bid, self.server.name, self._point_of_recoverability, keep_target, ) report[bid] = keep_target else: # We mark this backup as obsolete # (older than the first valid one) _logger.info( "Reporting backup %s for server %s as OBSOLETE " "(older than %s)", bid, self.server.name, self._point_of_recoverability(), ) report[bid] = BackupInfo.OBSOLETE else: _logger.debug( "Reporting backup %s for server %s as VALID (newer than %s)", bid, self.server.name, self._point_of_recoverability(), ) # Backup within the recovery window report[bid] = keep_target or BackupInfo.VALID self._first_backup = bid valid = valid + 1 # TODO: Currently we use the backup local end time # We need to make this more accurate if backups[bid].end_time < self._point_of_recoverability(): found = True else: report[bid] = BackupInfo.NONE return report def _wal_report(self): """Report obsolete/valid WALs according to the retention policy""" pass @classmethod def create(cls, server, context, optval): # Detect Recovery Window retention type match = cls._re.match(optval) if not match: return None value = int(match.groups()[0]) unit = match.groups()[1][0].lower() return cls(context, value, unit, server) class SimpleWALRetentionPolicy(RetentionPolicy): """Simple retention policy for WAL files (identical to the main one)""" _re = re.compile(r"^\s*main\s*$", re.IGNORECASE) def __init__(self, context, policy, server): super(SimpleWALRetentionPolicy, self).__init__( "simple-wal", policy.unit, policy.value, context, server ) # The referred policy must be of type 'BASE' assert self.context == "WAL" and policy.context == "BASE" self.policy = policy def __str__(self): return "MAIN" def debug(self): return "Simple WAL Retention Policy (%s)" % self.policy def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" pass def _wal_report(self): """Report obsolete/valid backups according to the retention policy""" self.policy.report(context="WAL") def first_wal(self): """Returns the first valid WAL according to retention policies""" return self.policy.first_wal() @classmethod def create(cls, server, context, optval): # Detect Redundancy retention type match = cls._re.match(optval) if not match: return None return cls(context, server.retention_policy, server) class ServerMetadata(object): """ Static retention metadata for a barman-managed server This will return the same values regardless of any changes in the state of the barman-managed server and associated backups. 
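    Built by RetentionPolicyFactory.create() when working from a backup catalog
    instead of a live server, along the lines of (illustrative sketch; the
    server name is hypothetical)::

        metadata = ServerMetadata(
            "main", catalog.get_backup_list(), keep_manager=catalog
        )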
""" def __init__(self, server_name, backup_info_list, keep_manager): self.name = server_name self.minimum_redundancy = 0 self.retention_policy = None self.backup_info_list = backup_info_list self.keep_manager = keep_manager @property def available_backups(self): return self.backup_info_list def get_keep_target(self, backup_id): return self.keep_manager.get_keep_target(backup_id) class ServerMetadataLive(ServerMetadata): """ Live retention metadata for a barman-managed server This will always return the current values for the barman.Server passed in at construction time. """ def __init__(self, server, keep_manager): self.server = server self.keep_manager = keep_manager @property def name(self): return self.server.config.name @property def minimum_redundancy(self): return self.server.config.minimum_redundancy @property def retention_policy(self): return self.server.config.retention_policy @property def available_backups(self): return self.server.get_available_backups(BackupInfo.STATUS_NOT_EMPTY) def get_keep_target(self, backup_id): return self.keep_manager.get_keep_target(backup_id) class RetentionPolicyFactory(object): """Factory for retention policy objects""" # Available retention policy types policy_classes = [ RedundancyRetentionPolicy, RecoveryWindowRetentionPolicy, SimpleWALRetentionPolicy, ] @classmethod def create(cls, option, value, server=None, server_name=None, catalog=None): """ Based on the given option and value from the configuration file, creates the appropriate retention policy object for the given server Either server *or* server_name and backup_info_list must be provided. If server (a `barman.Server`) is provided then the returned RetentionPolicy will update as the state of the `barman.Server` changes. If server_name and backup_info_list are provided then the RetentionPolicy will be a snapshot based on the backup_info_list passed at construction time. """ if option == "wal_retention_policy": context = "WAL" elif option == "retention_policy": context = "BASE" else: raise InvalidRetentionPolicy( "Unknown option for retention policy: %s" % option ) if server: server_metadata = ServerMetadataLive( server, keep_manager=server.backup_manager ) else: server_metadata = ServerMetadata( server_name, catalog.get_backup_list(), keep_manager=catalog ) # Look for the matching rule for policy_class in cls.policy_classes: policy = policy_class.create(server_metadata, context, value) if policy: return policy raise InvalidRetentionPolicy("Cannot parse option %s: %s" % (option, value)) barman-2.18/barman/utils.py0000644000621200062120000004761214172556763014064 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains utility functions used in Barman. 
""" import datetime import decimal import errno import grp import hashlib import json import logging import logging.handlers import os import pwd import re import signal import sys from argparse import ArgumentTypeError from abc import ABCMeta, abstractmethod from contextlib import contextmanager from distutils.version import Version from barman.exceptions import TimeoutError _logger = logging.getLogger(__name__) if sys.version_info[0] >= 3: _text_type = str _string_types = str else: _text_type = unicode # noqa _string_types = basestring # noqa def drop_privileges(user): """ Change the system user of the current python process. It will only work if called as root or as the target user. :param string user: target user :raise KeyError: if the target user doesn't exists :raise OSError: when the user change fails """ pw = pwd.getpwnam(user) if pw.pw_uid == os.getuid(): return groups = [e.gr_gid for e in grp.getgrall() if pw.pw_name in e.gr_mem] groups.append(pw.pw_gid) os.setgroups(groups) os.setgid(pw.pw_gid) os.setuid(pw.pw_uid) os.environ["HOME"] = pw.pw_dir def mkpath(directory): """ Recursively create a target directory. If the path already exists it does nothing. :param str directory: directory to be created """ if not os.path.isdir(directory): os.makedirs(directory) def configure_logging( log_file, log_level=logging.INFO, log_format="%(asctime)s %(name)s %(levelname)s: %(message)s", ): """ Configure the logging module :param str,None log_file: target file path. If None use standard error. :param int log_level: min log level to be reported in log file. Default to INFO :param str log_format: format string used for a log line. Default to "%(asctime)s %(name)s %(levelname)s: %(message)s" """ warn = None handler = logging.StreamHandler() if log_file: log_file = os.path.abspath(log_file) log_dir = os.path.dirname(log_file) try: mkpath(log_dir) handler = logging.handlers.WatchedFileHandler(log_file, encoding="utf-8") except (OSError, IOError): # fallback to standard error warn = ( "Failed opening the requested log file. " "Using standard error instead." ) formatter = logging.Formatter(log_format) handler.setFormatter(formatter) logging.root.addHandler(handler) if warn: # this will be always displayed because the default level is WARNING _logger.warn(warn) logging.root.setLevel(log_level) def parse_log_level(log_level): """ Convert a log level to its int representation as required by logging module. 
:param log_level: An integer or a string :return: an integer or None if an invalid argument is provided """ try: log_level_int = int(log_level) except ValueError: log_level_int = logging.getLevelName(str(log_level).upper()) if isinstance(log_level_int, int): return log_level_int return None # noinspection PyProtectedMember def get_log_levels(): """ Return a list of available log level names """ try: level_to_name = logging._levelToName except AttributeError: level_to_name = dict( [ (key, logging._levelNames[key]) for key in logging._levelNames if isinstance(key, int) ] ) for level in sorted(level_to_name): yield level_to_name[level] def pretty_size(size, unit=1024): """ This function returns a pretty representation of a size value :param int|long|float size: the number to to prettify :param int unit: 1000 or 1024 (the default) :rtype: str """ suffixes = ["B"] + [i + {1000: "B", 1024: "iB"}[unit] for i in "KMGTPEZY"] if unit == 1000: suffixes[1] = "kB" # special case kB instead of KB # cast to float to avoid losing decimals size = float(size) for suffix in suffixes: if abs(size) < unit or suffix == suffixes[-1]: if suffix == suffixes[0]: return "%d %s" % (size, suffix) else: return "%.1f %s" % (size, suffix) else: size /= unit def human_readable_timedelta(timedelta): """ Given a time interval, returns a human readable string :param timedelta: the timedelta to transform in a human readable form """ delta = abs(timedelta) # Calculate time units for the given interval time_map = { "day": int(delta.days), "hour": int(delta.seconds / 3600), "minute": int(delta.seconds / 60) % 60, "second": int(delta.seconds % 60), } # Build the resulting string time_list = [] # 'Day' part if time_map["day"] > 0: if time_map["day"] == 1: time_list.append("%s day" % time_map["day"]) else: time_list.append("%s days" % time_map["day"]) # 'Hour' part if time_map["hour"] > 0: if time_map["hour"] == 1: time_list.append("%s hour" % time_map["hour"]) else: time_list.append("%s hours" % time_map["hour"]) # 'Minute' part if time_map["minute"] > 0: if time_map["minute"] == 1: time_list.append("%s minute" % time_map["minute"]) else: time_list.append("%s minutes" % time_map["minute"]) # 'Second' part if time_map["second"] > 0: if time_map["second"] == 1: time_list.append("%s second" % time_map["second"]) else: time_list.append("%s seconds" % time_map["second"]) human = ", ".join(time_list) # Take care of timedelta when is shorter than a second if delta < datetime.timedelta(seconds=1): human = "less than one second" # If timedelta is negative append 'ago' suffix if delta != timedelta: human += " ago" return human def total_seconds(timedelta): """ Compatibility method because the total_seconds method has been introduced in Python 2.7 :param timedelta: a timedelta object :rtype: float """ if hasattr(timedelta, "total_seconds"): return timedelta.total_seconds() else: secs = (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6 return (timedelta.microseconds + secs) / 10.0 ** 6 def which(executable, path=None): """ This method is useful to find if a executable is present into the os PATH :param str executable: The name of the executable to find :param str|None path: An optional search path to override the current one. 
:return str|None: the path of the executable or None """ # Get the system path if needed if path is None: path = os.getenv("PATH") # If the path is None at this point we have nothing to search if path is None: return None # If executable is an absolute path, check if it exists and is executable # otherwise return failure. if os.path.isabs(executable): if os.path.exists(executable) and os.access(executable, os.X_OK): return executable else: return None # Search the requested executable in every directory present in path and # return the first occurrence that exists and is executable. for file_path in path.split(os.path.pathsep): file_path = os.path.join(file_path, executable) # If the file exists and is executable return the full path. if os.path.exists(file_path) and os.access(file_path, os.X_OK): return file_path # If no matching file is present on the system return None return None class BarmanEncoder(json.JSONEncoder): """ Custom JSON encoder used for BackupInfo encoding This encoder supports the following types: * dates and timestamps if they have a ctime() method. * objects that implement the 'to_json' method. * binary strings (python 3) """ def default(self, obj): # If the object implements to_json() method use it if hasattr(obj, "to_json"): return obj.to_json() # Serialise date and datetime objects using ctime() method if hasattr(obj, "ctime") and callable(obj.ctime): return obj.ctime() # Serialise timedelta objects using human_readable_timedelta() if isinstance(obj, datetime.timedelta): return human_readable_timedelta(obj) # Serialise Decimal objects using their string representation # WARNING: When deserialized they will be treat as float values # which have a lower precision if isinstance(obj, decimal.Decimal): return float(obj) # Binary strings must be decoded before using them in # an unicode string if hasattr(obj, "decode") and callable(obj.decode): return obj.decode("utf-8", "replace") # Manage (Loose|Strict)Version objects as strings. if isinstance(obj, Version): return str(obj) # Let the base class default method raise the TypeError return super(BarmanEncoder, self).default(obj) def fsync_dir(dir_path): """ Execute fsync on a directory ensuring it is synced to disk :param str dir_path: The directory to sync :raise OSError: If fail opening the directory """ dir_fd = os.open(dir_path, os.O_DIRECTORY) try: os.fsync(dir_fd) except OSError as e: # On some filesystem doing a fsync on a directory # raises an EINVAL error. Ignoring it is usually safe. if e.errno != errno.EINVAL: raise finally: os.close(dir_fd) def fsync_file(file_path): """ Execute fsync on a file ensuring it is synced to disk Returns the file stats :param str file_path: The file to sync :return: file stat :raise OSError: If something fails """ file_fd = os.open(file_path, os.O_RDONLY) file_stat = os.fstat(file_fd) try: os.fsync(file_fd) return file_stat except OSError as e: # On some filesystem doing a fsync on a O_RDONLY fd # raises an EACCES error. In that case we need to try again after # reopening as O_RDWR. 
if e.errno != errno.EACCES: raise finally: os.close(file_fd) file_fd = os.open(file_path, os.O_RDWR) try: os.fsync(file_fd) finally: os.close(file_fd) return file_stat def simplify_version(version_string): """ Simplify a version number by removing the patch level :param version_string: the version number to simplify :return str: the simplified version number """ if version_string is None: return None version = version_string.split(".") # If a development/beta/rc version, split out the string part unreleased = re.search(r"[^0-9.]", version[-1]) if unreleased: last_component = version.pop() number = last_component[: unreleased.start()] string = last_component[unreleased.start() :] version += [number, string] return ".".join(version[:-1]) def with_metaclass(meta, *bases): """ Function from jinja2/_compat.py. License: BSD. Create a base class with a metaclass. :param type meta: Metaclass to add to base class """ # This requires a bit of explanation: the basic idea is to make a # dummy metaclass for one level of class instantiation that replaces # itself with the actual metaclass. class Metaclass(type): def __new__(mcs, name, this_bases, d): return meta(name, bases, d) return type.__new__(Metaclass, "temporary_class", (), {}) @contextmanager def timeout(timeout_duration): """ ContextManager responsible for timing out the contained block of code after a defined time interval. """ # Define the handler for the alarm signal def handler(signum, frame): raise TimeoutError() # set the timeout handler previous_handler = signal.signal(signal.SIGALRM, handler) if previous_handler != signal.SIG_DFL and previous_handler != signal.SIG_IGN: signal.signal(signal.SIGALRM, previous_handler) raise AssertionError("Another timeout is already defined") # set the timeout duration signal.alarm(timeout_duration) try: # Execute the contained block of code yield finally: # Reset the signal signal.alarm(0) signal.signal(signal.SIGALRM, signal.SIG_DFL) def is_power_of_two(number): """ Check if a number is a power of two or not """ # Returns None if number is set to None. if number is None: return None # This is a fast method to check for a power of two. # # A power of two has this structure: 100000 (one or more zeroes) # This is the same number minus one: 011111 (composed by ones) # This is the bitwise and: 000000 # # This is true only for every power of two return number != 0 and (number & (number - 1)) == 0 def file_md5(file_path, buffer_size=1024 * 16): """ Calculate the md5 checksum for the provided file path :param str file_path: path of the file to read :param int buffer_size: read buffer size, default 16k :return str: Hexadecimal md5 string """ md5 = hashlib.md5() with open(file_path, "rb") as file_object: while 1: buf = file_object.read(buffer_size) if not buf: break md5.update(buf) return md5.hexdigest() # Might be better to use stream instead of full file content. As done in file_md5. # Might create performance issue for large files. class ChecksumAlgorithm(with_metaclass(ABCMeta)): @abstractmethod def checksum(self, value): """ Creates hash hexadecimal string from input byte :param value: Value to create checksum from :type value: byte :return: Return the digest value as a string of hexadecimal digits. :rtype: str """ def checksum_from_str(self, value, encoding="utf-8"): """ Creates hash hexadecimal string from input string :param value: Value to create checksum from :type value: str :param encoding: The encoding in which to encode the string. 
:type encoding: str :return: Return the digest value as a string of hexadecimal digits. :rtype: str """ return self.checksum(value.encode(encoding)) def get_name(self): return self.__class__.__name__ class SHA256(ChecksumAlgorithm): def checksum(self, value): """ Creates hash hexadecimal string from input byte :param value: Value to create checksum from :type value: byte :return: Return the digest value as a string of hexadecimal digits. :rtype: str """ sha = hashlib.sha256(value) return sha.hexdigest() def force_str(obj, encoding="utf-8", errors="replace"): """ Force any object to an unicode string. Code inspired by Django's force_text function """ # Handle the common case first for performance reasons. if issubclass(type(obj), _text_type): return obj try: if issubclass(type(obj), _string_types): obj = obj.decode(encoding, errors) else: if sys.version_info[0] >= 3: if isinstance(obj, bytes): obj = _text_type(obj, encoding, errors) else: obj = _text_type(obj) elif hasattr(obj, "__unicode__"): obj = _text_type(obj) else: obj = _text_type(bytes(obj), encoding, errors) except (UnicodeDecodeError, TypeError): if isinstance(obj, Exception): # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. obj = " ".join(force_str(arg, encoding, errors) for arg in obj.args) else: # As last resort, use a repr call to avoid any exception obj = repr(obj) return obj def redact_passwords(text): """ Redact passwords from the input text. Password are found in these two forms: Keyword/Value Connection Strings: - host=localhost port=5432 dbname=mydb password=SHAME_ON_ME Connection URIs: - postgresql://[user[:password]][netloc][:port][/dbname] :param str text: Input content :return: String with passwords removed """ # Remove passwords as found in key/value connection strings text = re.sub("password=('(\\'|[^'])+'|[^ '\"]*)", "password=*REDACTED*", text) # Remove passwords in connection URLs text = re.sub(r"(?<=postgresql:\/\/)([^ :@]+:)([^ :@]+)?@", r"\1*REDACTED*@", text) return text def check_non_negative(value): """ Check for a positive integer option :param value: str containing the value to check """ if value is None: return None try: int_value = int(value) except Exception: raise ArgumentTypeError("'%s' is not a valid non negative integer" % value) if int_value < 0: raise ArgumentTypeError("'%s' is not a valid non negative integer" % value) return int_value def check_positive(value): """ Check for a positive integer option :param value: str containing the value to check """ if value is None: return None try: int_value = int(value) except Exception: raise ArgumentTypeError("'%s' is not a valid positive integer" % value) if int_value < 1: raise ArgumentTypeError("'%s' is not a valid positive integer" % value) return int_value def check_size(value): """ Check user input for a human readable size :param value: str containing the value to check """ if value is None: return None # Ignore cases value = value.upper() try: # If value ends with `B` we try to parse the multiplier, # otherwise it is a plain integer if value[-1] == "B": # By default we use base=1024, if the value ends with `iB` # it is a SI value and we use base=1000 if value[-2] == "I": base = 1000 idx = 3 else: base = 1024 idx = 2 multiplier = base # Parse the multiplicative prefix for prefix in "KMGTPEZY": if value[-idx] == prefix: int_value = 
int(float(value[:-idx]) * multiplier) break multiplier *= base else: # If we do not find the prefix, remove the unit # and try to parse the remainder as an integer # (e.g. '1234B') int_value = int(value[: -idx + 1]) else: int_value = int(value) except ValueError: raise ArgumentTypeError("'%s' is not a valid size string" % value) if int_value is None or int_value < 1: raise ArgumentTypeError("'%s' is not a valid size string" % value) return int_value barman-2.18/barman/lockfile.py0000644000621200062120000002615614172556763014514 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module is the lock manager for Barman """ import errno import fcntl import os import re from barman.exceptions import ( LockFileBusy, LockFileParsingError, LockFilePermissionDenied, ) class LockFile(object): """ Ensures that there is only one process which is running against a specified LockFile. It supports the Context Manager interface, allowing the use in with statements. with LockFile('file.lock') as locked: if not locked: print "failed" else: You can also use exceptions on failures try: with LockFile('file.lock', True): except LockFileBusy, e, file: print "failed to lock %s" % file """ LOCK_PATTERN = None r""" If defined in a subclass, it must be a compiled regular expression which matches the lock filename. It must provide named groups for the constructor parameters which produce the same lock name. I.e.: >>> ServerWalReceiveLock('/tmp', 'server-name').filename '/tmp/.server-name-receive-wal.lock' >>> ServerWalReceiveLock.LOCK_PATTERN = re.compile( r'\.(?P.+)-receive-wal\.lock') >>> m = ServerWalReceiveLock.LOCK_PATTERN.match( '.server-name-receive-wal.lock') >>> ServerWalReceiveLock('/tmp', **(m.groupdict())).filename '/tmp/.server-name-receive-wal.lock' """ @classmethod def build_if_matches(cls, path): """ Factory method that creates a lock instance if the path matches the lock filename created by the actual class :param path: the full path of a LockFile :return: """ # If LOCK_PATTERN is not defined always return None if not cls.LOCK_PATTERN: return None # Matches the provided path against LOCK_PATTERN lock_directory = os.path.abspath(os.path.dirname(path)) lock_name = os.path.basename(path) match = cls.LOCK_PATTERN.match(lock_name) if match: # Build the lock object for the provided path return cls(lock_directory, **(match.groupdict())) return None def __init__(self, filename, raise_if_fail=True, wait=False): self.filename = os.path.abspath(filename) self.fd = None self.raise_if_fail = raise_if_fail self.wait = wait def acquire(self, raise_if_fail=None, wait=None, update_pid=True): """ Creates and holds on to the lock file. When raise_if_fail, a LockFileBusy is raised if the lock is held by someone else and a LockFilePermissionDenied is raised when the user executing barman have insufficient rights for the creation of a LockFile. 
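        A minimal explicit acquire/release sketch (illustrative only; the lock
        path is hypothetical)::

            lock = LockFile("/tmp/.example.lock", raise_if_fail=False)
            if lock.acquire(wait=False):
                try:
                    pass  # critical section
                finally:
                    lock.release()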
Returns True if lock has been successfully acquired, False otherwise. :param bool raise_if_fail: If True raise an exception on failure :param bool wait: If True issue a blocking request :param bool update_pid: Whether to write our pid in the lockfile :returns bool: whether the lock has been acquired """ if self.fd: return True fd = None # method arguments take precedence on class parameters raise_if_fail = ( raise_if_fail if raise_if_fail is not None else self.raise_if_fail ) wait = wait if wait is not None else self.wait try: # 384 is 0600 in octal, 'rw-------' fd = os.open(self.filename, os.O_CREAT | os.O_RDWR, 384) flags = fcntl.LOCK_EX if not wait: flags |= fcntl.LOCK_NB fcntl.flock(fd, flags) if update_pid: # Once locked, replace the content of the file os.lseek(fd, 0, os.SEEK_SET) os.write(fd, ("%s\n" % os.getpid()).encode("ascii")) # Truncate the file at the current position os.ftruncate(fd, os.lseek(fd, 0, os.SEEK_CUR)) self.fd = fd return True except (OSError, IOError) as e: if fd: os.close(fd) # let's not leak file descriptors if raise_if_fail: if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK): raise LockFileBusy(self.filename) elif e.errno == errno.EACCES: raise LockFilePermissionDenied(self.filename) else: raise else: return False def release(self): """ Releases the lock. If the lock is not held by the current process it does nothing. """ if not self.fd: return try: fcntl.flock(self.fd, fcntl.LOCK_UN) os.close(self.fd) except (OSError, IOError): pass self.fd = None def __del__(self): """ Avoid stale lock files. """ self.release() # Contextmanager interface def __enter__(self): return self.acquire() def __exit__(self, exception_type, value, traceback): self.release() def get_owner_pid(self): """ Test whether a lock is already held by a process. Returns the PID of the owner process or None if the lock is available. :rtype: int|None :raises LockFileParsingError: when the lock content is garbled :raises LockFilePermissionDenied: when the lockfile is not accessible """ try: self.acquire(raise_if_fail=True, wait=False, update_pid=False) except LockFileBusy: try: # Read the lock content and parse the PID # NOTE: We cannot read it in the self.acquire method to avoid # reading the previous locker PID with open(self.filename, "r") as file_object: return int(file_object.readline().strip()) except ValueError as e: # This should not happen raise LockFileParsingError(e) # release the lock and return None self.release() return None class GlobalCronLock(LockFile): """ This lock protects cron from multiple executions. Creates a global '.cron.lock' lock file under the given lock_directory. """ def __init__(self, lock_directory): super(GlobalCronLock, self).__init__( os.path.join(lock_directory, ".cron.lock"), raise_if_fail=True ) class ServerBackupLock(LockFile): """ This lock protects a server from multiple executions of backup command Creates a '.-backup.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerBackupLock, self).__init__( os.path.join(lock_directory, ".%s-backup.lock" % server_name), raise_if_fail=True, ) class ServerCronLock(LockFile): """ This lock protects a server from multiple executions of cron command Creates a '.-cron.lock' lock file under the given lock_directory for the named SERVER. 
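    For example (illustrative; the paths are hypothetical)::

        ServerCronLock("/var/lib/barman", "main").filename
        # -> '/var/lib/barman/.main-cron.lock'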
""" def __init__(self, lock_directory, server_name): super(ServerCronLock, self).__init__( os.path.join(lock_directory, ".%s-cron.lock" % server_name), raise_if_fail=True, wait=False, ) class ServerXLOGDBLock(LockFile): """ This lock protects a server's xlogdb access Creates a '.-xlogdb.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerXLOGDBLock, self).__init__( os.path.join(lock_directory, ".%s-xlogdb.lock" % server_name), raise_if_fail=True, wait=True, ) class ServerWalArchiveLock(LockFile): """ This lock protects a server from multiple executions of wal-archive command Creates a '.-archive-wal.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerWalArchiveLock, self).__init__( os.path.join(lock_directory, ".%s-archive-wal.lock" % server_name), raise_if_fail=True, wait=False, ) class ServerWalReceiveLock(LockFile): """ This lock protects a server from multiple executions of receive-wal command Creates a '.-receive-wal.lock' lock file under the given lock_directory for the named SERVER. """ # TODO: Implement on the other LockFile subclasses LOCK_PATTERN = re.compile(r"\.(?P.+)-receive-wal\.lock") def __init__(self, lock_directory, server_name): super(ServerWalReceiveLock, self).__init__( os.path.join(lock_directory, ".%s-receive-wal.lock" % server_name), raise_if_fail=True, wait=False, ) class ServerBackupIdLock(LockFile): """ This lock protects from changing a backup that is in use. Creates a '.-.lock' lock file under the given lock_directory for a BACKUP of a SERVER. """ def __init__(self, lock_directory, server_name, backup_id): super(ServerBackupIdLock, self).__init__( os.path.join(lock_directory, ".%s-%s.lock" % (server_name, backup_id)), raise_if_fail=True, wait=False, ) class ServerBackupSyncLock(LockFile): """ This lock protects from multiple executions of the sync command on the same backup. Creates a '.--sync-backup.lock' lock file under the given lock_directory for a BACKUP of a SERVER. """ def __init__(self, lock_directory, server_name, backup_id): super(ServerBackupSyncLock, self).__init__( os.path.join( lock_directory, ".%s-%s-sync-backup.lock" % (server_name, backup_id) ), raise_if_fail=True, wait=False, ) class ServerWalSyncLock(LockFile): """ This lock protects from multiple executions of the sync-wal command Creates a '.-sync-wal.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerWalSyncLock, self).__init__( os.path.join(lock_directory, ".%s-sync-wal.lock" % server_name), raise_if_fail=True, wait=True, ) barman-2.18/barman/process.py0000644000621200062120000001351314172556763014373 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. 
If not, see <http://www.gnu.org/licenses/>. import errno import logging import os import signal import time from glob import glob from barman import output from barman.exceptions import LockFileParsingError from barman.lockfile import ServerWalReceiveLock _logger = logging.getLogger(__name__) class ProcessInfo(object): """ Barman process representation """ def __init__(self, pid, server_name, task): """ This object contains all the information required to identify a barman process :param int pid: Process ID :param string server_name: Name of the server owning the process :param string task: Task name (receive-wal, archive-wal...) """ self.pid = pid self.server_name = server_name self.task = task class ProcessManager(object): """ Class for the management of barman processes owned by a server """ # Map containing the tasks we want to retrieve (and eventually manage) TASKS = {"receive-wal": ServerWalReceiveLock} def __init__(self, config): """ Build a ProcessManager for the provided server :param config: configuration of the server owning the process manager """ self.config = config self.process_list = [] # Cycle over the lock files in the lock directory for this server for path in glob( os.path.join( self.config.barman_lock_directory, ".%s-*.lock" % self.config.name ) ): for task, lock_class in self.TASKS.items(): # Check the lock_name against the lock class lock = lock_class.build_if_matches(path) if lock: try: # Use the lock to get the owner pid pid = lock.get_owner_pid() except LockFileParsingError: _logger.warning( "Skipping the %s process for server %s: " "Error reading the PID from lock file '%s'", task, self.config.name, path, ) break # If there is a pid save it in the process list if pid: self.process_list.append(ProcessInfo(pid, config.name, task)) # In any case, we found a match, so we must stop iterating # over the task types and handle the next path break def list(self, task_filter=None): """ Returns a list of processes owned by this server If no filter is provided, all the processes are returned. :param str task_filter: Type of process we want to retrieve :return list[ProcessInfo]: List of processes for the server """ server_tasks = [] for process in self.process_list: # Filter the processes if necessary if task_filter and process.task != task_filter: continue server_tasks.append(process) return server_tasks def kill(self, process_info, retries=10): """ Kill a process Returns True if killed successfully, False otherwise :param ProcessInfo process_info: representation of the process we want to kill :param int retries: number of times the method will check if the process is still alive :rtype: bool """ # Try to kill the process try: _logger.debug("Sending SIGINT to PID %s", process_info.pid) os.kill(process_info.pid, signal.SIGINT) _logger.debug("os.kill call succeeded") except OSError as e: _logger.debug("os.kill call failed: %s", e) # The process doesn't exist. It has probably just terminated. if e.errno == errno.ESRCH: return True # Something unexpected has happened output.error("%s", e) return False # Check if the process has been killed. The fastest (and maybe safest) # way is to send a kill with 0 as signal. # If the call raises an OSError exception, the process has been # killed successfully; otherwise it is still alive.
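# Illustrative aside, not part of the original module: signal.SIG_DFL
# compares equal to 0, so the probe performed in the loop below is the
# classic "signal 0" trick: os.kill(pid, 0) delivers nothing and only
# checks whether the PID still exists. A standalone sketch of the same
# idea (the pid value and helper name are hypothetical):
#
#     import errno
#     import os
#
#     def pid_alive(pid):
#         try:
#             os.kill(pid, 0)
#         except OSError as exc:
#             return exc.errno != errno.ESRCH
#         return True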
for counter in range(retries): try: _logger.debug( "Checking with SIG_DFL if PID %s is still alive", process_info.pid ) os.kill(process_info.pid, signal.SIG_DFL) _logger.debug("os.kill call succeeded") except OSError as e: _logger.debug("os.kill call failed: %s", e) # If the process doesn't exists, we are done. if e.errno == errno.ESRCH: return True # Something unexpected has happened output.error("%s", e) return False time.sleep(1) _logger.debug( "The PID %s has not been terminated after %s retries", process_info.pid, retries, ) return False barman-2.18/barman/server.py0000644000621200062120000047067714172556763014245 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents a Server. Barman is able to manage multiple servers. """ import datetime import errno import json import logging import os import re import shutil import sys import tarfile import time from collections import namedtuple from contextlib import closing, contextmanager from glob import glob from tempfile import NamedTemporaryFile import dateutil.tz import barman from barman import output, xlog from barman.backup import BackupManager from barman.command_wrappers import BarmanSubProcess, Command, Rsync from barman.copy_controller import RsyncCopyController from barman.exceptions import ( ArchiverFailure, BadXlogSegmentName, CommandFailedException, ConninfoException, InvalidRetentionPolicy, LockFileBusy, LockFileException, LockFilePermissionDenied, PostgresDuplicateReplicationSlot, PostgresException, PostgresInvalidReplicationSlot, PostgresIsInRecovery, PostgresReplicationSlotInUse, PostgresReplicationSlotsFull, PostgresSuperuserRequired, PostgresUnsupportedFeature, SyncError, SyncNothingToDo, SyncToBeDeleted, TimeoutError, UnknownBackupIdException, ) from barman.infofile import BackupInfo, LocalBackupInfo, WalFileInfo from barman.lockfile import ( ServerBackupIdLock, ServerBackupLock, ServerBackupSyncLock, ServerCronLock, ServerWalArchiveLock, ServerWalReceiveLock, ServerWalSyncLock, ServerXLOGDBLock, ) from barman.postgres import PostgreSQLConnection, StreamingConnection from barman.process import ProcessManager from barman.remote_status import RemoteStatusMixin from barman.retention_policies import RetentionPolicyFactory, RetentionPolicy from barman.utils import ( BarmanEncoder, file_md5, force_str, fsync_dir, fsync_file, human_readable_timedelta, is_power_of_two, mkpath, pretty_size, timeout, ) from barman.wal_archiver import FileWalArchiver, StreamingWalArchiver, WalArchiver PARTIAL_EXTENSION = ".partial" PRIMARY_INFO_FILE = "primary.info" SYNC_WALS_INFO_FILE = "sync-wals.info" _logger = logging.getLogger(__name__) # NamedTuple for a better readability of SyncWalInfo SyncWalInfo = namedtuple("SyncWalInfo", "last_wal last_position") class CheckStrategy(object): """ This strategy for the 'check' collects the results of every check and does not print any 
message. This basic class is also responsible for immediately logging any performed check with an error in case of check failure and a debug message in case of success. """ # create a namedtuple object called CheckResult to manage check results CheckResult = namedtuple("CheckResult", "server_name check status") # Default list used as a filter to identify non-critical checks NON_CRITICAL_CHECKS = [ "minimum redundancy requirements", "backup maximum age", "backup minimum size", "failed backups", "archiver errors", "empty incoming directory", "empty streaming directory", "incoming WALs directory", "streaming WALs directory", "wal maximum age", ] def __init__(self, ignore_checks=NON_CRITICAL_CHECKS): """ Silent Strategy constructor :param list ignore_checks: list of checks that can be ignored """ self.ignore_list = ignore_checks self.check_result = [] self.has_error = False self.running_check = None def init_check(self, check_name): """ Mark in the debug log when barman starts the execution of a check :param str check_name: the name of the check that is starting """ self.running_check = check_name _logger.debug("Starting check: '%s'" % check_name) def _check_name(self, check): if not check: check = self.running_check assert check return check def result(self, server_name, status, hint=None, check=None, perfdata=None): """ Store the result of a check (with no output). Log any check result (error or debug level). :param str server_name: the server is being checked :param bool status: True if succeeded :param str,None hint: hint to print if not None: :param str,None check: the check name :param str,None perfdata: additional performance data to print if not None """ check = self._check_name(check) if not status: # If the name of the check is not in the filter list, # treat it as a blocking error, then notify the error # and change the status of the strategy if check not in self.ignore_list: self.has_error = True _logger.error( "Check '%s' failed for server '%s'" % (check, server_name) ) else: # otherwise simply log the error (as info) _logger.info( "Ignoring failed check '%s' for server '%s'" % (check, server_name) ) else: _logger.debug("Check '%s' succeeded for server '%s'" % (check, server_name)) # Store the result and does not output anything result = self.CheckResult(server_name, check, status) self.check_result.append(result) self.running_check = None class CheckOutputStrategy(CheckStrategy): """ This strategy for the 'check' command immediately sends the result of a check to the designated output channel. This class derives from the basic CheckStrategy, reuses the same logic and adds output messages. """ def __init__(self): """ Output Strategy constructor """ super(CheckOutputStrategy, self).__init__(ignore_checks=()) def result(self, server_name, status, hint=None, check=None, perfdata=None): """ Store the result of a check. Log any check result (error or debug level). Output the result to the user :param str server_name: the server being checked :param str check: the check name :param bool status: True if succeeded :param str,None hint: hint to print if not None: :param str,None perfdata: additional performance data to print if not None """ check = self._check_name(check) super(CheckOutputStrategy, self).result( server_name, status, hint, check, perfdata ) # Send result to output output.result("check", server_name, check, status, hint, perfdata) class Server(RemoteStatusMixin): """ This class represents the PostgreSQL server to backup. 
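The silent strategy above is what programmatic callers use when they only care about the overall outcome, while CheckOutputStrategy backs the interactive 'barman check' command. A minimal sketch of the silent variant, assuming `server` is an already constructed barman.server.Server instance:

    from barman.server import CheckStrategy

    strategy = CheckStrategy()      # collects results, prints nothing
    server.check(strategy)
    if strategy.has_error:
        failed = [r.check for r in strategy.check_result if not r.status]
        print("blocking checks failed: %s" % ", ".join(failed))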
""" XLOG_DB = "xlog.db" # the strategy for the management of the results of the various checks __default_check_strategy = CheckOutputStrategy() def __init__(self, config): """ Server constructor. :param barman.config.ServerConfig config: the server configuration """ super(Server, self).__init__() self.config = config self.path = self._build_path(self.config.path_prefix) self.process_manager = ProcessManager(self.config) # If 'primary_ssh_command' is specified, the source of the backup # for this server is a Barman installation (not a Postgres server) self.passive_node = config.primary_ssh_command is not None self.enforce_retention_policies = False self.postgres = None self.streaming = None self.archivers = [] # Postgres configuration is available only if node is not passive if not self.passive_node: # Initialize the main PostgreSQL connection try: # Check that 'conninfo' option is properly set if config.conninfo is None: raise ConninfoException( "Missing 'conninfo' parameter for server '%s'" % config.name ) self.postgres = PostgreSQLConnection( config.conninfo, config.immediate_checkpoint, config.slot_name ) # If the PostgreSQLConnection creation fails, disable the Server except ConninfoException as e: self.config.disabled = True self.config.msg_list.append( "PostgreSQL connection: " + force_str(e).strip() ) # Initialize the streaming PostgreSQL connection only when # backup_method is postgres or the streaming_archiver is in use if config.backup_method == "postgres" or config.streaming_archiver: try: if config.streaming_conninfo is None: raise ConninfoException( "Missing 'streaming_conninfo' parameter for " "server '%s'" % config.name ) self.streaming = StreamingConnection(config.streaming_conninfo) # If the StreamingConnection creation fails, disable the server except ConninfoException as e: self.config.disabled = True self.config.msg_list.append( "Streaming connection: " + force_str(e).strip() ) # Initialize the backup manager self.backup_manager = BackupManager(self) if not self.passive_node: # Initialize the StreamingWalArchiver # WARNING: Order of items in self.archivers list is important! # The files will be archived in that order. if self.config.streaming_archiver: try: self.archivers.append(StreamingWalArchiver(self.backup_manager)) # If the StreamingWalArchiver creation fails, # disable the server except AttributeError as e: _logger.debug(e) self.config.disabled = True self.config.msg_list.append( "Unable to initialise the streaming archiver" ) # IMPORTANT: The following lines of code have been # temporarily commented in order to make the code # back-compatible after the introduction of 'archiver=off' # as default value in Barman 2.0. # When the back compatibility feature for archiver will be # removed, the following lines need to be decommented. # ARCHIVER_OFF_BACKCOMPATIBILITY - START OF CODE # # At least one of the available archive modes should be enabled # if len(self.archivers) < 1: # self.config.disabled = True # self.config.msg_list.append( # "No archiver enabled for server '%s'. " # "Please turn on 'archiver', 'streaming_archiver' or both" # % config.name) # ARCHIVER_OFF_BACKCOMPATIBILITY - END OF CODE # Sanity check: if file based archiver is disabled, and only # WAL streaming is enabled, a replication slot name must be # configured. 
if ( not self.config.archiver and self.config.streaming_archiver and self.config.slot_name is None ): self.config.disabled = True self.config.msg_list.append( "Streaming-only archiver requires 'streaming_conninfo' " "and 'slot_name' options to be properly configured" ) # ARCHIVER_OFF_BACKCOMPATIBILITY - START OF CODE # IMPORTANT: This is a back-compatibility feature that has # been added in Barman 2.0. It highlights a deprecated # behaviour, and helps users during this transition phase. # It forces 'archiver=on' when both archiver and streaming_archiver # are set to 'off' (default values) and displays a warning, # requesting users to explicitly set the value in the # configuration. # When this back-compatibility feature will be removed from Barman # (in a couple of major releases), developers will need to remove # this block completely and reinstate the block of code you find # a few lines below (search for ARCHIVER_OFF_BACKCOMPATIBILITY # throughout the code). if ( self.config.archiver is False and self.config.streaming_archiver is False ): output.warning( "No archiver enabled for server '%s'. " "Please turn on 'archiver', " "'streaming_archiver' or both", self.config.name, ) output.warning("Forcing 'archiver = on'") self.config.archiver = True # ARCHIVER_OFF_BACKCOMPATIBILITY - END OF CODE # Initialize the FileWalArchiver # WARNING: Order of items in self.archivers list is important! # The files will be archived in that order. if self.config.archiver: try: self.archivers.append(FileWalArchiver(self.backup_manager)) except AttributeError as e: _logger.debug(e) self.config.disabled = True self.config.msg_list.append( "Unable to initialise the file based archiver" ) # Set bandwidth_limit if self.config.bandwidth_limit: try: self.config.bandwidth_limit = int(self.config.bandwidth_limit) except ValueError: _logger.warning( 'Invalid bandwidth_limit "%s" for server "%s" ' '(fallback to "0")' % (self.config.bandwidth_limit, self.config.name) ) self.config.bandwidth_limit = None # set tablespace_bandwidth_limit if self.config.tablespace_bandwidth_limit: rules = {} for rule in self.config.tablespace_bandwidth_limit.split(): try: key, value = rule.split(":", 1) value = int(value) if value != self.config.bandwidth_limit: rules[key] = value except ValueError: _logger.warning( "Invalid tablespace_bandwidth_limit rule '%s'" % rule ) if len(rules) > 0: self.config.tablespace_bandwidth_limit = rules else: self.config.tablespace_bandwidth_limit = None # Set minimum redundancy (default 0) try: self.config.minimum_redundancy = int(self.config.minimum_redundancy) if self.config.minimum_redundancy < 0: _logger.warning( 'Negative value of minimum_redundancy "%s" ' 'for server "%s" (fallback to "0")' % (self.config.minimum_redundancy, self.config.name) ) self.config.minimum_redundancy = 0 except ValueError: _logger.warning( 'Invalid minimum_redundancy "%s" for server "%s" ' '(fallback to "0")' % (self.config.minimum_redundancy, self.config.name) ) self.config.minimum_redundancy = 0 # Initialise retention policies self._init_retention_policies() def _init_retention_policies(self): # Set retention policy mode if self.config.retention_policy_mode != "auto": _logger.warning( 'Unsupported retention_policy_mode "%s" for server "%s" ' '(fallback to "auto")' % (self.config.retention_policy_mode, self.config.name) ) self.config.retention_policy_mode = "auto" # If retention_policy is present, enforce them if self.config.retention_policy and not isinstance( self.config.retention_policy, RetentionPolicy ): # Check 
wal_retention_policy if self.config.wal_retention_policy != "main": _logger.warning( 'Unsupported wal_retention_policy value "%s" ' 'for server "%s" (fallback to "main")' % (self.config.wal_retention_policy, self.config.name) ) self.config.wal_retention_policy = "main" # Create retention policy objects try: rp = RetentionPolicyFactory.create( "retention_policy", self.config.retention_policy, server=self ) # Reassign the configuration value (we keep it in one place) self.config.retention_policy = rp _logger.debug( "Retention policy for server %s: %s" % (self.config.name, self.config.retention_policy) ) try: rp = RetentionPolicyFactory.create( "wal_retention_policy", self.config.wal_retention_policy, server=self, ) # Reassign the configuration value # (we keep it in one place) self.config.wal_retention_policy = rp _logger.debug( "WAL retention policy for server %s: %s" % (self.config.name, self.config.wal_retention_policy) ) except InvalidRetentionPolicy: _logger.exception( 'Invalid wal_retention_policy setting "%s" ' 'for server "%s" (fallback to "main")' % (self.config.wal_retention_policy, self.config.name) ) rp = RetentionPolicyFactory.create( "wal_retention_policy", "main", server=self ) self.config.wal_retention_policy = rp self.enforce_retention_policies = True except InvalidRetentionPolicy: _logger.exception( 'Invalid retention_policy setting "%s" for server "%s"' % (self.config.retention_policy, self.config.name) ) def get_identity_file_path(self): """ Get the path of the file that should contain the identity of the cluster :rtype: str """ return os.path.join(self.config.backup_directory, "identity.json") def write_identity_file(self): """ Store the identity of the server if it doesn't already exist. """ file_path = self.get_identity_file_path() # Do not write the identity if file already exists if os.path.exists(file_path): return systemid = self.systemid if systemid: try: with open(file_path, "w") as fp: json.dump( { "systemid": systemid, "version": self.postgres.server_major_version, }, fp, indent=4, sort_keys=True, ) fp.write("\n") except IOError: _logger.exception( 'Cannot write system Id file for server "%s"' % (self.config.name) ) def read_identity_file(self): """ Read the server identity :rtype: dict[str,str] """ file_path = self.get_identity_file_path() try: with open(file_path, "r") as fp: return json.load(fp) except IOError: _logger.exception( 'Cannot read system Id file for server "%s"' % (self.config.name) ) return {} def close(self): """ Close all the open connections to PostgreSQL """ if self.postgres: self.postgres.close() if self.streaming: self.streaming.close() def check(self, check_strategy=__default_check_strategy): """ Implements the 'server check' command and makes sure SSH and PostgreSQL connections work properly. It checks also that backup directories exist (and if not, it creates them). 
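For reference, write_identity_file() above serialises only two keys, so on disk the identity file is a small JSON document along these lines (values are hypothetical):

    {
        "systemid": "6995211969597155297",
        "version": "13"
    }

check_identity(), further below, compares the stored systemid with the one reported by the live PostgreSQL connections.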
The check command will time out after a time interval defined by the check_timeout configuration value (default 30 seconds) :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ try: with timeout(self.config.check_timeout): # Check WAL archive self.check_archive(check_strategy) # Postgres configuration is not available on passive nodes if not self.passive_node: self.check_postgres(check_strategy) # Check barman directories from barman configuration self.check_directories(check_strategy) # Check retention policies self.check_retention_policy_settings(check_strategy) # Check for backup validity self.check_backup_validity(check_strategy) # Check WAL archiving is happening self.check_wal_validity(check_strategy) # Executes the backup manager set of checks self.backup_manager.check(check_strategy) # Check if the msg_list of the server # contains messages and output eventual failures self.check_configuration(check_strategy) # Check the system Id coherence between # streaming and normal connections self.check_identity(check_strategy) # Executes check() for every archiver, passing # remote status information for efficiency for archiver in self.archivers: archiver.check(check_strategy) # Check archiver errors self.check_archiver_errors(check_strategy) except TimeoutError: # The check timed out. # Add a failed entry to the check strategy for this. _logger.debug( "Check command timed out executing '%s' check" % check_strategy.running_check ) check_strategy.result( self.config.name, False, hint="barman check command timed out", check="check timeout", ) def check_archive(self, check_strategy): """ Checks WAL archive :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("WAL archive") # Make sure that WAL archiving has been setup # XLOG_DB needs to exist and its size must be > 0 # NOTE: we do not need to acquire a lock in this phase xlogdb_empty = True if os.path.exists(self.xlogdb_file_name): with open(self.xlogdb_file_name, "rb") as fxlogdb: if os.fstat(fxlogdb.fileno()).st_size > 0: xlogdb_empty = False # NOTE: This check needs to be only visible if it fails if xlogdb_empty: # Skip the error if we have a terminated backup # with status WAITING_FOR_WALS. 
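# Illustrative aside, not part of the original module: every branch of
# check() above runs inside `with timeout(self.config.check_timeout)`, and
# exceeding that interval raises the TimeoutError imported from
# barman.exceptions, which check() converts into a failed "check timeout"
# entry. A minimal sketch (slow_probe is a hypothetical long-running call):
#
#     from barman.exceptions import TimeoutError
#     from barman.utils import timeout
#
#     try:
#         with timeout(30):
#             slow_probe()
#     except TimeoutError:
#         print("the probe did not finish within 30 seconds")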
# TODO: Improve this check backup_id = self.get_last_backup_id([BackupInfo.WAITING_FOR_WALS]) if not backup_id: check_strategy.result( self.config.name, False, hint="please make sure WAL shipping is setup", ) # Check the number of wals in the incoming directory self._check_wal_queue(check_strategy, "incoming", "archiver") # Check the number of wals in the streaming directory self._check_wal_queue(check_strategy, "streaming", "streaming_archiver") def _check_wal_queue(self, check_strategy, dir_name, archiver_name): """ Check if one of the wal queue directories beyond the max file threshold """ # Read the wal queue location from the configuration config_name = "%s_wals_directory" % dir_name assert hasattr(self.config, config_name) incoming_dir = getattr(self.config, config_name) # Check if the archiver is enabled assert hasattr(self.config, archiver_name) enabled = getattr(self.config, archiver_name) # Inspect the wal queue directory file_count = 0 for file_item in glob(os.path.join(incoming_dir, "*")): # Ignore temporary files if file_item.endswith(".tmp"): continue file_count += 1 max_incoming_wal = self.config.max_incoming_wals_queue # Subtract one from the count because of .partial file inside the # streaming directory if dir_name == "streaming": file_count -= 1 # If this archiver is disabled, check the number of files in the # corresponding directory. # If the directory is NOT empty, fail the check and warn the user. # NOTE: This check is visible only when it fails check_strategy.init_check("empty %s directory" % dir_name) if not enabled: if file_count > 0: check_strategy.result( self.config.name, False, hint="'%s' must be empty when %s=off" % (incoming_dir, archiver_name), ) # No more checks are required if the archiver # is not enabled return # At this point if max_wals_count is none, # means that no limit is set so we just need to return if max_incoming_wal is None: return check_strategy.init_check("%s WALs directory" % dir_name) if file_count > max_incoming_wal: msg = "there are too many WALs in queue: %s, max %s" % ( file_count, max_incoming_wal, ) check_strategy.result(self.config.name, False, hint=msg) def check_postgres(self, check_strategy): """ Checks PostgreSQL connection :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("PostgreSQL") # Take the status of the remote server remote_status = self.get_remote_status() if remote_status.get("server_txt_version"): check_strategy.result(self.config.name, True) else: check_strategy.result(self.config.name, False) return # Check for superuser privileges or # privileges needed to perform backups if remote_status.get("has_backup_privileges") is not None: check_strategy.init_check( "superuser or standard user with backup privileges" ) if remote_status.get("has_backup_privileges"): check_strategy.result(self.config.name, True) else: check_strategy.result( self.config.name, False, hint="privileges for PostgreSQL backup functions are " "required (see documentation)", check="no access to backup functions", ) if "streaming_supported" in remote_status: check_strategy.init_check("PostgreSQL streaming") hint = None # If a streaming connection is available, # add its status to the output of the check if remote_status["streaming_supported"] is None: hint = remote_status["connection_error"] elif not remote_status["streaming_supported"]: hint = "Streaming connection not supported for PostgreSQL < 9.2" check_strategy.result( self.config.name, 
remote_status.get("streaming"), hint=hint ) # Check wal_level parameter: must be different from 'minimal' # the parameter has been introduced in postgres >= 9.0 if "wal_level" in remote_status: check_strategy.init_check("wal_level") if remote_status["wal_level"] != "minimal": check_strategy.result(self.config.name, True) else: check_strategy.result( self.config.name, False, hint="please set it to a higher level than 'minimal'", ) # Check the presence and the status of the configured replication slot # This check will be skipped if `slot_name` is undefined if self.config.slot_name: check_strategy.init_check("replication slot") slot = remote_status["replication_slot"] # The streaming_archiver is enabled if self.config.streaming_archiver is True: # Error if PostgreSQL is too old if not remote_status["replication_slot_support"]: check_strategy.result( self.config.name, False, hint="slot_name parameter set but PostgreSQL server " "is too old (%s < 9.4)" % remote_status["server_txt_version"], ) # Replication slots are supported else: # The slot is not present if slot is None: check_strategy.result( self.config.name, False, hint="replication slot '%s' doesn't exist. " "Please execute 'barman receive-wal " "--create-slot %s'" % (self.config.slot_name, self.config.name), ) else: # The slot is present but not initialised if slot.restart_lsn is None: check_strategy.result( self.config.name, False, hint="slot '%s' not initialised: is " "'receive-wal' running?" % self.config.slot_name, ) # The slot is present but not active elif slot.active is False: check_strategy.result( self.config.name, False, hint="slot '%s' not active: is " "'receive-wal' running?" % self.config.slot_name, ) else: check_strategy.result(self.config.name, True) else: # If the streaming_archiver is disabled and the slot_name # option is present in the configuration, we check that # a replication slot with the specified name is NOT present # and NOT active. # NOTE: This is not a failure, just a warning. 
if slot is not None: if slot.restart_lsn is not None: slot_status = "initialised" # Check if the slot is also active if slot.active: slot_status = "active" # Warn the user check_strategy.result( self.config.name, True, hint="WARNING: slot '%s' is %s but not required " "by the current config" % (self.config.slot_name, slot_status), ) def _make_directories(self): """ Make backup directories in case they do not exist """ for key in self.config.KEYS: if key.endswith("_directory") and hasattr(self.config, key): val = getattr(self.config, key) if val is not None and not os.path.isdir(val): # noinspection PyTypeChecker os.makedirs(val) def check_directories(self, check_strategy): """ Checks backup directories and creates them if they do not exist :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("directories") if not self.config.disabled: try: self._make_directories() except OSError as e: check_strategy.result( self.config.name, False, "%s: %s" % (e.filename, e.strerror) ) else: check_strategy.result(self.config.name, True) def check_configuration(self, check_strategy): """ Check for error messages in the message list of the server and output eventual errors :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("configuration") if len(self.config.msg_list): check_strategy.result(self.config.name, False) for conflict_paths in self.config.msg_list: output.info("\t\t%s" % conflict_paths) def check_retention_policy_settings(self, check_strategy): """ Checks retention policy setting :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("retention policy settings") config = self.config if config.retention_policy and not self.enforce_retention_policies: check_strategy.result(self.config.name, False, hint="see log") else: check_strategy.result(self.config.name, True) def check_backup_validity(self, check_strategy): """ Check if backup validity requirements are satisfied :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("backup maximum age") # first check: check backup maximum age if self.config.last_backup_maximum_age is not None: # get maximum age information backup_age = self.backup_manager.validate_last_backup_maximum_age( self.config.last_backup_maximum_age ) # format the output check_strategy.result( self.config.name, backup_age[0], hint="interval provided: %s, latest backup age: %s" % ( human_readable_timedelta(self.config.last_backup_maximum_age), backup_age[1], ), ) else: # last_backup_maximum_age provided by the user check_strategy.result( self.config.name, True, hint="no last_backup_maximum_age provided" ) # second check: check backup minimum size check_strategy.init_check("backup minimum size") if self.config.last_backup_minimum_size is not None: backup_size = self.backup_manager.validate_last_backup_min_size( self.config.last_backup_minimum_size ) gtlt = ">" if backup_size[0] else "<" check_strategy.result( self.config.name, backup_size[0], hint="last backup size %s %s %s minimum" % ( pretty_size(backup_size[1]), gtlt, pretty_size(self.config.last_backup_minimum_size), ), perfdata=backup_size[1], ) else: # no last_backup_minimum_size provided by the user backup_size = self.backup_manager.validate_last_backup_min_size(0) check_strategy.result( self.config.name, 
True, hint=pretty_size(backup_size[1]), perfdata=backup_size[1], ) def _check_wal_info(self, wal_info, last_wal_maximum_age): """ Checks the supplied wal_info is within the last_wal_maximum_age. :param last_backup_minimum_age: timedelta representing the time from now during which a WAL is considered valid :return tuple: a tuple containing the boolean result of the check, a string with auxiliary information about the check, and an integer representing the size of the WAL in bytes """ wal_last = datetime.datetime.fromtimestamp( wal_info["wal_last_timestamp"], dateutil.tz.tzlocal() ) now = datetime.datetime.now(dateutil.tz.tzlocal()) wal_age = now - wal_last if wal_age <= last_wal_maximum_age: wal_age_isok = True else: wal_age_isok = False wal_message = "interval provided: %s, latest wal age: %s" % ( human_readable_timedelta(last_wal_maximum_age), human_readable_timedelta(wal_age), ) if wal_info["wal_until_next_size"] is None: wal_size = 0 else: wal_size = wal_info["wal_until_next_size"] return wal_age_isok, wal_message, wal_size def check_wal_validity(self, check_strategy): """ Check if wal archiving requirements are satisfied """ check_strategy.init_check("wal maximum age") backup_id = self.backup_manager.get_last_backup_id() backup_info = self.get_backup(backup_id) if backup_info is not None: wal_info = self.get_wal_info(backup_info) # first check: check wal maximum age if self.config.last_wal_maximum_age is not None: # get maximum age information if backup_info is None or wal_info["wal_last_timestamp"] is None: # No WAL files received # (we should have the .backup file, as a minimum) # This may also be an indication that 'barman cron' is not # running wal_age_isok = False wal_message = "No WAL files archived for last backup" wal_size = 0 else: wal_age_isok, wal_message, wal_size = self._check_wal_info( wal_info, self.config.last_wal_maximum_age ) # format the output check_strategy.result(self.config.name, wal_age_isok, hint=wal_message) else: # no last_wal_maximum_age provided by the user if backup_info is None or wal_info["wal_until_next_size"] is None: wal_size = 0 else: wal_size = wal_info["wal_until_next_size"] check_strategy.result( self.config.name, True, hint="no last_wal_maximum_age provided" ) check_strategy.init_check("wal size") check_strategy.result( self.config.name, True, pretty_size(wal_size), perfdata=wal_size ) def check_archiver_errors(self, check_strategy): """ Checks the presence of archiving errors :param CheckStrategy check_strategy: the strategy for the management of the results of the check """ check_strategy.init_check("archiver errors") if os.path.isdir(self.config.errors_directory): errors = os.listdir(self.config.errors_directory) else: errors = [] check_strategy.result( self.config.name, len(errors) == 0, hint=WalArchiver.summarise_error_files(errors), ) def check_identity(self, check_strategy): """ Check the systemid retrieved from the streaming connection is the same that is retrieved from the standard connection, and then verifies it matches the one stored on disk. 
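The age comparison performed by _check_wal_info() above boils down to simple timedelta arithmetic; a self-contained sketch with hypothetical values:

    import datetime
    import dateutil.tz

    last_wal_maximum_age = datetime.timedelta(hours=1)
    wal_last_timestamp = 1650000000          # as stored for the last WAL
    wal_last = datetime.datetime.fromtimestamp(
        wal_last_timestamp, dateutil.tz.tzlocal()
    )
    wal_age = datetime.datetime.now(dateutil.tz.tzlocal()) - wal_last
    wal_age_isok = wal_age <= last_wal_maximum_age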
:param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("systemid coherence") remote_status = self.get_remote_status() # Get system identifier from streaming and standard connections systemid_from_streaming = remote_status.get("streaming_systemid") systemid_from_postgres = remote_status.get("postgres_systemid") # If both available, makes sure they are coherent with each other if systemid_from_streaming and systemid_from_postgres: if systemid_from_streaming != systemid_from_postgres: check_strategy.result( self.config.name, systemid_from_streaming == systemid_from_postgres, hint="is the streaming DSN targeting the same server " "of the PostgreSQL connection string?", ) return systemid_from_server = systemid_from_streaming or systemid_from_postgres if not systemid_from_server: # Can't check without system Id information check_strategy.result(self.config.name, True, hint="no system Id available") return # Retrieves the content on disk and matches it with the live ID file_path = self.get_identity_file_path() if not os.path.exists(file_path): # We still don't have the systemid cached on disk, # so let's wait until we store it check_strategy.result( self.config.name, True, hint="no system Id stored on disk" ) return identity_from_file = self.read_identity_file() if systemid_from_server != identity_from_file.get("systemid"): check_strategy.result( self.config.name, False, hint="the system Id of the connected PostgreSQL server " 'changed, stored in "%s"' % file_path, ) else: check_strategy.result(self.config.name, True) def status_postgres(self): """ Status of PostgreSQL server """ remote_status = self.get_remote_status() if remote_status["server_txt_version"]: output.result( "status", self.config.name, "pg_version", "PostgreSQL version", remote_status["server_txt_version"], ) else: output.result( "status", self.config.name, "pg_version", "PostgreSQL version", "FAILED trying to get PostgreSQL version", ) return # Define the cluster state as pg_controldata do. 
if remote_status["is_in_recovery"]: output.result( "status", self.config.name, "is_in_recovery", "Cluster state", "in archive recovery", ) else: output.result( "status", self.config.name, "is_in_recovery", "Cluster state", "in production", ) if remote_status["pgespresso_installed"]: output.result( "status", self.config.name, "pgespresso", "pgespresso extension", "Available", ) else: output.result( "status", self.config.name, "pgespresso", "pgespresso extension", "Not available", ) if remote_status.get("current_size") is not None: output.result( "status", self.config.name, "current_size", "Current data size", pretty_size(remote_status["current_size"]), ) if remote_status["data_directory"]: output.result( "status", self.config.name, "data_directory", "PostgreSQL Data directory", remote_status["data_directory"], ) if remote_status["current_xlog"]: output.result( "status", self.config.name, "current_xlog", "Current WAL segment", remote_status["current_xlog"], ) def status_wal_archiver(self): """ Status of WAL archiver(s) """ for archiver in self.archivers: archiver.status() def status_retention_policies(self): """ Status of retention policies enforcement """ if self.enforce_retention_policies: output.result( "status", self.config.name, "retention_policies", "Retention policies", "enforced " "(mode: %s, retention: %s, WAL retention: %s)" % ( self.config.retention_policy_mode, self.config.retention_policy, self.config.wal_retention_policy, ), ) else: output.result( "status", self.config.name, "retention_policies", "Retention policies", "not enforced", ) def status(self): """ Implements the 'server-status' command. """ if self.config.description: output.result( "status", self.config.name, "description", "Description", self.config.description, ) output.result( "status", self.config.name, "active", "Active", self.config.active ) output.result( "status", self.config.name, "disabled", "Disabled", self.config.disabled ) # Postgres status is available only if node is not passive if not self.passive_node: self.status_postgres() self.status_wal_archiver() output.result( "status", self.config.name, "passive_node", "Passive node", self.passive_node, ) self.status_retention_policies() # Executes the backup manager status info method self.backup_manager.status() def fetch_remote_status(self): """ Get the status of the remote server This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. :rtype: dict[str, None|str] """ result = {} # Merge status for a postgres connection if self.postgres: result.update(self.postgres.get_remote_status()) # Merge status for a streaming connection if self.streaming: result.update(self.streaming.get_remote_status()) # Merge status for each archiver for archiver in self.archivers: result.update(archiver.get_remote_status()) # Merge status defined by the BackupManager result.update(self.backup_manager.get_remote_status()) return result def show(self): """ Shows the server configuration """ # Populate result map with all the required keys result = self.config.to_json() # Is the server a passive node? 
result["passive_node"] = self.passive_node # Skip remote status if the server is passive if not self.passive_node: remote_status = self.get_remote_status() result.update(remote_status) # Backup maximum age section if self.config.last_backup_maximum_age is not None: age = self.backup_manager.validate_last_backup_maximum_age( self.config.last_backup_maximum_age ) # If latest backup is between the limits of the # last_backup_maximum_age configuration, display how old is # the latest backup. if age[0]: msg = "%s (latest backup: %s )" % ( human_readable_timedelta(self.config.last_backup_maximum_age), age[1], ) else: # If latest backup is outside the limits of the # last_backup_maximum_age configuration (or the configuration # value is none), warn the user. msg = "%s (WARNING! latest backup is %s old)" % ( human_readable_timedelta(self.config.last_backup_maximum_age), age[1], ) result["last_backup_maximum_age"] = msg else: result["last_backup_maximum_age"] = "None" output.result("show_server", self.config.name, result) def delete_backup(self, backup): """Deletes a backup :param backup: the backup to delete """ try: # Lock acquisition: if you can acquire a ServerBackupLock # it means that no backup process is running on that server, # so there is no need to check the backup status. # Simply proceed with the normal delete process. server_backup_lock = ServerBackupLock( self.config.barman_lock_directory, self.config.name ) server_backup_lock.acquire( server_backup_lock.raise_if_fail, server_backup_lock.wait ) server_backup_lock.release() except LockFileBusy: # Otherwise if the lockfile is busy, a backup process is actually # running on that server. To be sure that it's safe # to delete the backup, we must check its status and its position # in the catalogue. # If it is the first and it is STARTED or EMPTY, we are trying to # remove a running backup. This operation must be forbidden. # Otherwise, normally delete the backup. first_backup_id = self.get_first_backup_id(BackupInfo.STATUS_ALL) if backup.backup_id == first_backup_id and backup.status in ( BackupInfo.STARTED, BackupInfo.EMPTY, ): output.error( "Cannot delete a running backup (%s %s)" % (self.config.name, backup.backup_id) ) return except LockFilePermissionDenied as e: # We cannot access the lockfile. # Exit without removing the backup. output.error("Permission denied, unable to access '%s'" % e) return try: # Take care of the backup lock. # Only one process can modify a backup at a time lock = ServerBackupIdLock( self.config.barman_lock_directory, self.config.name, backup.backup_id ) with lock: deleted = self.backup_manager.delete_backup(backup) # At this point no-one should try locking a backup that # doesn't exists, so we can remove the lock # WARNING: the previous statement is true only as long as # no-one wait on this lock if deleted: os.remove(lock.filename) return deleted except LockFileBusy: # If another process is holding the backup lock, # warn the user and terminate output.error( "Another process is holding the lock for " "backup %s of server %s." % (backup.backup_id, self.config.name) ) return except LockFilePermissionDenied as e: # We cannot access the lockfile. 
# warn the user and terminate output.error("Permission denied, unable to access '%s'" % e) return def backup(self, wait=False, wait_timeout=None): """ Performs a backup for the server :param bool wait: wait for all the required WAL files to be archived :param int|None wait_timeout: the time, in seconds, the backup will wait for the required WAL files to be archived before timing out """ # The 'backup' command is not available on a passive node. # We assume that if we get here the node is not passive assert not self.passive_node try: # Default strategy for check in backup is CheckStrategy # This strategy does not print any output - it only logs checks strategy = CheckStrategy() self.check(strategy) if strategy.has_error: output.error( "Impossible to start the backup. Check the log " "for more details, or run 'barman check %s'" % self.config.name ) return # check required backup directories exist self._make_directories() except OSError as e: output.error("failed to create %s directory: %s", e.filename, e.strerror) return # Save the database identity self.write_identity_file() # Make sure we are not wasting an precious streaming PostgreSQL # connection that may have been opened by the self.check() call if self.streaming: self.streaming.close() try: # lock acquisition and backup execution with ServerBackupLock(self.config.barman_lock_directory, self.config.name): backup_info = self.backup_manager.backup( wait=wait, wait_timeout=wait_timeout ) # Archive incoming WALs and update WAL catalogue self.archive_wal(verbose=False) # Invoke sanity check of the backup if backup_info.status == BackupInfo.WAITING_FOR_WALS: self.check_backup(backup_info) # At this point is safe to remove any remaining WAL file before the # first backup previous_backup = self.get_previous_backup(backup_info.backup_id) if not previous_backup: self.backup_manager.remove_wal_before_backup(backup_info) if backup_info.status == BackupInfo.WAITING_FOR_WALS: output.warning( "IMPORTANT: this backup is classified as " "WAITING_FOR_WALS, meaning that Barman has not received " "yet all the required WAL files for the backup " "consistency.\n" "This is a common behaviour in concurrent backup " "scenarios, and Barman automatically set the backup as " "DONE once all the required WAL files have been " "archived.\n" "Hint: execute the backup command with '--wait'" ) except LockFileBusy: output.error("Another backup process is running") except LockFilePermissionDenied as e: output.error("Permission denied, unable to access '%s'" % e) def get_available_backups(self, status_filter=BackupManager.DEFAULT_STATUS_FILTER): """ Get a list of available backups param: status_filter: the status of backups to return, default to BackupManager.DEFAULT_STATUS_FILTER """ return self.backup_manager.get_available_backups(status_filter) def get_last_backup_id(self, status_filter=BackupManager.DEFAULT_STATUS_FILTER): """ Get the id of the latest/last backup in the catalog (if exists) :param status_filter: The status of the backup to return, default to DEFAULT_STATUS_FILTER. :return string|None: ID of the backup """ return self.backup_manager.get_last_backup_id(status_filter) def get_first_backup_id(self, status_filter=BackupManager.DEFAULT_STATUS_FILTER): """ Get the id of the oldest/first backup in the catalog (if exists) :param status_filter: The status of the backup to return, default to DEFAULT_STATUS_FILTER. 
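Putting the pieces above together, a programmatic caller only needs the Server object; backup() itself performs the preliminary checks, takes the per-server lock and archives the pending WALs afterwards. A hedged sketch, assuming `server` is an existing barman.server.Server for a non-passive node:

    # block until all WALs required for consistency have been archived,
    # giving up after one hour
    server.backup(wait=True, wait_timeout=3600)
    backup_id = server.get_last_backup_id()
    print("latest backup: %s" % backup_id)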
:return string|None: ID of the backup """ return self.backup_manager.get_first_backup_id(status_filter) def list_backups(self): """ Lists all the available backups for the server """ retention_status = self.report_backups() backups = self.get_available_backups(BackupInfo.STATUS_ALL) for key in sorted(backups.keys(), reverse=True): backup = backups[key] backup_size = backup.size or 0 wal_size = 0 rstatus = None if backup.status in BackupInfo.STATUS_COPY_DONE: try: wal_info = self.get_wal_info(backup) backup_size += wal_info["wal_size"] wal_size = wal_info["wal_until_next_size"] except BadXlogSegmentName as e: output.error( "invalid WAL segment name %r\n" 'HINT: Please run "barman rebuild-xlogdb %s" ' "to solve this issue", force_str(e), self.config.name, ) if ( self.enforce_retention_policies and retention_status[backup.backup_id] != BackupInfo.VALID ): rstatus = retention_status[backup.backup_id] output.result("list_backup", backup, backup_size, wal_size, rstatus) def get_backup(self, backup_id): """ Return the backup information for the given backup id. If the backup_id is None or backup.info file doesn't exists, it returns None. :param str|None backup_id: the ID of the backup to return :rtype: barman.infofile.LocalBackupInfo|None """ return self.backup_manager.get_backup(backup_id) def get_previous_backup(self, backup_id): """ Get the previous backup (if any) from the catalog :param backup_id: the backup id from which return the previous """ return self.backup_manager.get_previous_backup(backup_id) def get_next_backup(self, backup_id): """ Get the next backup (if any) from the catalog :param backup_id: the backup id from which return the next """ return self.backup_manager.get_next_backup(backup_id) def get_required_xlog_files( self, backup, target_tli=None, target_time=None, target_xid=None ): """ Get the xlog files required for a recovery params: BackupInfo backup: a backup object params: target_tli : target timeline param: target_time: target time """ begin = backup.begin_wal end = backup.end_wal # If timeline isn't specified, assume it is the same timeline # of the backup if not target_tli: target_tli, _, _ = xlog.decode_segment_name(end) with self.xlogdb() as fxlogdb: for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) # Handle .history files: add all of them to the output, # regardless of their age if xlog.is_history_file(wal_info.name): yield wal_info continue if wal_info.name < begin: continue tli, _, _ = xlog.decode_segment_name(wal_info.name) if tli > target_tli: continue yield wal_info if wal_info.name > end: end = wal_info.name if target_time and wal_info.time > target_time: break # return all the remaining history files for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) if xlog.is_history_file(wal_info.name): yield wal_info # TODO: merge with the previous def get_wal_until_next_backup(self, backup, include_history=False): """ Get the xlog files between backup and the next :param BackupInfo backup: a backup object, the starting point to retrieve WALs :param bool include_history: option for the inclusion of include_history files into the output """ begin = backup.begin_wal next_end = None if self.get_next_backup(backup.backup_id): next_end = self.get_next_backup(backup.backup_id).end_wal backup_tli, _, _ = xlog.decode_segment_name(begin) with self.xlogdb() as fxlogdb: for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) # Handle .history files: add all of them to the output, # regardless of their age, if requested (the 
'include_history' # parameter is True) if xlog.is_history_file(wal_info.name): if include_history: yield wal_info continue if wal_info.name < begin: continue tli, _, _ = xlog.decode_segment_name(wal_info.name) if tli > backup_tli: continue if not xlog.is_wal_file(wal_info.name): continue if next_end and wal_info.name > next_end: break yield wal_info def get_wal_full_path(self, wal_name): """ Build the full path of a WAL for a server given the name :param wal_name: WAL file name """ # Build the path which contains the file hash_dir = os.path.join(self.config.wals_directory, xlog.hash_dir(wal_name)) # Build the WAL file full path full_path = os.path.join(hash_dir, wal_name) return full_path def get_wal_possible_paths(self, wal_name, partial=False): """ Build a list of possible positions of a WAL file :param str wal_name: WAL file name :param bool partial: add also the '.partial' paths """ paths = list() # Path in the archive hash_dir = os.path.join(self.config.wals_directory, xlog.hash_dir(wal_name)) full_path = os.path.join(hash_dir, wal_name) paths.append(full_path) # Path in incoming directory incoming_path = os.path.join(self.config.incoming_wals_directory, wal_name) paths.append(incoming_path) # Path in streaming directory streaming_path = os.path.join(self.config.streaming_wals_directory, wal_name) paths.append(streaming_path) # If partial files are required check also the '.partial' path if partial: paths.append(streaming_path + PARTIAL_EXTENSION) # Add the streaming_path again to handle races with pg_receivewal # completing the WAL file paths.append(streaming_path) # The following two path are only useful to retrieve the last # incomplete segment archived before a promotion. paths.append(full_path + PARTIAL_EXTENSION) paths.append(incoming_path + PARTIAL_EXTENSION) # Append the archive path again, to handle races with the archiver paths.append(full_path) return paths def get_wal_info(self, backup_info): """ Returns information about WALs for the given backup :param barman.infofile.LocalBackupInfo backup_info: the target backup """ begin = backup_info.begin_wal end = backup_info.end_wal # counters wal_info = dict.fromkeys( ( "wal_num", "wal_size", "wal_until_next_num", "wal_until_next_size", "wal_until_next_compression_ratio", "wal_compression_ratio", ), 0, ) # First WAL (always equal to begin_wal) and Last WAL names and ts wal_info["wal_first"] = None wal_info["wal_first_timestamp"] = None wal_info["wal_last"] = None wal_info["wal_last_timestamp"] = None # WAL rate (default 0.0 per second) wal_info["wals_per_second"] = 0.0 for item in self.get_wal_until_next_backup(backup_info): if item.name == begin: wal_info["wal_first"] = item.name wal_info["wal_first_timestamp"] = item.time if item.name <= end: wal_info["wal_num"] += 1 wal_info["wal_size"] += item.size else: wal_info["wal_until_next_num"] += 1 wal_info["wal_until_next_size"] += item.size wal_info["wal_last"] = item.name wal_info["wal_last_timestamp"] = item.time # Calculate statistics only for complete backups # If the cron is not running for any reason, the required # WAL files could be missing if wal_info["wal_first"] and wal_info["wal_last"]: # Estimate WAL ratio # Calculate the difference between the timestamps of # the first WAL (begin of backup) and the last WAL # associated to the current backup wal_last_timestamp = wal_info["wal_last_timestamp"] wal_first_timestamp = wal_info["wal_first_timestamp"] wal_info["wal_total_seconds"] = wal_last_timestamp - wal_first_timestamp if wal_info["wal_total_seconds"] > 0: wal_num = 
wal_info["wal_num"] wal_until_next_num = wal_info["wal_until_next_num"] wal_total_seconds = wal_info["wal_total_seconds"] wal_info["wals_per_second"] = ( float(wal_num + wal_until_next_num) / wal_total_seconds ) # evaluation of compression ratio for basebackup WAL files wal_info["wal_theoretical_size"] = wal_info["wal_num"] * float( backup_info.xlog_segment_size ) try: wal_size = wal_info["wal_size"] wal_info["wal_compression_ratio"] = 1 - ( wal_size / wal_info["wal_theoretical_size"] ) except ZeroDivisionError: wal_info["wal_compression_ratio"] = 0.0 # evaluation of compression ratio of WAL files wal_until_next_num = wal_info["wal_until_next_num"] wal_info["wal_until_next_theoretical_size"] = wal_until_next_num * float( backup_info.xlog_segment_size ) try: wal_until_next_size = wal_info["wal_until_next_size"] until_next_theoretical_size = wal_info[ "wal_until_next_theoretical_size" ] wal_info["wal_until_next_compression_ratio"] = 1 - ( wal_until_next_size / until_next_theoretical_size ) except ZeroDivisionError: wal_info["wal_until_next_compression_ratio"] = 0.0 return wal_info def recover( self, backup_info, dest, tablespaces=None, remote_command=None, **kwargs ): """ Performs a recovery of a backup :param barman.infofile.LocalBackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. :kwparam str|None target_tli: the target timeline :kwparam str|None target_time: the target time :kwparam str|None target_xid: the target xid :kwparam str|None target_lsn: the target LSN :kwparam str|None target_name: the target name created previously with pg_create_restore_point() function call :kwparam bool|None target_immediate: end recovery as soon as consistency is reached :kwparam bool exclusive: whether the recovery is exclusive or not :kwparam str|None target_action: the recovery target action :kwparam bool|None standby_mode: the standby mode """ return self.backup_manager.recover( backup_info, dest, tablespaces, remote_command, **kwargs ) def get_wal( self, wal_name, compression=None, output_directory=None, peek=None, partial=False, ): """ Retrieve a WAL file from the archive :param str wal_name: id of the WAL file to find into the WAL archive :param str|None compression: compression format for the output :param str|None output_directory: directory where to deposit the WAL file :param int|None peek: if defined list the next N WAL file :param bool partial: retrieve also partial WAL files """ # If used through SSH identify the client to add it to logs source_suffix = "" ssh_connection = os.environ.get("SSH_CONNECTION") if ssh_connection: # The client IP is the first value contained in `SSH_CONNECTION` # which contains four space-separated values: client IP address, # client port number, server IP address, and server port number. source_suffix = " (SSH host: %s)" % (ssh_connection.split()[0],) # Sanity check if not xlog.is_any_xlog_file(wal_name): output.error( "'%s' is not a valid wal file name%s", wal_name, source_suffix, exit_code=3, ) return # If peek is requested we only output a list of files if peek: # Get the next ``peek`` files following the provided ``wal_name``. # If ``wal_name`` is not a simple wal file, # we cannot guess the names of the following WAL files. # So ``wal_name`` is the only possible result, if exists. 
if xlog.is_wal_file(wal_name): # We can't know what was the segment size of PostgreSQL WAL # files at backup time. Because of this, we generate all # the possible names for a WAL segment, and then we check # if the requested one is included. wal_peek_list = xlog.generate_segment_names(wal_name) else: wal_peek_list = iter([wal_name]) # Output the content of wal_peek_list until we have displayed # enough files or find a missing file count = 0 while count < peek: try: wal_peek_name = next(wal_peek_list) except StopIteration: # No more item in wal_peek_list break # Get list of possible location. We do not prefetch # partial files wal_peek_paths = self.get_wal_possible_paths( wal_peek_name, partial=False ) # If the next WAL file is found, output the name # and continue to the next one if any(os.path.exists(path) for path in wal_peek_paths): count += 1 output.info(wal_peek_name, log=False) continue # If ``wal_peek_file`` doesn't exist, check if we need to # look in the following segment tli, log, seg = xlog.decode_segment_name(wal_peek_name) # If `seg` is not a power of two, it is not possible that we # are at the end of a WAL group, so we are done if not is_power_of_two(seg): break # This is a possible WAL group boundary, let's try the # following group seg = 0 log += 1 # Install a new generator from the start of the next segment. # If the file doesn't exists we will terminate because # zero is not a power of two wal_peek_name = xlog.encode_segment_name(tli, log, seg) wal_peek_list = xlog.generate_segment_names(wal_peek_name) # Do not output anything else return # If an output directory was provided write the file inside it # otherwise we use standard output if output_directory is not None: destination_path = os.path.join(output_directory, wal_name) destination_description = "into '%s' file" % destination_path # Use the standard output for messages logger = output try: destination = open(destination_path, "wb") except IOError as e: output.error( "Unable to open '%s' file%s: %s", destination_path, source_suffix, e, exit_code=3, ) return else: destination_description = "to standard output" # Do not use the standard output for messages, otherwise we would # taint the output stream logger = _logger try: # Python 3.x destination = sys.stdout.buffer except AttributeError: # Python 2.x destination = sys.stdout # Get the list of WAL file possible paths wal_paths = self.get_wal_possible_paths(wal_name, partial) for wal_file in wal_paths: # Check for file existence if not os.path.exists(wal_file): continue logger.info( "Sending WAL '%s' for server '%s' %s%s", os.path.basename(wal_file), self.config.name, destination_description, source_suffix, ) try: # Try returning the wal_file to the client self.get_wal_sendfile(wal_file, compression, destination) # We are done, return to the caller return except CommandFailedException: # If an external command fails we cannot really know why, # but if the WAL file disappeared, we assume # it has been moved in the archive so we ignore the error. 
# This file will be retrieved later, as the last entry # returned by get_wal_possible_paths() is the archive position if not os.path.exists(wal_file): pass else: raise except OSError as exc: # If the WAL file disappeared just ignore the error # This file will be retrieved later, as the last entry # returned by get_wal_possible_paths() is the archive # position if exc.errno == errno.ENOENT and exc.filename == wal_file: pass else: raise logger.info("Skipping vanished WAL file '%s'%s", wal_file, source_suffix) output.error( "WAL file '%s' not found in server '%s'%s", wal_name, self.config.name, source_suffix, ) def get_wal_sendfile(self, wal_file, compression, destination): """ Send a WAL file to the destination file, using the required compression :param str wal_file: WAL file path :param str compression: required compression :param destination: file stream to use to write the data """ # Identify the wal file wal_info = self.backup_manager.compression_manager.get_wal_file_info(wal_file) # Get a decompressor for the file (None if not compressed) wal_compressor = self.backup_manager.compression_manager.get_compressor( wal_info.compression ) # Get a compressor for the output (None if not compressed) out_compressor = self.backup_manager.compression_manager.get_compressor( compression ) # Initially our source is the stored WAL file and we do not have # any temporary file source_file = wal_file uncompressed_file = None compressed_file = None # If the required compression is different from the source we # decompress/compress it into the required format (getattr is # used here to gracefully handle None objects) if getattr(wal_compressor, "compression", None) != getattr( out_compressor, "compression", None ): # If source is compressed, decompress it into a temporary file if wal_compressor is not None: uncompressed_file = NamedTemporaryFile( dir=self.config.wals_directory, prefix=".%s." % os.path.basename(wal_file), suffix=".uncompressed", ) # decompress wal file wal_compressor.decompress(source_file, uncompressed_file.name) source_file = uncompressed_file.name # If output compression is required compress the source # into a temporary file if out_compressor is not None: compressed_file = NamedTemporaryFile( dir=self.config.wals_directory, prefix=".%s." % os.path.basename(wal_file), suffix=".compressed", ) out_compressor.compress(source_file, compressed_file.name) source_file = compressed_file.name # Copy the prepared source file to destination with open(source_file, "rb") as input_file: shutil.copyfileobj(input_file, destination) # Remove temp files if uncompressed_file is not None: uncompressed_file.close() if compressed_file is not None: compressed_file.close() def put_wal(self, fileobj): """ Receive a WAL file from SERVER_NAME and securely store it in the incoming directory. The file will be read from the fileobj passed as parameter. """ # If used through SSH identify the client to add it to logs source_suffix = "" ssh_connection = os.environ.get("SSH_CONNECTION") if ssh_connection: # The client IP is the first value contained in `SSH_CONNECTION` # which contains four space-separated values: client IP address, # client port number, server IP address, and server port number. 
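            # For example (hypothetical values): with SSH_CONNECTION set to
            # "192.0.2.10 51234 192.0.2.1 22" the client address is
            # "192.0.2.10", so the messages logged below carry the suffix
            # " (SSH host: 192.0.2.10)".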
source_suffix = " (SSH host: %s)" % (ssh_connection.split()[0],) # Incoming directory is where the files will be extracted dest_dir = self.config.incoming_wals_directory # Ensure the presence of the destination directory mkpath(dest_dir) incoming_file = namedtuple( "incoming_file", [ "name", "tmp_path", "path", "checksum", ], ) # Stream read tar from stdin, store content in incoming directory # The closing wrapper is needed only for Python 2.6 extracted_files = {} validated_files = {} md5sums = {} try: with closing(tarfile.open(mode="r|", fileobj=fileobj)) as tar: for item in tar: name = item.name # Strip leading './' - tar has been manually created if name.startswith("./"): name = name[2:] # Requires a regular file as tar item if not item.isreg(): output.error( "Unsupported file type '%s' for file '%s' " "in put-wal for server '%s'%s", item.type, name, self.config.name, source_suffix, ) return # Subdirectories are not supported if "/" in name: output.error( "Unsupported filename '%s' in put-wal for server '%s'%s", name, self.config.name, source_suffix, ) return # Checksum file if name == "MD5SUMS": # Parse content and store it in md5sums dictionary for line in tar.extractfile(item).readlines(): line = line.decode().rstrip() try: # Split checksums and path info checksum, path = re.split(r" [* ]", line, 1) except ValueError: output.warning( "Bad checksum line '%s' found " "in put-wal for server '%s'%s", line, self.config.name, source_suffix, ) continue # Strip leading './' from path in the checksum file if path.startswith("./"): path = path[2:] md5sums[path] = checksum else: # Extract using a temp name (with PID) tmp_path = os.path.join( dest_dir, ".%s-%s" % (os.getpid(), name) ) path = os.path.join(dest_dir, name) tar.makefile(item, tmp_path) # Set the original timestamp tar.utime(item, tmp_path) # Add the tuple to the dictionary of extracted files extracted_files[name] = incoming_file( name, tmp_path, path, file_md5(tmp_path) ) validated_files[name] = False # For each received checksum verify the corresponding file for name in md5sums: # Check that file is present in the tar archive if name not in extracted_files: output.error( "Checksum without corresponding file '%s' " "in put-wal for server '%s'%s", name, self.config.name, source_suffix, ) return # Verify the checksum of the file if extracted_files[name].checksum != md5sums[name]: output.error( "Bad file checksum '%s' (should be %s) " "for file '%s' " "in put-wal for server '%s'%s", extracted_files[name].checksum, md5sums[name], name, self.config.name, source_suffix, ) return _logger.info( "Received file '%s' with checksum '%s' " "by put-wal for server '%s'%s", name, md5sums[name], self.config.name, source_suffix, ) validated_files[name] = True # Put the files in the final place, atomically and fsync all for item in extracted_files.values(): # Final verification of checksum presence for each file if not validated_files[item.name]: output.error( "Missing checksum for file '%s' " "in put-wal for server '%s'%s", item.name, self.config.name, source_suffix, ) return # If a file with the same name exists, returns an error. # PostgreSQL archive command will retry again later and, # at that time, Barman's WAL archiver should have already # managed this file. 
if os.path.exists(item.path): output.error( "Impossible to write already existing file '%s' " "in put-wal for server '%s'%s", item.name, self.config.name, source_suffix, ) return os.rename(item.tmp_path, item.path) fsync_file(item.path) fsync_dir(dest_dir) finally: # Cleanup of any remaining temp files (where applicable) for item in extracted_files.values(): if os.path.exists(item.tmp_path): os.unlink(item.tmp_path) def cron(self, wals=True, retention_policies=True, keep_descriptors=False): """ Maintenance operations :param bool wals: WAL archive maintenance :param bool retention_policies: retention policy maintenance :param bool keep_descriptors: whether to keep subprocess descriptors, defaults to False """ try: # Actually this is the highest level of locking in the cron, # this stops the execution of multiple cron on the same server with ServerCronLock(self.config.barman_lock_directory, self.config.name): # When passive call sync.cron() and never run # local WAL archival if self.passive_node: self.sync_cron(keep_descriptors) # WAL management and maintenance elif wals: # Execute the archive-wal sub-process self.cron_archive_wal(keep_descriptors) if self.config.streaming_archiver: # Spawn the receive-wal sub-process self.cron_receive_wal(keep_descriptors) else: # Terminate the receive-wal sub-process if present self.kill("receive-wal", fail_if_not_present=False) # Verify backup self.cron_check_backup(keep_descriptors) # Retention policies execution if retention_policies: self.backup_manager.cron_retention_policy() except LockFileBusy: output.info( "Another cron process is already running on server %s. " "Skipping to the next server" % self.config.name ) except LockFilePermissionDenied as e: output.error("Permission denied, unable to access '%s'" % e) except (OSError, IOError) as e: output.error("%s", e) def cron_archive_wal(self, keep_descriptors): """ Method that handles the start of an 'archive-wal' sub-process. This method must be run protected by ServerCronLock :param bool keep_descriptors: whether to keep subprocess descriptors attached to this process. """ try: # Try to acquire ServerWalArchiveLock, if the lock is available, # no other 'archive-wal' processes are running on this server. # # There is a very little race condition window here because # even if we are protected by ServerCronLock, the user could run # another 'archive-wal' command manually. However, it would result # in one of the two commands failing on lock acquisition, # with no other consequence. with ServerWalArchiveLock( self.config.barman_lock_directory, self.config.name ): # Output and release the lock immediately output.info( "Starting WAL archiving for server %s", self.config.name, log=False ) # Init a Barman sub-process object archive_process = BarmanSubProcess( subcommand="archive-wal", config=barman.__config__.config_file, args=[self.config.name], keep_descriptors=keep_descriptors, ) # Launch the sub-process archive_process.execute() except LockFileBusy: # Another archive process is running for the server, # warn the user and skip to the next one. output.info( "Another archive-wal process is already running " "on server %s. Skipping to the next server" % self.config.name ) def cron_receive_wal(self, keep_descriptors): """ Method that handles the start of a 'receive-wal' sub process This method must be run protected by ServerCronLock :param bool keep_descriptors: whether to keep subprocess descriptors attached to this process. 
""" try: # Try to acquire ServerWalReceiveLock, if the lock is available, # no other 'receive-wal' processes are running on this server. # # There is a very little race condition window here because # even if we are protected by ServerCronLock, the user could run # another 'receive-wal' command manually. However, it would result # in one of the two commands failing on lock acquisition, # with no other consequence. with ServerWalReceiveLock( self.config.barman_lock_directory, self.config.name ): # Output and release the lock immediately output.info( "Starting streaming archiver for server %s", self.config.name, log=False, ) # Start a new receive-wal process receive_process = BarmanSubProcess( subcommand="receive-wal", config=barman.__config__.config_file, args=[self.config.name], keep_descriptors=keep_descriptors, ) # Launch the sub-process receive_process.execute() except LockFileBusy: # Another receive-wal process is running for the server # exit without message _logger.debug( "Another STREAMING ARCHIVER process is running for " "server %s" % self.config.name ) def cron_check_backup(self, keep_descriptors): """ Method that handles the start of a 'check-backup' sub process :param bool keep_descriptors: whether to keep subprocess descriptors attached to this process. """ backup_id = self.get_first_backup_id([BackupInfo.WAITING_FOR_WALS]) if not backup_id: # Nothing to be done for this server return try: # Try to acquire ServerBackupIdLock, if the lock is available, # no other 'check-backup' processes are running on this backup. # # There is a very little race condition window here because # even if we are protected by ServerCronLock, the user could run # another command that takes the lock. However, it would result # in one of the two commands failing on lock acquisition, # with no other consequence. with ServerBackupIdLock( self.config.barman_lock_directory, self.config.name, backup_id ): # Output and release the lock immediately output.info( "Starting check-backup for backup %s of server %s", backup_id, self.config.name, log=False, ) # Start a check-backup process check_process = BarmanSubProcess( subcommand="check-backup", config=barman.__config__.config_file, args=[self.config.name, backup_id], keep_descriptors=keep_descriptors, ) check_process.execute() except LockFileBusy: # Another process is holding the backup lock _logger.debug( "Another process is holding the backup lock for %s " "of server %s" % (backup_id, self.config.name) ) def archive_wal(self, verbose=True): """ Perform the WAL archiving operations. Usually run as subprocess of the barman cron command, but can be executed manually using the barman archive-wal command :param bool verbose: if false outputs something only if there is at least one file """ output.debug("Starting archive-wal for server %s", self.config.name) try: # Take care of the archive lock. # Only one archive job per server is admitted with ServerWalArchiveLock( self.config.barman_lock_directory, self.config.name ): self.backup_manager.archive_wal(verbose) except LockFileBusy: # If another process is running for this server, # warn the user and skip to the next server output.info( "Another archive-wal process is already running " "on server %s. 
Skipping to the next server" % self.config.name ) def create_physical_repslot(self): """ Create a physical replication slot using the streaming connection """ if not self.streaming: output.error( "Unable to create a physical replication slot: " "streaming connection not configured" ) return # Replication slots are not supported by PostgreSQL < 9.4 try: if self.streaming.server_version < 90400: output.error( "Unable to create a physical replication slot: " "not supported by '%s' " "(9.4 or higher is required)" % self.streaming.server_major_version ) return except PostgresException as exc: msg = "Cannot connect to server '%s'" % self.config.name output.error(msg, log=False) _logger.error("%s: %s", msg, force_str(exc).strip()) return if not self.config.slot_name: output.error( "Unable to create a physical replication slot: " "slot_name configuration option required" ) return output.info( "Creating physical replication slot '%s' on server '%s'", self.config.slot_name, self.config.name, ) try: self.streaming.create_physical_repslot(self.config.slot_name) output.info("Replication slot '%s' created", self.config.slot_name) except PostgresDuplicateReplicationSlot: output.error("Replication slot '%s' already exists", self.config.slot_name) except PostgresReplicationSlotsFull: output.error( "All replication slots for server '%s' are in use\n" "Free one or increase the max_replication_slots " "value on your PostgreSQL server.", self.config.name, ) except PostgresException as exc: output.error( "Cannot create replication slot '%s' on server '%s': %s", self.config.slot_name, self.config.name, force_str(exc).strip(), ) def drop_repslot(self): """ Drop a replication slot using the streaming connection """ if not self.streaming: output.error( "Unable to drop a physical replication slot: " "streaming connection not configured" ) return # Replication slots are not supported by PostgreSQL < 9.4 try: if self.streaming.server_version < 90400: output.error( "Unable to drop a physical replication slot: " "not supported by '%s' (9.4 or higher is " "required)" % self.streaming.server_major_version ) return except PostgresException as exc: msg = "Cannot connect to server '%s'" % self.config.name output.error(msg, log=False) _logger.error("%s: %s", msg, force_str(exc).strip()) return if not self.config.slot_name: output.error( "Unable to drop a physical replication slot: " "slot_name configuration option required" ) return output.info( "Dropping physical replication slot '%s' on server '%s'", self.config.slot_name, self.config.name, ) try: self.streaming.drop_repslot(self.config.slot_name) output.info("Replication slot '%s' dropped", self.config.slot_name) except PostgresInvalidReplicationSlot: output.error("Replication slot '%s' does not exist", self.config.slot_name) except PostgresReplicationSlotInUse: output.error( "Cannot drop replication slot '%s' on server '%s' " "because it is in use.", self.config.slot_name, self.config.name, ) except PostgresException as exc: output.error( "Cannot drop replication slot '%s' on server '%s': %s", self.config.slot_name, self.config.name, force_str(exc).strip(), ) def receive_wal(self, reset=False): """ Enable the reception of WAL files using streaming protocol. Usually started by barman cron command. Executing this manually, the barman process will not terminate but will continuously receive WAL files from the PostgreSQL server. 
:param reset: When set, resets the status of receive-wal """ # Execute the receive-wal command only if streaming_archiver # is enabled if not self.config.streaming_archiver: output.error( "Unable to start receive-wal process: " "streaming_archiver option set to 'off' in " "barman configuration file" ) return if not reset: output.info("Starting receive-wal for server %s", self.config.name) try: # Take care of the receive-wal lock. # Only one receiving process per server is permitted with ServerWalReceiveLock( self.config.barman_lock_directory, self.config.name ): try: # Only the StreamingWalArchiver implementation # does something. # WARNING: This codes assumes that there is only one # StreamingWalArchiver in the archivers list. for archiver in self.archivers: archiver.receive_wal(reset) except ArchiverFailure as e: output.error(e) except LockFileBusy: # If another process is running for this server, if reset: output.info( "Unable to reset the status of receive-wal " "for server %s. Process is still running" % self.config.name ) else: output.info( "Another receive-wal process is already running " "for server %s." % self.config.name ) @property def systemid(self): """ Get the system identifier, as returned by the PostgreSQL server :return str: the system identifier """ status = self.get_remote_status() # Main PostgreSQL connection has higher priority if status.get("postgres_systemid"): return status.get("postgres_systemid") # Fallback: streaming connection return status.get("streaming_systemid") @property def xlogdb_file_name(self): """ The name of the file containing the XLOG_DB :return str: the name of the file that contains the XLOG_DB """ return os.path.join(self.config.wals_directory, self.XLOG_DB) @contextmanager def xlogdb(self, mode="r"): """ Context manager to access the xlogdb file. This method uses locking to make sure only one process is accessing the database at a time. The database file will be created if it not exists. Usage example: with server.xlogdb('w') as file: file.write(new_line) :param str mode: open the file with the required mode (default read-only) """ if not os.path.exists(self.config.wals_directory): os.makedirs(self.config.wals_directory) xlogdb = self.xlogdb_file_name with ServerXLOGDBLock(self.config.barman_lock_directory, self.config.name): # If the file doesn't exist and it is required to read it, # we open it in a+ mode, to be sure it will be created if not os.path.exists(xlogdb) and mode.startswith("r"): if "+" not in mode: mode = "a%s+" % mode[1:] else: mode = "a%s" % mode[1:] with open(xlogdb, mode) as f: # execute the block nested in the with statement try: yield f finally: # we are exiting the context # if file is writable (mode contains w, a or +) # make sure the data is written to disk # http://docs.python.org/2/library/os.html#os.fsync if any((c in "wa+") for c in f.mode): f.flush() os.fsync(f.fileno()) def report_backups(self): if not self.enforce_retention_policies: return dict() else: return self.config.retention_policy.report() def rebuild_xlogdb(self): """ Rebuild the whole xlog database guessing it from the archive content. 
""" return self.backup_manager.rebuild_xlogdb() def get_backup_ext_info(self, backup_info): """ Return a dictionary containing all available information about a backup The result is equivalent to the sum of information from * BackupInfo object * the Server.get_wal_info() return value * the context in the catalog (if available) * the retention policy status :param backup_info: the target backup :rtype dict: all information about a backup """ backup_ext_info = backup_info.to_dict() if backup_info.status in BackupInfo.STATUS_COPY_DONE: try: previous_backup = self.backup_manager.get_previous_backup( backup_ext_info["backup_id"] ) next_backup = self.backup_manager.get_next_backup( backup_ext_info["backup_id"] ) if previous_backup: backup_ext_info["previous_backup_id"] = previous_backup.backup_id else: backup_ext_info["previous_backup_id"] = None if next_backup: backup_ext_info["next_backup_id"] = next_backup.backup_id else: backup_ext_info["next_backup_id"] = None except UnknownBackupIdException: # no next_backup_id and previous_backup_id items # means "Not available" pass backup_ext_info.update(self.get_wal_info(backup_info)) if self.enforce_retention_policies: policy = self.config.retention_policy backup_ext_info["retention_policy_status"] = policy.backup_status( backup_info.backup_id ) else: backup_ext_info["retention_policy_status"] = None # Check any child timeline exists children_timelines = self.get_children_timelines( backup_ext_info["timeline"], forked_after=backup_info.end_xlog ) backup_ext_info["children_timelines"] = children_timelines return backup_ext_info def show_backup(self, backup_info): """ Output all available information about a backup :param backup_info: the target backup """ try: backup_ext_info = self.get_backup_ext_info(backup_info) output.result("show_backup", backup_ext_info) except BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" 'HINT: Please run "barman rebuild-xlogdb %s" ' "to solve this issue", force_str(e), self.config.name, ) output.close_and_exit() @staticmethod def _build_path(path_prefix=None): """ If a path_prefix is provided build a string suitable to be used in PATH environment variable by joining the path_prefix with the current content of PATH environment variable. If the `path_prefix` is None returns None. :rtype: str|None """ if not path_prefix: return None sys_path = os.environ.get("PATH") return "%s%s%s" % (path_prefix, os.pathsep, sys_path) def kill(self, task, fail_if_not_present=True): """ Given the name of a barman sub-task type, attempts to stop all the processes :param string task: The task we want to stop :param bool fail_if_not_present: Display an error when the process is not present (default: True) """ process_list = self.process_manager.list(task) for process in process_list: if self.process_manager.kill(process): output.info("Stopped process %s(%s)", process.task, process.pid) return else: output.error( "Cannot terminate process %s(%s)", process.task, process.pid ) return if fail_if_not_present: output.error( "Termination of %s failed: no such process for server %s", task, self.config.name, ) def switch_wal(self, force=False, archive=None, archive_timeout=None): """ Execute the switch-wal command on the target server """ closed_wal = None try: if force: # If called with force, execute a checkpoint before the # switch_wal command _logger.info("Force a CHECKPOINT before pg_switch_wal()") self.postgres.checkpoint() # Perform the switch_wal. 
expect a WAL name only if the switch # has been successfully executed, False otherwise. closed_wal = self.postgres.switch_wal() if closed_wal is None: # Something went wrong during the execution of the # pg_switch_wal command output.error( "Unable to perform pg_switch_wal " "for server '%s'." % self.config.name ) return if closed_wal: # The switch_wal command have been executed successfully output.info( "The WAL file %s has been closed on server '%s'" % (closed_wal, self.config.name) ) else: # Is not necessary to perform a switch_wal output.info("No switch required for server '%s'" % self.config.name) except PostgresIsInRecovery: output.info( "No switch performed because server '%s' " "is a standby." % self.config.name ) except PostgresSuperuserRequired: # Superuser rights are required to perform the switch_wal output.error("Barman switch-wal requires superuser rights") return # If the user has asked to wait for a WAL file to be archived, # wait until a new WAL file has been found # or the timeout has expired if archive: self.wait_for_wal(closed_wal, archive_timeout) def wait_for_wal(self, wal_file=None, archive_timeout=None): """ Wait for a WAL file to be archived on the server :param str|None wal_file: Name of the WAL file, or None if we should just wait for a new WAL file to be archived :param int|None archive_timeout: Timeout in seconds """ max_msg = "" if archive_timeout: max_msg = " (max: %s seconds)" % archive_timeout initial_wals = dict() if not wal_file: wals = self.backup_manager.get_latest_archived_wals_info() initial_wals = dict([(tli, wals[tli].name) for tli in wals]) if wal_file: output.info( "Waiting for the WAL file %s from server '%s'%s", wal_file, self.config.name, max_msg, ) else: output.info( "Waiting for a WAL file from server '%s' to be archived%s", self.config.name, max_msg, ) # Wait for a new file until end_time or forever if no archive_timeout end_time = None if archive_timeout: end_time = time.time() + archive_timeout while not end_time or time.time() < end_time: self.archive_wal(verbose=False) # Finish if the closed wal file is in the archive. if wal_file: if os.path.exists(self.get_wal_full_path(wal_file)): break else: # Check if any new file has been archived, on any timeline wals = self.backup_manager.get_latest_archived_wals_info() current_wals = dict([(tli, wals[tli].name) for tli in wals]) if current_wals != initial_wals: break # sleep a bit before retrying time.sleep(0.1) else: if wal_file: output.error( "The WAL file %s has not been received in %s seconds", wal_file, archive_timeout, ) else: output.info( "A WAL file has not been received in %s seconds", archive_timeout ) def replication_status(self, target="all"): """ Implements the 'replication-status' command. 
""" if target == "hot-standby": client_type = PostgreSQLConnection.STANDBY elif target == "wal-streamer": client_type = PostgreSQLConnection.WALSTREAMER else: client_type = PostgreSQLConnection.ANY_STREAMING_CLIENT try: standby_info = self.postgres.get_replication_stats(client_type) if standby_info is None: output.error("Unable to connect to server %s" % self.config.name) else: output.result( "replication_status", self.config.name, target, self.postgres.current_xlog_location, standby_info, ) except PostgresUnsupportedFeature as e: output.info(" Requires PostgreSQL %s or higher", e) except PostgresSuperuserRequired: output.info(" Requires superuser rights") def get_children_timelines(self, tli, forked_after=None): """ Get a list of the children of the passed timeline :param int tli: Id of the timeline to check :param str forked_after: XLog location after which the timeline must have been created :return List[xlog.HistoryFileData]: the list of timelines that have the timeline with id 'tli' as parent """ comp_manager = self.backup_manager.compression_manager if forked_after: forked_after = xlog.parse_lsn(forked_after) children = [] # Search all the history files after the passed timeline children_tli = tli while True: children_tli += 1 history_path = os.path.join( self.config.wals_directory, "%08X.history" % children_tli ) # If the file doesn't exists, stop searching if not os.path.exists(history_path): break # Create the WalFileInfo object using the file wal_info = comp_manager.get_wal_file_info(history_path) # Get content of the file. We need to pass a compressor manager # here to handle an eventual compression of the history file history_info = xlog.decode_history_file( wal_info, self.backup_manager.compression_manager ) # Save the history only if is reachable from this timeline. for tinfo in history_info: # The history file contains the full genealogy # but we keep only the line with `tli` timeline as parent. if tinfo.parent_tli != tli: continue # We need to return this history info only if this timeline # has been forked after the passed LSN if forked_after and tinfo.switchpoint < forked_after: continue children.append(tinfo) return children def check_backup(self, backup_info): """ Make sure that we have all the WAL files required by a physical backup for consistency (from the first to the last WAL file) :param backup_info: the target backup """ output.debug( "Checking backup %s of server %s", backup_info.backup_id, self.config.name ) try: # No need to check a backup which is not waiting for WALs. # Doing that we could also mark as DONE backups which # were previously FAILED due to copy errors if backup_info.status == BackupInfo.FAILED: output.error("The validity of a failed backup cannot be checked") return # Take care of the backup lock. # Only one process can modify a backup a a time with ServerBackupIdLock( self.config.barman_lock_directory, self.config.name, backup_info.backup_id, ): orig_status = backup_info.status self.backup_manager.check_backup(backup_info) if orig_status == backup_info.status: output.debug( "Check finished: the status of backup %s of server %s " "remains %s", backup_info.backup_id, self.config.name, backup_info.status, ) else: output.debug( "Check finished: the status of backup %s of server %s " "changed from %s to %s", backup_info.backup_id, self.config.name, orig_status, backup_info.status, ) except LockFileBusy: # If another process is holding the backup lock, # notify the user and terminate. 
# This is not an error condition because it happens when # another process is validating the backup. output.info( "Another process is holding the lock for " "backup %s of server %s." % (backup_info.backup_id, self.config.name) ) return except LockFilePermissionDenied as e: # We cannot access the lockfile. # warn the user and terminate output.error("Permission denied, unable to access '%s'" % e) return def sync_status(self, last_wal=None, last_position=None): """ Return server status for sync purposes. The method outputs JSON, containing: * list of backups (with DONE status) * server configuration * last read position (in xlog.db) * last read wal * list of archived wal files If last_wal is provided, the method will discard all the wall files older than last_wal. If last_position is provided the method will try to read the xlog.db file using last_position as starting point. If the wal file at last_position does not match last_wal, read from the start and use last_wal as limit :param str|None last_wal: last read wal :param int|None last_position: last read position (in xlog.db) """ sync_status = {} wals = [] # Get all the backups using default filter for # get_available_backups method # (BackupInfo.DONE) backups = self.get_available_backups() # Retrieve the first wal associated to a backup, it will be useful # to filter our eventual WAL too old to be useful first_useful_wal = None if backups: first_useful_wal = backups[sorted(backups.keys())[0]].begin_wal # Read xlogdb file. with self.xlogdb() as fxlogdb: starting_point = self.set_sync_starting_point( fxlogdb, last_wal, last_position ) check_first_wal = starting_point == 0 and last_wal is not None # The wal_info and line variables are used after the loop. # We initialize them here to avoid errors with an empty xlogdb. line = None wal_info = None for line in fxlogdb: # Parse the line wal_info = WalFileInfo.from_xlogdb_line(line) # Check if user is requesting data that is not available. # TODO: probably the check should be something like # TODO: last_wal + 1 < wal_info.name if check_first_wal: if last_wal < wal_info.name: raise SyncError( "last_wal '%s' is older than the first" " available wal '%s'" % (last_wal, wal_info.name) ) else: check_first_wal = False # If last_wal is provided, discard any line older than last_wal if last_wal: if wal_info.name <= last_wal: continue # Else don't return any WAL older than first available backup elif first_useful_wal and wal_info.name < first_useful_wal: continue wals.append(wal_info) if wal_info is not None: # Check if user is requesting data that is not available. if last_wal is not None and last_wal > wal_info.name: raise SyncError( "last_wal '%s' is newer than the last available wal " " '%s'" % (last_wal, wal_info.name) ) # Set last_position with the current position - len(last_line) # (returning the beginning of the last line) sync_status["last_position"] = fxlogdb.tell() - len(line) # Set the name of the last wal of the file sync_status["last_name"] = wal_info.name else: # we started over sync_status["last_position"] = 0 sync_status["last_name"] = "" sync_status["backups"] = backups sync_status["wals"] = wals sync_status["version"] = barman.__version__ sync_status["config"] = self.config json.dump(sync_status, sys.stdout, cls=BarmanEncoder, indent=4) def sync_cron(self, keep_descriptors): """ Manage synchronisation operations between passive node and master node. 
The method recover information from the remote master server, evaluate if synchronisation with the master is required and spawn barman sub processes, syncing backups and WAL files :param bool keep_descriptors: whether to keep subprocess descriptors attached to this process. """ # Recover information from primary node sync_wal_info = self.load_sync_wals_info() # Use last_wal and last_position for the remote call to the # master server try: remote_info = self.primary_node_info( sync_wal_info.last_wal, sync_wal_info.last_position ) except SyncError as exc: output.error( "Failed to retrieve the primary node status: %s" % force_str(exc) ) return # Perform backup synchronisation if remote_info["backups"]: # Get the list of backups that need to be synced # with the local server local_backup_list = self.get_available_backups() # Subtract the list of the already # synchronised backups from the remote backup lists, # obtaining the list of backups still requiring synchronisation sync_backup_list = set(remote_info["backups"]) - set(local_backup_list) else: # No backup to synchronisation required output.info( "No backup synchronisation required for server %s", self.config.name, log=False, ) sync_backup_list = [] for backup_id in sorted(sync_backup_list): # Check if this backup_id needs to be synchronized by spawning a # sync-backup process. # The same set of checks will be executed by the spawned process. # This "double check" is necessary because we don't want the cron # to spawn unnecessary processes. try: local_backup_info = self.get_backup(backup_id) self.check_sync_required(backup_id, remote_info, local_backup_info) except SyncError as e: # It means that neither the local backup # nor the remote one exist. # This should not happen here. output.exception("Unexpected state: %s", e) break except SyncToBeDeleted: # The backup does not exist on primary server # and is FAILED here. # It must be removed by the sync-backup process. pass except SyncNothingToDo: # It could mean that the local backup is in DONE state or # that it is obsolete according to # the local retention policies. # In both cases, continue with the next backup. continue # Now that we are sure that a backup-sync subprocess is necessary, # we need to acquire the backup lock, to be sure that # there aren't other processes synchronising the backup. # If cannot acquire the lock, another synchronisation process # is running, so we give up. try: with ServerBackupSyncLock( self.config.barman_lock_directory, self.config.name, backup_id ): output.info( "Starting copy of backup %s for server %s", backup_id, self.config.name, ) except LockFileBusy: output.info( "A synchronisation process for backup %s" " on server %s is already in progress", backup_id, self.config.name, log=False, ) # Stop processing this server break # Init a Barman sub-process object sub_process = BarmanSubProcess( subcommand="sync-backup", config=barman.__config__.config_file, args=[self.config.name, backup_id], keep_descriptors=keep_descriptors, ) # Launch the sub-process sub_process.execute() # Stop processing this server break # Perform WAL synchronisation if remote_info["wals"]: # We need to acquire a sync-wal lock, to be sure that # there aren't other processes synchronising the WAL files. # If cannot acquire the lock, another synchronisation process # is running, so we give up. 
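            # As in the backup branch above, the lock is taken here only as a
            # probe: the context is entered and released immediately to detect
            # a concurrent operation, and the actual copy is delegated to a
            # 'sync-wals' sub-process spawned right after. A minimal sketch of
            # the probe, assuming the same lock class and arguments:
            #
            #   try:
            #       with ServerWalSyncLock(lock_dir, server_name):
            #           pass  # no other sync-wal process is running
            #   except LockFileBusy:
            #       return  # another synchronisation is already in progress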
try: with ServerWalSyncLock( self.config.barman_lock_directory, self.config.name, ): output.info( "Started copy of WAL files for server %s", self.config.name ) except LockFileBusy: output.info( "WAL synchronisation already running for server %s", self.config.name, log=False, ) return # Init a Barman sub-process object sub_process = BarmanSubProcess( subcommand="sync-wals", config=barman.__config__.config_file, args=[self.config.name], keep_descriptors=keep_descriptors, ) # Launch the sub-process sub_process.execute() else: # no WAL synchronisation is required output.info( "No WAL synchronisation required for server %s", self.config.name, log=False, ) def check_sync_required(self, backup_name, primary_info, local_backup_info): """ Check if it is necessary to sync a backup. If the backup is present on the Primary node: * if it does not exist locally: continue (synchronise it) * if it exists and is DONE locally: raise SyncNothingToDo (nothing to do) * if it exists and is FAILED locally: continue (try to recover it) If the backup is not present on the Primary node: * if it does not exist locally: raise SyncError (wrong call) * if it exists and is DONE locally: raise SyncNothingToDo (nothing to do) * if it exists and is FAILED locally: raise SyncToBeDeleted (remove it) If a backup needs to be synchronised but it is obsolete according to local retention policies, raise SyncNothingToDo, else return to the caller. :param str backup_name: str name of the backup to sync :param dict primary_info: dict containing the Primary node status :param barman.infofile.BackupInfo local_backup_info: BackupInfo object representing the current backup state :raise SyncError: There is an error in the user request :raise SyncNothingToDo: Nothing to do for this request :raise SyncToBeDeleted: Backup is not recoverable and must be deleted """ backups = primary_info["backups"] # Backup not present on Primary node, and not present # locally. Raise exception. if backup_name not in backups and local_backup_info is None: raise SyncError( "Backup %s is absent on %s server" % (backup_name, self.config.name) ) # Backup not present on Primary node, but is # present locally with status FAILED: backup incomplete. # Remove the backup and warn the user if ( backup_name not in backups and local_backup_info is not None and local_backup_info.status == BackupInfo.FAILED ): raise SyncToBeDeleted( "Backup %s is absent on %s server and is incomplete locally" % (backup_name, self.config.name) ) # Backup not present on Primary node, but is # present locally with status DONE. Sync complete, local only. if ( backup_name not in backups and local_backup_info is not None and local_backup_info.status == BackupInfo.DONE ): raise SyncNothingToDo( "Backup %s is absent on %s server, but present locally " "(local copy only)" % (backup_name, self.config.name) ) # Backup present on Primary node, and present locally # with status DONE. Sync complete. if ( backup_name in backups and local_backup_info is not None and local_backup_info.status == BackupInfo.DONE ): raise SyncNothingToDo( "Backup %s is already synced with" " %s server" % (backup_name, self.config.name) ) # Retention Policy: if the local server has a Retention policy, # check that the remote backup is not obsolete. enforce_retention_policies = self.enforce_retention_policies retention_policy_mode = self.config.retention_policy_mode if enforce_retention_policies and retention_policy_mode == "auto": # All the checks regarding retention policies are in # this boolean method. 
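            # The check below (see is_backup_locally_obsolete) simulates the
            # situation after the copy: the remote backup is added to the list
            # of local backups, the local retention policy report is run on
            # that list and, if the new backup would immediately be flagged as
            # OBSOLETE, the synchronisation is skipped altogether.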
if self.is_backup_locally_obsolete(backup_name, backups): # The remote backup is obsolete according to # local retention policies. # Nothing to do. raise SyncNothingToDo( "Remote backup %s/%s is obsolete for " "local retention policies." % (primary_info["config"]["name"], backup_name) ) def load_sync_wals_info(self): """ Load the content of SYNC_WALS_INFO_FILE for the given server :return collections.namedtuple: last read wal and position information """ sync_wals_info_file = os.path.join( self.config.wals_directory, SYNC_WALS_INFO_FILE ) if not os.path.exists(sync_wals_info_file): return SyncWalInfo(None, None) try: with open(sync_wals_info_file) as f: return SyncWalInfo._make(f.readline().split("\t")) except (OSError, IOError) as e: raise SyncError( "Cannot open %s file for server %s: %s" % (SYNC_WALS_INFO_FILE, self.config.name, e) ) def primary_node_info(self, last_wal=None, last_position=None): """ Invoke sync-info directly on the specified primary node The method issues a call to the sync-info method on the primary node through an SSH connection :param barman.server.Server self: the Server object :param str|None last_wal: last read wal :param int|None last_position: last read position (in xlog.db) :raise SyncError: if the ssh command fails """ # First we need to check if the server is in passive mode _logger.debug( "primary sync-info(%s, %s, %s)", self.config.name, last_wal, last_position ) if not self.passive_node: raise SyncError("server %s is not passive" % self.config.name) # Issue a call to 'barman sync-info' to the primary node, # using primary_ssh_command option to establish an # SSH connection. remote_command = Command( cmd=self.config.primary_ssh_command, shell=True, check=True, path=self.path ) # We run it in a loop to retry when the master issues error. while True: try: # Include the config path as an option if configured for this server if self.config.forward_config_path: base_cmd = "barman -c %s sync-info" % barman.__config__.config_file else: base_cmd = "barman sync-info" # Build the command string cmd_str = "%s %s" % (base_cmd, self.config.name) # If necessary we add last_wal and last_position # to the command string if last_wal is not None: cmd_str += " %s " % last_wal if last_position is not None: cmd_str += " %s " % last_position # Then issue the command remote_command(cmd_str) # All good, exit the retry loop with 'break' break except CommandFailedException as exc: # In case we requested synchronisation with a last WAL info, # we try again requesting the full current status, but only if # exit code is 1. A different exit code means that # the error is not from Barman (i.e. ssh failure) if exc.args[0]["ret"] == 1 and last_wal is not None: last_wal = None last_position = None output.warning( "sync-info is out of sync. " "Self-recovery procedure started: " "requesting full synchronisation from " "primary server %s" % self.config.name ) continue # Wrap the CommandFailed exception with a SyncError # for custom message and logging. raise SyncError( "sync-info execution on remote " "primary server %s failed: %s" % (self.config.name, exc.args[0]["err"]) ) # Save the result on disk primary_info_file = os.path.join( self.config.backup_directory, PRIMARY_INFO_FILE ) # parse the json output remote_info = json.loads(remote_command.out) try: # TODO: rename the method to make it public # noinspection PyProtectedMember self._make_directories() # Save remote info to disk # We do not use a LockFile here. 
Instead we write all data # in a new file (adding '.tmp' extension) then we rename it # replacing the old one. # It works while the renaming is an atomic operation # (this is a POSIX requirement) primary_info_file_tmp = primary_info_file + ".tmp" with open(primary_info_file_tmp, "w") as info_file: info_file.write(remote_command.out) os.rename(primary_info_file_tmp, primary_info_file) except (OSError, IOError) as e: # Wrap file access exceptions using SyncError raise SyncError( "Cannot open %s file for server %s: %s" % (PRIMARY_INFO_FILE, self.config.name, e) ) return remote_info def is_backup_locally_obsolete(self, backup_name, remote_backups): """ Check if a remote backup is obsolete according with the local retention policies. :param barman.server.Server self: Server object :param str backup_name: str name of the backup to sync :param dict remote_backups: dict containing the Primary node status :return bool: returns if the backup is obsolete or not """ # Get the local backups and add the remote backup info. This will # simulate the situation after the copy of the remote backup. local_backups = self.get_available_backups(BackupInfo.STATUS_NOT_EMPTY) backup = remote_backups[backup_name] local_backups[backup_name] = LocalBackupInfo.from_json(self, backup) # Execute the local retention policy on the modified list of backups report = self.config.retention_policy.report(source=local_backups) # If the added backup is obsolete return true. return report[backup_name] == BackupInfo.OBSOLETE def sync_backup(self, backup_name): """ Method for the synchronisation of a backup from a primary server. The Method checks that the server is passive, then if it is possible to sync with the Primary. Acquires a lock at backup level and copy the backup from the Primary node using rsync. During the sync process the backup on the Passive node is marked as SYNCING and if the sync fails (due to network failure, user interruption...) it is marked as FAILED. :param barman.server.Server self: the passive Server object to sync :param str backup_name: the name of the backup to sync. """ _logger.debug("sync_backup(%s, %s)", self.config.name, backup_name) if not self.passive_node: raise SyncError("server %s is not passive" % self.config.name) local_backup_info = self.get_backup(backup_name) # Step 1. Parse data from Primary server. _logger.info( "Synchronising with server %s backup %s: step 1/3: " "parse server information", self.config.name, backup_name, ) try: primary_info = self.load_primary_info() self.check_sync_required(backup_name, primary_info, local_backup_info) except SyncError as e: # Invocation error: exit with return code 1 output.error("%s", e) return except SyncToBeDeleted as e: # The required backup does not exist on primary, # therefore it should be deleted also on passive node, # as it's not in DONE status. output.warning("%s, purging local backup", e) self.delete_backup(local_backup_info) return except SyncNothingToDo as e: # Nothing to do. Log as info level and exit output.info("%s", e) return # If the backup is present on Primary node, and is not present at all # locally or is present with FAILED status, execute sync. # Retrieve info about the backup from PRIMARY_INFO_FILE remote_backup_info = primary_info["backups"][backup_name] remote_backup_dir = primary_info["config"]["basebackups_directory"] # Try to acquire the backup lock, if the lock is not available abort # the copy. 
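        # Status transitions performed while holding the lock below:
        #   absent or FAILED locally -> SYNCING  when the copy starts
        #   SYNCING -> DONE    when the rsync copy completes successfully
        #   SYNCING -> FAILED  on rsync errors, interruption or any other
        #                      unexpected exception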
try: with ServerBackupSyncLock( self.config.barman_lock_directory, self.config.name, backup_name ): try: backup_manager = self.backup_manager # Build a BackupInfo object local_backup_info = LocalBackupInfo.from_json( self, remote_backup_info ) local_backup_info.set_attribute("status", BackupInfo.SYNCING) local_backup_info.save() backup_manager.backup_cache_add(local_backup_info) # Activate incremental copy if requested # Calculate the safe_horizon as the start time of the older # backup involved in the copy # NOTE: safe_horizon is a tz-aware timestamp because # BackupInfo class ensures that property reuse_mode = self.config.reuse_backup safe_horizon = None reuse_dir = None if reuse_mode: prev_backup = backup_manager.get_previous_backup(backup_name) next_backup = backup_manager.get_next_backup(backup_name) # If a newer backup is present, using it is preferable # because that backup will remain valid longer if next_backup: safe_horizon = local_backup_info.begin_time reuse_dir = next_backup.get_basebackup_directory() elif prev_backup: safe_horizon = prev_backup.begin_time reuse_dir = prev_backup.get_basebackup_directory() else: reuse_mode = None # Try to copy from the Primary node the backup using # the copy controller. copy_controller = RsyncCopyController( ssh_command=self.config.primary_ssh_command, network_compression=self.config.network_compression, path=self.path, reuse_backup=reuse_mode, safe_horizon=safe_horizon, retry_times=self.config.basebackup_retry_times, retry_sleep=self.config.basebackup_retry_sleep, workers=self.config.parallel_jobs, ) copy_controller.add_directory( "basebackup", ":%s/%s/" % (remote_backup_dir, backup_name), local_backup_info.get_basebackup_directory(), exclude_and_protect=["/backup.info", "/.backup.lock"], bwlimit=self.config.bandwidth_limit, reuse=reuse_dir, item_class=RsyncCopyController.PGDATA_CLASS, ) _logger.info( "Synchronising with server %s backup %s: step 2/3: " "file copy", self.config.name, backup_name, ) copy_controller.copy() # Save the backup state and exit _logger.info( "Synchronising with server %s backup %s: " "step 3/3: finalise sync", self.config.name, backup_name, ) local_backup_info.set_attribute("status", BackupInfo.DONE) local_backup_info.save() except CommandFailedException as e: # Report rsync errors msg = "failure syncing server %s backup %s: %s" % ( self.config.name, backup_name, e, ) output.error(msg) # Set the BackupInfo status to FAILED local_backup_info.set_attribute("status", BackupInfo.FAILED) local_backup_info.set_attribute("error", msg) local_backup_info.save() return # Catch KeyboardInterrupt (Ctrl+c) and all the exceptions except BaseException as e: msg_lines = force_str(e).strip().splitlines() if local_backup_info: # Use only the first line of exception message # in local_backup_info error field local_backup_info.set_attribute("status", BackupInfo.FAILED) # If the exception has no attached message # use the raw type name if not msg_lines: msg_lines = [type(e).__name__] local_backup_info.set_attribute( "error", "failure syncing server %s backup %s: %s" % (self.config.name, backup_name, msg_lines[0]), ) local_backup_info.save() output.error( "Backup failed syncing with %s: %s\n%s", self.config.name, msg_lines[0], "\n".join(msg_lines[1:]), ) except LockFileException: output.error( "Another synchronisation process for backup %s " "of server %s is already running.", backup_name, self.config.name, ) def sync_wals(self): """ Method for the synchronisation of WAL files on the passive node, by copying them from the primary server. 
The method checks if the server is passive, then tries to acquire a sync-wal lock. Recovers the id of the last locally archived WAL file from the status file ($wals_directory/sync-wals.info). Reads the primary.info file and parses it, then obtains the list of WAL files that have not yet been synchronised with the master. Rsync is used for file synchronisation with the primary server. Once the copy is finished, acquires a lock on xlog.db, updates it then releases the lock. Before exiting, the method updates the last_wal and last_position fields in the sync-wals.info file. :param barman.server.Server self: the Server object to synchronise """ _logger.debug("sync_wals(%s)", self.config.name) if not self.passive_node: raise SyncError("server %s is not passive" % self.config.name) # Try to acquire the sync-wal lock if the lock is not available, # abort the sync-wal operation try: with ServerWalSyncLock( self.config.barman_lock_directory, self.config.name, ): try: # Need to load data from status files: primary.info # and sync-wals.info sync_wals_info = self.load_sync_wals_info() primary_info = self.load_primary_info() # We want to exit if the compression on master is different # from the one on the local server if primary_info["config"]["compression"] != self.config.compression: raise SyncError( "Compression method on server %s " "(%s) does not match local " "compression method (%s) " % ( self.config.name, primary_info["config"]["compression"], self.config.compression, ) ) # If the first WAL that needs to be copied is older # than the begin WAL of the first locally available backup, # synchronisation is skipped. This means that we need # to copy a WAL file which won't be associated to any local # backup. Consider the following scenarios: # # bw: indicates the begin WAL of the first backup # sw: the first WAL to be sync-ed # # The following examples use truncated names for WAL files # (e.g. 1 instead of 000000010000000000000001) # # Case 1: bw = 10, sw = 9 - SKIP and wait for backup # Case 2: bw = 10, sw = 10 - SYNC # Case 3: bw = 10, sw = 15 - SYNC # # Search for the first WAL file (skip history, # backup and partial files) first_remote_wal = None for wal in primary_info["wals"]: if xlog.is_wal_file(wal["name"]): first_remote_wal = wal["name"] break first_backup_id = self.get_first_backup_id() first_backup = ( self.get_backup(first_backup_id) if first_backup_id else None ) # Also if there are not any backups on the local server # no wal synchronisation is required if not first_backup: output.warning( "No base backup for server %s" % self.config.name ) return if first_backup.begin_wal > first_remote_wal: output.warning( "Skipping WAL synchronisation for " "server %s: no available local backup " "for %s" % (self.config.name, first_remote_wal) ) return local_wals = [] wal_file_paths = [] for wal in primary_info["wals"]: # filter all the WALs that are smaller # or equal to the name of the latest synchronised WAL if ( sync_wals_info.last_wal and wal["name"] <= sync_wals_info.last_wal ): continue # Generate WalFileInfo Objects using remote WAL metas. # This list will be used for the update of the xlog.db wal_info_file = WalFileInfo(**wal) local_wals.append(wal_info_file) wal_file_paths.append(wal_info_file.relpath()) # Rsync Options: # recursive: recursive copy of subdirectories # perms: preserve permissions on synced files # times: preserve modification timestamps during # synchronisation # protect-args: force rsync to preserve the integrity of # rsync command arguments and filename. 
# inplace: for inplace file substitution # and update of files rsync = Rsync( args=[ "--recursive", "--perms", "--times", "--protect-args", "--inplace", ], ssh=self.config.primary_ssh_command, bwlimit=self.config.bandwidth_limit, allowed_retval=(0,), network_compression=self.config.network_compression, path=self.path, ) # Source and destination of the rsync operations src = ":%s/" % primary_info["config"]["wals_directory"] dest = "%s/" % self.config.wals_directory # Perform the rsync copy using the list of relative paths # obtained from the primary.info file rsync.from_file_list(wal_file_paths, src, dest) # If everything is synced without errors, # update xlog.db using the list of WalFileInfo object with self.xlogdb("a") as fxlogdb: for wal_info in local_wals: fxlogdb.write(wal_info.to_xlogdb_line()) # We need to update the sync-wals.info file with the latest # synchronised WAL and the latest read position. self.write_sync_wals_info_file(primary_info) except CommandFailedException as e: msg = "WAL synchronisation for server %s failed: %s" % ( self.config.name, e, ) output.error(msg) return except BaseException as e: msg_lines = force_str(e).strip().splitlines() # Use only the first line of exception message # If the exception has no attached message # use the raw type name if not msg_lines: msg_lines = [type(e).__name__] output.error( "WAL synchronisation for server %s failed with: %s\n%s", self.config.name, msg_lines[0], "\n".join(msg_lines[1:]), ) except LockFileException: output.error( "Another sync-wal operation is running for server %s ", self.config.name, ) @staticmethod def set_sync_starting_point(xlogdb_file, last_wal, last_position): """ Check if the xlog.db file has changed between two requests from the client and set the start point for reading the file :param file xlogdb_file: an open and readable xlog.db file object :param str|None last_wal: last read name :param int|None last_position: last read position :return int: the position has been set """ # If last_position is None start reading from the beginning of the file position = int(last_position) if last_position is not None else 0 # Seek to required position xlogdb_file.seek(position) # Read 24 char (the size of a wal name) wal_name = xlogdb_file.read(24) # If the WAL name is the requested one start from last_position if wal_name == last_wal: # Return to the line start xlogdb_file.seek(position) return position # If the file has been truncated, start over xlogdb_file.seek(0) return 0 def write_sync_wals_info_file(self, primary_info): """ Write the content of SYNC_WALS_INFO_FILE on disk :param dict primary_info: """ try: with open( os.path.join(self.config.wals_directory, SYNC_WALS_INFO_FILE), "w" ) as syncfile: syncfile.write( "%s\t%s" % (primary_info["last_name"], primary_info["last_position"]) ) except (OSError, IOError): # Wrap file access exceptions using SyncError raise SyncError( "Unable to write %s file for server %s" % (SYNC_WALS_INFO_FILE, self.config.name) ) def load_primary_info(self): """ Load the content of PRIMARY_INFO_FILE for the given server :return dict: primary server information """ primary_info_file = os.path.join( self.config.backup_directory, PRIMARY_INFO_FILE ) try: with open(primary_info_file) as f: return json.load(f) except (OSError, IOError) as e: # Wrap file access exceptions using SyncError raise SyncError( "Cannot open %s file for server %s: %s" % (PRIMARY_INFO_FILE, self.config.name, e) ) barman-2.18/barman/cloud_providers/0000755000621200062120000000000014172556766015546 5ustar 
00000000000000barman-2.18/barman/cloud_providers/azure_blob_storage.py0000644000621200062120000004564114172556763021777 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see import logging import os import requests from io import BytesIO, RawIOBase, SEEK_END from barman.clients.cloud_compression import decompress_to_file from barman.cloud import CloudInterface, CloudProviderError, DecompressingStreamingIO try: # Python 3.x from urllib.parse import urlparse except ImportError: # Python 2.x from urlparse import urlparse try: from azure.storage.blob import ( BlobPrefix, ContainerClient, PartialBatchErrorException, ) from azure.core.exceptions import ( HttpResponseError, ResourceNotFoundError, ServiceRequestError, ) except ImportError: raise SystemExit("Missing required python module: azure-storage-blob") # Domain for azure blob URIs # See https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#resource-uri-syntax AZURE_BLOB_STORAGE_DOMAIN = "blob.core.windows.net" class StreamingBlobIO(RawIOBase): """ Wrap an azure-storage-blob StorageStreamDownloader in the IOBase API. Inherits the IOBase defaults of seekable() -> False and writable() -> False. """ def __init__(self, blob): self._chunks = blob.chunks() self._current_chunk = BytesIO() def readable(self): return True def read(self, n=1): """ Read at most n bytes from the stream. Fetches new chunks from the StorageStreamDownloader until the requested number of bytes have been read. :param int n: Number of bytes to read from the stream :return: Up to n bytes from the stream :rtype: bytes """ n = None if n < 0 else n blob_bytes = self._current_chunk.read(n) bytes_count = len(blob_bytes) try: while bytes_count < n: self._current_chunk = BytesIO(self._chunks.next()) new_blob_bytes = self._current_chunk.read(n - bytes_count) bytes_count += len(new_blob_bytes) blob_bytes += new_blob_bytes except StopIteration: pass return blob_bytes class AzureCloudInterface(CloudInterface): # Azure block blob limitations # https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs MAX_CHUNKS_PER_FILE = 50000 # Minimum block size allowed in Azure Blob Storage is 64KB MIN_CHUNK_SIZE = 64 << 10 # Azure Blob Storage permit a maximum of 4.75TB per file # This is a hard limit, while our upload procedure can go over the specified # MAX_ARCHIVE_SIZE - so we set a maximum of 1TB per file MAX_ARCHIVE_SIZE = 1 << 40 # The size of each chunk in a single object upload when the size of the # object exceeds max_single_put_size. We default to 2MB in order to # allow the default max_concurrency of 8 to be achieved when uploading # uncompressed WAL segments of the default 16MB size. 
DEFAULT_MAX_BLOCK_SIZE = 2 << 20 # The maximum amount of concurrent chunks allowed in a single object upload # where the size exceeds max_single_put_size. We default to 8 based on # experiments with in-region and inter-region transfers within Azure. DEFAULT_MAX_CONCURRENCY = 8 # The largest file size which will be uploaded in a single PUT request. This # should be lower than the size of the compressed WAL segment in order to # force the Azure client to use concurrent chunk upload for archiving WAL files. DEFAULT_MAX_SINGLE_PUT_SIZE = 4 << 20 # The maximum size of the requests connection pool used by the Azure client # to upload objects. REQUESTS_POOL_MAXSIZE = 32 def __init__( self, url, jobs=2, encryption_scope=None, credential=None, tags=None, max_block_size=DEFAULT_MAX_BLOCK_SIZE, max_concurrency=DEFAULT_MAX_CONCURRENCY, max_single_put_size=DEFAULT_MAX_SINGLE_PUT_SIZE, ): """ Create a new Azure Blob Storage interface given the supplied acccount url :param str url: Full URL of the cloud destination/source :param int jobs: How many sub-processes to use for asynchronous uploading, defaults to 2. """ super(AzureCloudInterface, self).__init__( url=url, jobs=jobs, tags=tags, ) self.encryption_scope = encryption_scope self.credential = credential self.max_block_size = max_block_size self.max_concurrency = max_concurrency self.max_single_put_size = max_single_put_size parsed_url = urlparse(url) if parsed_url.netloc.endswith(AZURE_BLOB_STORAGE_DOMAIN): # We have an Azure Storage URI so we use the following form: # ://..core.windows.net/ # where is /. # Note that although Azure supports an implicit root container, we require # that the container is always included. self.account_url = parsed_url.netloc try: self.bucket_name = parsed_url.path.split("/")[1] except IndexError: raise ValueError("azure blob storage URL %s is malformed" % url) path = parsed_url.path.split("/")[2:] else: # We are dealing with emulated storage so we use the following form: # http://:// logging.info("Using emulated storage URL: %s " % url) if "AZURE_STORAGE_CONNECTION_STRING" not in os.environ: raise ValueError( "A connection string must be provided when using emulated storage" ) try: self.bucket_name = parsed_url.path.split("/")[2] except IndexError: raise ValueError("emulated storage URL %s is malformed" % url) path = parsed_url.path.split("/")[3:] self.path = "/".join(path) self.bucket_exists = None self._reinit_session() def _reinit_session(self): """ Create a new session """ if self.credential: # Any supplied credential takes precedence over the environment credential = self.credential elif "AZURE_STORAGE_CONNECTION_STRING" in os.environ: logging.info("Authenticating to Azure with connection string") self.container_client = ContainerClient.from_connection_string( conn_str=os.getenv("AZURE_STORAGE_CONNECTION_STRING"), container_name=self.bucket_name, ) return else: if "AZURE_STORAGE_SAS_TOKEN" in os.environ: logging.info("Authenticating to Azure with SAS token") credential = os.getenv("AZURE_STORAGE_SAS_TOKEN") elif "AZURE_STORAGE_KEY" in os.environ: logging.info("Authenticating to Azure with shared key") credential = os.getenv("AZURE_STORAGE_KEY") else: logging.info("Authenticating to Azure with default credentials") # azure-identity is not part of azure-storage-blob so only import # it if needed try: from azure.identity import DefaultAzureCredential except ImportError: raise SystemExit("Missing required python module: azure-identity") credential = DefaultAzureCredential() session = requests.Session() adapter = 
requests.adapters.HTTPAdapter(pool_maxsize=self.REQUESTS_POOL_MAXSIZE) session.mount("https://", adapter) self.container_client = ContainerClient( account_url=self.account_url, container_name=self.bucket_name, credential=credential, max_single_put_size=self.max_single_put_size, max_block_size=self.max_block_size, session=session, ) @property def _extra_upload_args(self): optional_args = {} if self.encryption_scope: optional_args["encryption_scope"] = self.encryption_scope return optional_args def test_connectivity(self): """ Test Azure connectivity by trying to access a container """ try: # We are not even interested in the existence of the bucket, # we just want to see if Azure blob service is reachable. self.bucket_exists = self._check_bucket_existence() return True except (HttpResponseError, ServiceRequestError) as exc: logging.error("Can't connect to cloud provider: %s", exc) return False def _check_bucket_existence(self): """ Chck Azure Blob Storage for the target container Although there is an `exists` function it cannot be called by container-level shared access tokens. We therefore check for existence by calling list_blobs on the container. :return: True if the container exists, False otherwise :rtype: bool """ try: self.container_client.list_blobs().next() except ResourceNotFoundError: return False except StopIteration: # The bucket is empty but it does exist pass return True def _create_bucket(self): """ Create the container in cloud storage """ # By default public access is disabled for newly created containers. # Unlike S3 there is no concept of regions for containers (this is at # the storage account level in Azure) self.container_client.create_container() def _walk_blob_tree(self, obj, ignore=None): """ Walk a blob tree in a directory manner and return a list of directories and files. 
:param ItemPaged[BlobProperties] obj: Iterable response of BlobProperties obtained from ContainerClient.walk_blobs :param str|None ignore: An entry to be excluded from the returned list, typically the top level prefix :return: List of objects and directories in the tree :rtype: List[str] """ if obj.name != ignore: yield obj.name if isinstance(obj, BlobPrefix): # We are a prefix and not a leaf so iterate children for child in obj: for v in self._walk_blob_tree(child): yield v def list_bucket(self, prefix="", delimiter="/"): """ List bucket content in a directory manner :param str prefix: :param str delimiter: :return: List of objects and dirs right under the prefix :rtype: List[str] """ res = self.container_client.walk_blobs( name_starts_with=prefix, delimiter=delimiter ) return self._walk_blob_tree(res, ignore=prefix) def download_file(self, key, dest_path, decompress=None): """ Download a file from Azure Blob Storage :param str key: The key to download :param str dest_path: Where to put the destination file :param str|None decompress: Compression scheme to use for decompression """ obj = self.container_client.download_blob(key) with open(dest_path, "wb") as dest_file: if not decompress: obj.download_to_stream(dest_file) return blob = StreamingBlobIO(obj) decompress_to_file(blob, dest_file, decompress) def remote_open(self, key, decompressor=None): """ Open a remote Azure Blob Storage object and return a readable stream :param str key: The key identifying the object to open :param barman.clients.cloud_compression.ChunkedCompressor decompressor: A ChunkedCompressor object which will be used to decompress chunks of bytes as they are read from the stream :return: A file-like object from which the stream can be read or None if the key does not exist """ try: obj = self.container_client.download_blob(key) resp = StreamingBlobIO(obj) if decompressor: return DecompressingStreamingIO(resp, decompressor) else: return resp except ResourceNotFoundError: return None def upload_fileobj( self, fileobj, key, override_tags=None, ): """ Synchronously upload the content of a file-like object to a cloud key :param fileobj IOBase: File-like object to upload :param str key: The key to identify the uploaded object :param List[tuple] override_tags: List of tags as k,v tuples to be added to the uploaded object """ # Find length of the file so we can pass it to the Azure client fileobj.seek(0, SEEK_END) length = fileobj.tell() fileobj.seek(0) extra_args = self._extra_upload_args.copy() tags = override_tags or self.tags if tags is not None: extra_args["tags"] = dict(tags) self.container_client.upload_blob( name=key, data=fileobj, overwrite=True, length=length, max_concurrency=self.max_concurrency, **extra_args ) def create_multipart_upload(self, key): """No-op method because Azure has no concept of multipart uploads Instead of multipart upload, blob blocks are staged and then committed. However this does not require anything to be created up front. This method therefore does nothing. """ pass def _upload_part(self, upload_metadata, key, body, part_number): """ Upload a single block of this block blob. Uses the supplied part number to generate the block ID and returns it as the "PartNumber" in the part metadata. 
:param dict upload_metadata: Provider-specific metadata about the upload (not used in Azure) :param str key: The key to use in the cloud service :param object body: A stream-like object to upload :param int part_number: Part number, starting from 1 :return: The part metadata :rtype: dict[str, None|str] """ # Block IDs must be the same length for all bocks in the blob # and no greater than 64 characters. Given there is a limit of # 50000 blocks per blob we zero-pad the part_number to five # places. block_id = str(part_number).zfill(5) blob_client = self.container_client.get_blob_client(key) blob_client.stage_block(block_id, body, **self._extra_upload_args) return {"PartNumber": block_id} def _complete_multipart_upload(self, upload_metadata, key, parts): """ Finish a "multipart upload" by committing all blocks in the blob. :param dict upload_metadata: Provider-specific metadata about the upload (not used in Azure) :param str key: The key to use in the cloud service :param parts: The list of block IDs for the blocks which compose this blob """ blob_client = self.container_client.get_blob_client(key) block_list = [part["PartNumber"] for part in parts] extra_args = self._extra_upload_args.copy() if self.tags is not None: extra_args["tags"] = dict(self.tags) blob_client.commit_block_list(block_list, **extra_args) def _abort_multipart_upload(self, upload_metadata, key): """ Abort the upload of a block blob The objective of this method is to clean up any dangling resources - in this case those resources are uncommitted blocks. :param dict upload_metadata: Provider-specific metadata about the upload (not used in Azure) :param str key: The key to use in the cloud service """ # Ideally we would clean up uncommitted blocks at this point # however there is no way of doing that. # Uncommitted blocks will be discarded after 7 days or when # the blob is committed (if they're not included in the commit). # We therefore create an empty blob (thereby discarding all uploaded # blocks for that blob) and then delete it. blob_client = self.container_client.get_blob_client(key) blob_client.commit_block_list([], **self._extra_upload_args) blob_client.delete_blob() def delete_objects(self, paths): """ Delete the objects at the specified paths :param List[str] paths: """ try: # If paths is empty because the files have already been deleted then # delete_blobs will return successfully so we just call it with whatever # we were given responses = self.container_client.delete_blobs(*paths) except PartialBatchErrorException as exc: # Although the docs imply any errors will be returned in the response # object, in practice a PartialBatchErrorException is raised which contains # the response objects in its `parts` attribute. # We therefore set responses to reference the response in the exception and # treat it the same way we would a regular response. 
logging.warning( "PartialBatchErrorException received from Azure: %s" % exc.message ) responses = exc.parts # resp is an iterator of HttpResponse objects so we check the status codes # which should all be 202 if successful errors = False for resp in responses: if resp.status_code == 404: logging.warning( "Deletion of object %s failed because it could not be found" % resp.request.url ) elif resp.status_code != 202: errors = True logging.error( 'Deletion of object %s failed with error code: "%s"' % (resp.request.url, resp.status_code) ) if errors: raise CloudProviderError( "Error from cloud provider while deleting objects - " "please check the Barman logs" ) barman-2.18/barman/cloud_providers/__init__.py0000644000621200062120000000713314172556763017660 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see from barman.exceptions import BarmanException class CloudProviderUnsupported(BarmanException): """ Exception raised when an unsupported cloud provider is requested """ class CloudProviderOptionUnsupported(BarmanException): """ Exception raised when a supported cloud provider is given an unsupported option """ def _update_kwargs(kwargs, config, args): """ Helper which adds the attributes of config specified in args to the supplied kwargs dict if they exist. 
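    A minimal usage sketch (the attribute names below are hypothetical and
    only illustrate the copy-if-present behaviour)::

        kwargs = {"url": "https://example.blob.core.windows.net/container"}
        _update_kwargs(kwargs, config, ("jobs", "tags"))
        # kwargs now also contains "jobs" and "tags", but only if those
        # attributes are present in config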
""" for arg in args: if arg in config: kwargs[arg] = getattr(config, arg) def _make_s3_cloud_interface(config, cloud_interface_kwargs): from barman.cloud_providers.aws_s3 import S3CloudInterface cloud_interface_kwargs.update( { "profile_name": config.profile, "endpoint_url": config.endpoint_url, } ) if "encryption" in config: cloud_interface_kwargs["encryption"] = config.encryption return S3CloudInterface(**cloud_interface_kwargs) def _make_azure_cloud_interface(config, cloud_interface_kwargs): from barman.cloud_providers.azure_blob_storage import AzureCloudInterface _update_kwargs( cloud_interface_kwargs, config, ( "encryption_scope", "max_block_size", "max_concurrency", "max_single_put_size", ), ) if "credential" in config and config.credential is not None: try: from azure.identity import AzureCliCredential, ManagedIdentityCredential except ImportError: raise SystemExit("Missing required python module: azure-identity") supported_credentials = { "azure-cli": AzureCliCredential, "managed-identity": ManagedIdentityCredential, } try: cloud_interface_kwargs["credential"] = supported_credentials[ config.credential ]() except KeyError: raise CloudProviderOptionUnsupported( "Unsupported credential: %s" % config.credential ) return AzureCloudInterface(**cloud_interface_kwargs) def get_cloud_interface(config): """ Create a CloudInterface for the specified cloud_provider :returns: A CloudInterface for the specified cloud_provider :rtype: CloudInterface """ cloud_interface_kwargs = { "url": config.source_url if "source_url" in config else config.destination_url } _update_kwargs(cloud_interface_kwargs, config, ("jobs", "tags")) if config.cloud_provider == "aws-s3": return _make_s3_cloud_interface(config, cloud_interface_kwargs) elif config.cloud_provider == "azure-blob-storage": return _make_azure_cloud_interface(config, cloud_interface_kwargs) else: raise CloudProviderUnsupported( "Unsupported cloud provider: %s" % config.cloud_provider ) barman-2.18/barman/cloud_providers/aws_s3.py0000644000621200062120000003346014172556763017322 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see import logging import shutil from io import RawIOBase from barman.clients.cloud_compression import decompress_to_file from barman.cloud import CloudInterface, CloudProviderError, DecompressingStreamingIO try: # Python 3.x from urllib.parse import urlencode, urlparse except ImportError: # Python 2.x from urlparse import urlparse from urllib import urlencode try: import boto3 from botocore.exceptions import ClientError, EndpointConnectionError except ImportError: raise SystemExit("Missing required python module: boto3") class StreamingBodyIO(RawIOBase): """ Wrap a boto StreamingBody in the IOBase API. 
""" def __init__(self, body): self.body = body def readable(self): return True def read(self, n=-1): n = None if n < 0 else n return self.body.read(n) class S3CloudInterface(CloudInterface): # S3 multipart upload limitations # http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html MAX_CHUNKS_PER_FILE = 10000 MIN_CHUNK_SIZE = 5 << 20 # S3 permit a maximum of 5TB per file # https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html # This is a hard limit, while our upload procedure can go over the specified # MAX_ARCHIVE_SIZE - so we set a maximum of 1TB per file MAX_ARCHIVE_SIZE = 1 << 40 def __getstate__(self): state = self.__dict__.copy() # Remove boto3 client reference from the state as it cannot be pickled # in Python >= 3.8 and multiprocessing will pickle the object when the # worker processes are created. # The worker processes create their own boto3 sessions so do not need # the boto3 session from the parent process. del state["s3"] return state def __setstate__(self, state): self.__dict__.update(state) def __init__( self, url, encryption=None, jobs=2, profile_name=None, endpoint_url=None, tags=None, ): """ Create a new S3 interface given the S3 destination url and the profile name :param str url: Full URL of the cloud destination/source :param str|None encryption: Encryption type string :param int jobs: How many sub-processes to use for asynchronous uploading, defaults to 2. :param str profile_name: Amazon auth profile identifier :param str endpoint_url: override default endpoint detection strategy with this one """ super(S3CloudInterface, self).__init__( url=url, jobs=jobs, tags=tags, ) self.profile_name = profile_name self.encryption = encryption self.endpoint_url = endpoint_url # Extract information from the destination URL parsed_url = urlparse(url) # If netloc is not present, the s3 url is badly formatted. if parsed_url.netloc == "" or parsed_url.scheme != "s3": raise ValueError("Invalid s3 URL address: %s" % url) self.bucket_name = parsed_url.netloc self.bucket_exists = None self.path = parsed_url.path.lstrip("/") # Build a session, so we can extract the correct resource self._reinit_session() def _reinit_session(self): """ Create a new session """ session = boto3.Session(profile_name=self.profile_name) self.s3 = session.resource("s3", endpoint_url=self.endpoint_url) @property def _extra_upload_args(self): """ Return a dict containing ExtraArgs to be passed to certain boto3 calls Because some boto3 calls accept `ExtraArgs: {}` and others do not, we return a nexted dict which can be expanded with `**` in the boto3 call. """ additional_args = {} if self.encryption: additional_args["ServerSideEncryption"] = self.encryption return additional_args def test_connectivity(self): """ Test AWS connectivity by trying to access a bucket """ try: # We are not even interested in the existence of the bucket, # we just want to try if aws is reachable self.bucket_exists = self._check_bucket_existence() return True except EndpointConnectionError as exc: logging.error("Can't connect to cloud provider: %s", exc) return False def _check_bucket_existence(self): """ Check cloud storage for the target bucket :return: True if the bucket exists, False otherwise :rtype: bool """ try: # Search the bucket on s3 self.s3.meta.client.head_bucket(Bucket=self.bucket_name) return True except ClientError as exc: # If a client error is thrown, then check the error code. 
# If code was 404, then the bucket does not exist error_code = exc.response["Error"]["Code"] if error_code == "404": return False # Otherwise there is nothing else to do than re-raise the original # exception raise def _create_bucket(self): """ Create the bucket in cloud storage """ # Get the current region from client. # Do not use session.region_name here because it may be None region = self.s3.meta.client.meta.region_name logging.info( "Bucket '%s' does not exist, creating it on region '%s'", self.bucket_name, region, ) create_bucket_config = { "ACL": "private", } # The location constraint is required during bucket creation # for all regions outside of us-east-1. This constraint cannot # be specified in us-east-1; specifying it in this region # results in a failure, so we will only # add it if we are deploying outside of us-east-1. # See https://github.com/boto/boto3/issues/125 if region != "us-east-1": create_bucket_config["CreateBucketConfiguration"] = { "LocationConstraint": region, } self.s3.Bucket(self.bucket_name).create(**create_bucket_config) def list_bucket(self, prefix="", delimiter="/"): """ List bucket content in a directory manner :param str prefix: :param str delimiter: :return: List of objects and dirs right under the prefix :rtype: List[str] """ if prefix.startswith(delimiter): prefix = prefix.lstrip(delimiter) res = self.s3.meta.client.list_objects_v2( Bucket=self.bucket_name, Prefix=prefix, Delimiter=delimiter ) # List "folders" keys = res.get("CommonPrefixes") if keys is not None: for k in keys: yield k.get("Prefix") # List "files" objects = res.get("Contents") if objects is not None: for o in objects: yield o.get("Key") def download_file(self, key, dest_path, decompress): """ Download a file from S3 :param str key: The S3 key to download :param str dest_path: Where to put the destination file :param bool decompress: Whenever to decompress this file or not """ # Open the remote file obj = self.s3.Object(self.bucket_name, key) remote_file = obj.get()["Body"] # Write the dest file in binary mode with open(dest_path, "wb") as dest_file: # If the file is not compressed, just copy its content if not decompress: shutil.copyfileobj(remote_file, dest_file) return decompress_to_file(remote_file, dest_file, decompress) def remote_open(self, key, decompressor=None): """ Open a remote S3 object and returns a readable stream :param str key: The key identifying the object to open :param barman.clients.cloud_compression.ChunkedCompressor decompressor: A ChunkedCompressor object which will be used to decompress chunks of bytes as they are read from the stream :return: A file-like object from which the stream can be read or None if the key does not exist """ try: obj = self.s3.Object(self.bucket_name, key) resp = StreamingBodyIO(obj.get()["Body"]) if decompressor: return DecompressingStreamingIO(resp, decompressor) else: return resp except ClientError as exc: error_code = exc.response["Error"]["Code"] if error_code == "NoSuchKey": return None else: raise def upload_fileobj(self, fileobj, key, override_tags=None): """ Synchronously upload the content of a file-like object to a cloud key :param fileobj IOBase: File-like object to upload :param str key: The key to identify the uploaded object :param List[tuple] override_tags: List of k,v tuples which should override any tags already defined in the cloud interface """ extra_args = self._extra_upload_args.copy() tags = override_tags or self.tags if tags is not None: extra_args["Tagging"] = urlencode(tags) 
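        # For illustration only (hypothetical values): tags supplied as
        # [("environment", "prod")] are encoded by urlencode into the
        # query-string form "environment=prod" expected by the S3 Tagging
        # ExtraArg.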
self.s3.meta.client.upload_fileobj( Fileobj=fileobj, Bucket=self.bucket_name, Key=key, ExtraArgs=extra_args ) def create_multipart_upload(self, key): """ Create a new multipart upload :param key: The key to use in the cloud service :return: The multipart upload handle :rtype: dict[str, str] """ extra_args = self._extra_upload_args.copy() if self.tags is not None: extra_args["Tagging"] = urlencode(self.tags) return self.s3.meta.client.create_multipart_upload( Bucket=self.bucket_name, Key=key, **extra_args ) def _upload_part(self, upload_metadata, key, body, part_number): """ Upload a part into this multipart upload :param dict upload_metadata: The multipart upload handle :param str key: The key to use in the cloud service :param object body: A stream-like object to upload :param int part_number: Part number, starting from 1 :return: The part handle :rtype: dict[str, None|str] """ part = self.s3.meta.client.upload_part( Body=body, Bucket=self.bucket_name, Key=key, UploadId=upload_metadata["UploadId"], PartNumber=part_number, ) return { "PartNumber": part_number, "ETag": part["ETag"], } def _complete_multipart_upload(self, upload_metadata, key, parts): """ Finish a certain multipart upload :param dict upload_metadata: The multipart upload handle :param str key: The key to use in the cloud service :param parts: The list of parts composing the multipart upload """ self.s3.meta.client.complete_multipart_upload( Bucket=self.bucket_name, Key=key, UploadId=upload_metadata["UploadId"], MultipartUpload={"Parts": parts}, ) def _abort_multipart_upload(self, upload_metadata, key): """ Abort a certain multipart upload :param dict upload_metadata: The multipart upload handle :param str key: The key to use in the cloud service """ self.s3.meta.client.abort_multipart_upload( Bucket=self.bucket_name, Key=key, UploadId=upload_metadata["UploadId"] ) def delete_objects(self, paths): """ Delete the objects at the specified paths :param List[str] paths: """ # Explicitly check if we are being asked to delete nothing at all and if # so return without error. if len(paths) == 0: return # S3 bulk deletion is limited to batches of 1000 keys batch_size = 1000 try: # If xrange exists then we are on python 2 so we need to use it range_fun = xrange except NameError: # Otherwise just use range range_fun = range errors = False for i in range_fun(0, len(paths), batch_size): resp = self.s3.meta.client.delete_objects( Bucket=self.bucket_name, Delete={ "Objects": [{"Key": path} for path in paths[i : i + batch_size]], "Quiet": True, }, ) if "Errors" in resp: errors = True for error_dict in resp["Errors"]: logging.error( 'Deletion of object %s failed with error code: "%s", message: "%s"' % (error_dict["Key"], error_dict["Code"], error_dict["Message"]) ) if errors: raise CloudProviderError( "Error from cloud provider while deleting objects - " "please check the Barman logs" ) barman-2.18/barman/diagnose.py0000644000621200062120000000672614172556763014516 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents the barman diagnostic tool. """ import datetime import json import logging import barman from barman import fs, output from barman.backup import BackupInfo from barman.exceptions import CommandFailedException, FsOperationFailed from barman.utils import BarmanEncoder _logger = logging.getLogger(__name__) def exec_diagnose(servers, errors_list): """ Diagnostic command: gathers information from backup server and from all the configured servers. Gathered information should be used for support and problems detection :param dict(str,barman.server.Server) servers: list of configured servers :param list errors_list: list of global errors """ # global section. info about barman server diagnosis = {"global": {}, "servers": {}} # barman global config diagnosis["global"]["config"] = dict(barman.__config__._global_config) diagnosis["global"]["config"]["errors_list"] = errors_list try: command = fs.UnixLocalCommand() # basic system info diagnosis["global"]["system_info"] = command.get_system_info() except CommandFailedException as e: diagnosis["global"]["system_info"] = {"error": repr(e)} diagnosis["global"]["system_info"]["barman_ver"] = barman.__version__ diagnosis["global"]["system_info"]["timestamp"] = datetime.datetime.now() # per server section for name in sorted(servers): server = servers[name] if server is None: output.error("Unknown server '%s'" % name) continue # server configuration diagnosis["servers"][name] = {} diagnosis["servers"][name]["config"] = vars(server.config) if "config" in diagnosis["servers"][name]["config"]: del diagnosis["servers"][name]["config"]["config"] # server system info if server.config.ssh_command: try: command = fs.UnixRemoteCommand( ssh_command=server.config.ssh_command, path=server.path ) diagnosis["servers"][name]["system_info"] = command.get_system_info() except FsOperationFailed: pass # barman status information for the server diagnosis["servers"][name]["status"] = server.get_remote_status() # backup list backups = server.get_available_backups(BackupInfo.STATUS_ALL) diagnosis["servers"][name]["backups"] = backups # wal status diagnosis["servers"][name]["wals"] = { "last_archived_wal_per_timeline": server.backup_manager.get_latest_archived_wals_info(), } # Release any PostgreSQL resource server.close() output.info( json.dumps(diagnosis, cls=BarmanEncoder, indent=4, sort_keys=True), log=False ) barman-2.18/barman/cli.py0000644000621200062120000016737214172556763013501 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module implements the interface with the command line and the logger. 
""" import json import logging import os import sys from argparse import SUPPRESS, ArgumentTypeError, ArgumentParser, HelpFormatter if sys.version_info.major < 3: from argparse import Action, _SubParsersAction, _ActionsContainer import argcomplete from collections import OrderedDict from contextlib import closing import barman.config import barman.diagnose from barman import output from barman.annotations import KeepManager from barman.config import RecoveryOptions from barman.exceptions import ( BadXlogSegmentName, RecoveryException, SyncError, WalArchiveContentError, ) from barman.infofile import BackupInfo, WalFileInfo from barman.server import Server from barman.utils import ( BarmanEncoder, check_non_negative, check_positive, configure_logging, drop_privileges, force_str, get_log_levels, parse_log_level, SHA256, ) from barman.xlog import check_archive_usable from barman.backup_manifest import BackupManifest from barman.storage.local_file_manager import LocalFileManager _logger = logging.getLogger(__name__) # Support aliases for argparse in python2. # Derived from https://gist.github.com/sampsyo/471779 and based on the # initial patchset for CPython for supporting aliases in argparse. # Licensed under CC0 1.0 if sys.version_info.major < 3: class AliasedSubParsersAction(_SubParsersAction): old_init = staticmethod(_ActionsContainer.__init__) @staticmethod def _containerInit( self, description, prefix_chars, argument_default, conflict_handler ): AliasedSubParsersAction.old_init( self, description, prefix_chars, argument_default, conflict_handler ) self.register("action", "parsers", AliasedSubParsersAction) class _AliasedPseudoAction(Action): def __init__(self, name, aliases, help): dest = name if aliases: dest += " (%s)" % ",".join(aliases) sup = super(AliasedSubParsersAction._AliasedPseudoAction, self) sup.__init__(option_strings=[], dest=dest, help=help) def add_parser(self, name, **kwargs): aliases = kwargs.pop("aliases", []) parser = super(AliasedSubParsersAction, self).add_parser(name, **kwargs) # Make the aliases work. for alias in aliases: self._name_parser_map[alias] = parser # Make the help text reflect them, first removing old help entry. 
if "help" in kwargs: help_text = kwargs.pop("help") self._choices_actions.pop() pseudo_action = self._AliasedPseudoAction(name, aliases, help_text) self._choices_actions.append(pseudo_action) return parser # override argparse to register new subparser action by default _ActionsContainer.__init__ = AliasedSubParsersAction._containerInit class OrderedHelpFormatter(HelpFormatter): def _format_usage(self, usage, actions, groups, prefix): for action in actions: if not action.option_strings: action.choices = OrderedDict(sorted(action.choices.items())) return super(OrderedHelpFormatter, self)._format_usage( usage, actions, groups, prefix ) p = ArgumentParser( epilog="Barman by EnterpriseDB (www.enterprisedb.com)", formatter_class=OrderedHelpFormatter, ) p.add_argument( "-v", "--version", action="version", version="%s\n\nBarman by EnterpriseDB (www.enterprisedb.com)" % barman.__version__, ) p.add_argument( "-c", "--config", help="uses a configuration file " "(defaults: %s)" % ", ".join(barman.config.Config.CONFIG_FILES), default=SUPPRESS, ) p.add_argument( "--color", "--colour", help="Whether to use colors in the output", choices=["never", "always", "auto"], default="auto", ) p.add_argument( "--log-level", help="Override the default log level", choices=list(get_log_levels()), default=SUPPRESS, ) p.add_argument("-q", "--quiet", help="be quiet", action="store_true") p.add_argument("-d", "--debug", help="debug output", action="store_true") p.add_argument( "-f", "--format", help="output format", choices=output.AVAILABLE_WRITERS.keys(), default=output.DEFAULT_WRITER, ) subparsers = p.add_subparsers(dest="command") def argument(*name_or_flags, **kwargs): """Convenience function to properly format arguments to pass to the command decorator. """ # Remove the completer keyword argument from the dictionary completer = kwargs.pop("completer", None) return (list(name_or_flags), completer, kwargs) def command(args=None, parent=subparsers, cmd_aliases=None): """Decorator to define a new subcommand in a sanity-preserving way. 
The function will be stored in the ``func`` variable when the parser parses arguments so that it can be called directly like so:: args = cli.parse_args() args.func(args) Usage example:: @command([argument("-d", help="Enable debug mode", action="store_true")]) def command(args): print(args) Then on the command line:: $ python cli.py command -d """ if args is None: args = [] if cmd_aliases is None: cmd_aliases = [] def decorator(func): parser = parent.add_parser( func.__name__.replace("_", "-"), description=func.__doc__, help=func.__doc__, aliases=cmd_aliases, ) parent._choices_actions = sorted(parent._choices_actions, key=lambda x: x.dest) for arg in args: if arg[1]: parser.add_argument(*arg[0], **arg[2]).completer = arg[1] else: parser.add_argument(*arg[0], **arg[2]) parser.set_defaults(func=func) return func return decorator @command() def help(args=None): """ show this help message and exit """ p.print_help() def check_target_action(value): """ Check the target action option :param value: str containing the value to check """ if value is None: return None if value in ("pause", "shutdown", "promote"): return value raise ArgumentTypeError("'%s' is not a valid recovery target action" % value) @command( [argument("--minimal", help="machine readable output", action="store_true")], cmd_aliases=["list-server"], ) def list_servers(args): """ List available servers, with useful information """ # Get every server, both inactive and temporarily disabled servers = get_server_list() for name in sorted(servers): server = servers[name] # Exception: manage_server_command is not invoked here # Normally you would call manage_server_command to check if the # server is None and to report inactive and disabled servers, but here # we want all servers and the server cannot be None output.init("list_server", name, minimal=args.minimal) description = server.config.description or "" # If the server has been manually disabled if not server.config.active: description += " (inactive)" # If server has configuration errors elif server.config.disabled: description += " (WARNING: disabled)" # If server is a passive node if server.passive_node: description += " (Passive)" output.result("list_server", name, description) output.close_and_exit() @command( [ argument( "--keep-descriptors", help="Keep the stdout and the stderr streams attached to Barman subprocesses", action="store_true", ) ] ) def cron(args): """ Run maintenance tasks (global command) """ # Skip inactive and temporarily disabled servers servers = get_server_list(skip_inactive=True, skip_disabled=True) for name in sorted(servers): server = servers[name] # Exception: manage_server_command is not invoked here # Normally you would call manage_server_command to check if the # server is None and to report inactive and disabled servers, # but here we have only active and well configured servers. try: server.cron(keep_descriptors=args.keep_descriptors) except Exception: # A cron should never raise an exception, so this code # should never be executed. However, it is here to protect # unrelated servers in case of unexpected failures. 
output.exception( "Unable to run cron on server '%s', " "please look in the barman log file for more details.", name, ) output.close_and_exit() # noinspection PyUnusedLocal def server_completer(prefix, parsed_args, **kwargs): global_config(parsed_args) for conf in barman.__config__.servers(): if conf.name.startswith(prefix): yield conf.name # noinspection PyUnusedLocal def server_completer_all(prefix, parsed_args, **kwargs): global_config(parsed_args) current_list = getattr(parsed_args, "server_name", None) or () for conf in barman.__config__.servers(): if conf.name.startswith(prefix) and conf.name not in current_list: yield conf.name if len(current_list) == 0 and "all".startswith(prefix): yield "all" # noinspection PyUnusedLocal def backup_completer(prefix, parsed_args, **kwargs): global_config(parsed_args) server = get_server(parsed_args) backups = server.get_available_backups() for backup_id in sorted(backups, reverse=True): if backup_id.startswith(prefix): yield backup_id for special_id in ("latest", "last", "oldest", "first", "last-failed"): if len(backups) > 0 and special_id.startswith(prefix): yield special_id @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server names for the backup command " "('all' will show all available servers)", ), argument( "--immediate-checkpoint", help="forces the initial checkpoint to be done as quickly as possible", dest="immediate_checkpoint", action="store_true", default=SUPPRESS, ), argument( "--no-immediate-checkpoint", help="forces the initial checkpoint to be spread", dest="immediate_checkpoint", action="store_false", default=SUPPRESS, ), argument( "--reuse-backup", nargs="?", choices=barman.config.REUSE_BACKUP_VALUES, default=None, const="link", help="use the previous backup to improve transfer-rate. " 'If no argument is given "link" is assumed', ), argument( "--retry-times", help="Number of retries after an error if base backup copy fails.", type=check_non_negative, ), argument( "--retry-sleep", help="Wait time after a failed base backup copy, before retrying.", type=check_non_negative, ), argument( "--no-retry", help="Disable base backup copy retry logic.", dest="retry_times", action="store_const", const=0, ), argument( "--jobs", "-j", help="Run the copy in parallel using NJOBS processes.", type=check_positive, metavar="NJOBS", ), argument( "--bwlimit", help="maximum transfer rate in kilobytes per second. " "A value of 0 means no limit. 
Overrides 'bandwidth_limit' " "configuration option.", metavar="KBPS", type=check_non_negative, default=SUPPRESS, ), argument( "--wait", "-w", help="wait for all the required WAL files to be archived", dest="wait", action="store_true", default=False, ), argument( "--wait-timeout", help="the time, in seconds, spent waiting for the required " "WAL files to be archived before timing out", dest="wait_timeout", metavar="TIMEOUT", default=None, type=check_non_negative, ), ] ) def backup(args): """ Perform a full backup for the given server (supports 'all') """ servers = get_server_list(args, skip_inactive=True, skip_passive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue if args.reuse_backup is not None: server.config.reuse_backup = args.reuse_backup if args.retry_sleep is not None: server.config.basebackup_retry_sleep = args.retry_sleep if args.retry_times is not None: server.config.basebackup_retry_times = args.retry_times if hasattr(args, "immediate_checkpoint"): server.config.immediate_checkpoint = args.immediate_checkpoint if args.jobs is not None: server.config.parallel_jobs = args.jobs if hasattr(args, "bwlimit"): server.config.bandwidth_limit = args.bwlimit with closing(server): server.backup(wait=args.wait, wait_timeout=args.wait_timeout) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server name for the command " "('all' will show all available servers)", ), argument("--minimal", help="machine readable output", action="store_true"), ], cmd_aliases=["list-backup"], ) def list_backups(args): """ List available backups for the given server (supports 'all') """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue output.init("list_backup", name, minimal=args.minimal) with closing(server): server.list_backups() output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server name for the command", ) ] ) def status(args): """ Shows live information and status of the PostgreSQL server """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue output.init("status", name) with closing(server): server.status() output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server name for the command " "('all' will show all available servers)", ), argument("--minimal", help="machine readable output", action="store_true"), argument( "--target", choices=("all", "hot-standby", "wal-streamer"), default="all", help=""" Possible values are: 'hot-standby' (only hot standby servers), 'wal-streamer' (only WAL streaming clients, such as pg_receivewal), 'all' (any of them). 
Defaults to %(default)s""", ), ] ) def replication_status(args): """ Shows live information and status of any streaming client """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue with closing(server): output.init("replication_status", name, minimal=args.minimal) server.replication_status(args.target) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server name for the command ", ) ] ) def rebuild_xlogdb(args): """ Rebuild the WAL file database guessing it from the disk content. """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue with closing(server): server.rebuild_xlogdb() output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command ", ), argument("--target-tli", help="target timeline", type=check_positive), argument( "--target-time", help="target time. You can use any valid unambiguous representation. " 'e.g: "YYYY-MM-DD HH:MM:SS.mmm"', ), argument("--target-xid", help="target transaction ID"), argument("--target-lsn", help="target LSN (Log Sequence Number)"), argument( "--target-name", help="target name created previously with " "pg_create_restore_point() function call", ), argument( "--target-immediate", help="end recovery as soon as a consistent state is reached", action="store_true", default=False, ), argument( "--exclusive", help="set target to be non inclusive", action="store_true" ), argument( "--tablespace", help="tablespace relocation rule", metavar="NAME:LOCATION", action="append", ), argument( "--remote-ssh-command", metavar="SSH_COMMAND", help="This options activates remote recovery, by specifying the secure " "shell command to be launched on a remote host. It is " 'the equivalent of the "ssh_command" server option in ' "the configuration file for remote recovery. " 'Example: "ssh postgres@db2"', ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID to recover", ), argument( "destination_directory", help="the directory where the new server is created", ), argument( "--bwlimit", help="maximum transfer rate in kilobytes per second. " "A value of 0 means no limit. 
Overrides 'bandwidth_limit' " "configuration option.", metavar="KBPS", type=check_non_negative, default=SUPPRESS, ), argument( "--retry-times", help="Number of retries after an error if base backup copy fails.", type=check_non_negative, ), argument( "--retry-sleep", help="Wait time after a failed base backup copy, before retrying.", type=check_non_negative, ), argument( "--no-retry", help="Disable base backup copy retry logic.", dest="retry_times", action="store_const", const=0, ), argument( "--jobs", "-j", help="Run the copy in parallel using NJOBS processes.", type=check_positive, metavar="NJOBS", ), argument( "--get-wal", help="Enable the get-wal option during the recovery.", dest="get_wal", action="store_true", default=SUPPRESS, ), argument( "--no-get-wal", help="Disable the get-wal option during recovery.", dest="get_wal", action="store_false", default=SUPPRESS, ), argument( "--network-compression", help="Enable network compression during remote recovery.", dest="network_compression", action="store_true", default=SUPPRESS, ), argument( "--no-network-compression", help="Disable network compression during remote recovery.", dest="network_compression", action="store_false", default=SUPPRESS, ), argument( "--target-action", help="Specifies what action the server should take once the " "recovery target is reached. This option is not allowed for " "PostgreSQL < 9.1. If PostgreSQL is between 9.1 and 9.4 included " 'the only allowed value is "pause". If PostgreSQL is 9.5 or newer ' 'the possible values are "shutdown", "pause", "promote".', dest="target_action", type=check_target_action, default=SUPPRESS, ), argument( "--standby-mode", dest="standby_mode", action="store_true", default=SUPPRESS, help="Enable standby mode when starting the recovered PostgreSQL instance", ), ] ) def recover(args): """ Recover a server at a given time, name, LSN or xid """ server = get_server(args) # Retrieves the backup backup_id = parse_backup_id(server, args) if backup_id.status not in BackupInfo.STATUS_COPY_DONE: output.error( "Cannot recover from backup '%s' of server '%s': " "backup status is not DONE", args.backup_id, server.config.name, ) output.close_and_exit() # decode the tablespace relocation rules tablespaces = {} if args.tablespace: for rule in args.tablespace: try: tablespaces.update([rule.split(":", 1)]) except ValueError: output.error( "Invalid tablespace relocation rule '%s'\n" "HINT: The valid syntax for a relocation rule is " "NAME:LOCATION", rule, ) output.close_and_exit() # validate the rules against the tablespace list valid_tablespaces = [] if backup_id.tablespaces: valid_tablespaces = [ tablespace_data.name for tablespace_data in backup_id.tablespaces ] for item in tablespaces: if item not in valid_tablespaces: output.error( "Invalid tablespace name '%s'\n" "HINT: Please use any of the following " "tablespaces: %s", item, ", ".join(valid_tablespaces), ) output.close_and_exit() # explicitly disallow the rsync remote syntax (common mistake) if ":" in args.destination_directory: output.error( "The destination directory parameter " "cannot contain the ':' character\n" "HINT: If you want to do a remote recovery you have to use " "the --remote-ssh-command option" ) output.close_and_exit() if args.retry_sleep is not None: server.config.basebackup_retry_sleep = args.retry_sleep if args.retry_times is not None: server.config.basebackup_retry_times = args.retry_times if hasattr(args, "get_wal"): if args.get_wal: server.config.recovery_options.add(RecoveryOptions.GET_WAL) else: 
server.config.recovery_options.remove(RecoveryOptions.GET_WAL) if args.jobs is not None: server.config.parallel_jobs = args.jobs if hasattr(args, "bwlimit"): server.config.bandwidth_limit = args.bwlimit # PostgreSQL supports multiple parameters to specify when the recovery # process will end, and in that case the last entry in recovery # configuration files will be used. See [1] # # Since the meaning of the target options is not dependent on the order # of parameters, we decided to make the target options mutually exclusive. # # [1]: https://www.postgresql.org/docs/current/static/ # recovery-target-settings.html target_options = [ "target_time", "target_xid", "target_lsn", "target_name", "target_immediate", ] specified_target_options = len( [option for option in target_options if getattr(args, option)] ) if specified_target_options > 1: output.error("You cannot specify multiple targets for the recovery operation") output.close_and_exit() if hasattr(args, "network_compression"): if args.network_compression and args.remote_ssh_command is None: output.error( "Network compression can only be used with " "remote recovery.\n" "HINT: If you want to do a remote recovery " "you have to use the --remote-ssh-command option" ) output.close_and_exit() server.config.network_compression = args.network_compression with closing(server): try: server.recover( backup_id, args.destination_directory, tablespaces=tablespaces, target_tli=args.target_tli, target_time=args.target_time, target_xid=args.target_xid, target_lsn=args.target_lsn, target_name=args.target_name, target_immediate=args.target_immediate, exclusive=args.exclusive, remote_command=args.remote_ssh_command, target_action=getattr(args, "target_action", None), standby_mode=getattr(args, "standby_mode", None), ) except RecoveryException as exc: output.error(force_str(exc)) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server names to show " "('all' will show all available servers)", ) ], cmd_aliases=["show-server"], ) def show_servers(args): """ Show all configuration parameters for the specified servers """ servers = get_server_list(args) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command( server, name, skip_inactive=False, skip_disabled=False, disabled_is_error=False, ): continue # If the server has been manually disabled if not server.config.active: name += " (inactive)" # If server has configuration errors elif server.config.disabled: name += " (WARNING: disabled)" output.init("show_server", name) with closing(server): server.show() output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server name target of the switch-wal command", ), argument( "--force", help="forces the switch of a WAL by executing a checkpoint before", dest="force", action="store_true", default=False, ), argument( "--archive", help="wait for one WAL file to be archived", dest="archive", action="store_true", default=False, ), argument( "--archive-timeout", help="the time, in seconds, the archiver will wait for a new WAL file " "to be archived before timing out", metavar="TIMEOUT", default="30", type=check_non_negative, ), ], cmd_aliases=["switch-xlog"], ) def switch_wal(args): """ Execute the switch-wal command on the target server """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply 
general rule) if not manage_server_command(server, name): continue with closing(server): server.switch_wal(args.force, args.archive, args.archive_timeout) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer_all, nargs="+", help="specifies the server names to check " "('all' will check all available servers)", ), argument( "--nagios", help="Nagios plugin compatible output", action="store_true" ), ] ) def check(args): """ Check if the server configuration is working. This command returns success if every checks pass, or failure if any of these fails """ if args.nagios: output.set_output_writer(output.NagiosOutputWriter()) servers = get_server_list(args) for name in sorted(servers): server = servers[name] # Validate the returned server if not manage_server_command( server, name, skip_inactive=False, skip_disabled=False, disabled_is_error=False, ): continue output.init("check", name, server.config.active, server.config.disabled) with closing(server): server.check() output.close_and_exit() @command() def diagnose(args=None): """ Diagnostic command (for support and problems detection purpose) """ # Get every server (both inactive and temporarily disabled) servers = get_server_list(on_error_stop=False, suppress_error=True) # errors list with duplicate paths between servers errors_list = barman.__config__.servers_msg_list barman.diagnose.exec_diagnose(servers, errors_list) output.close_and_exit() @command( [ argument( "--primary", help="execute the sync-info on the primary node (if set)", action="store_true", default=SUPPRESS, ), argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "last_wal", help="specifies the name of the latest WAL read", nargs="?" ), argument( "last_position", nargs="?", type=check_positive, help="the last position read from xlog database (in bytes)", ), ] ) def sync_info(args): """ Output the internal synchronisation status. 
Used to sync_backup with a passive node """ server = get_server(args) try: # if called with --primary option if getattr(args, "primary", False): primary_info = server.primary_node_info(args.last_wal, args.last_position) output.info( json.dumps(primary_info, cls=BarmanEncoder, indent=4), log=False ) else: server.sync_status(args.last_wal, args.last_position) except SyncError as e: # Catch SyncError exceptions and output only the error message, # preventing from logging the stack trace output.error(e) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "backup_id", help="specifies the backup ID to be copied on the passive node" ), ] ) def sync_backup(args): """ Command that synchronises a backup from a master to a passive node """ server = get_server(args) try: server.sync_backup(args.backup_id) except SyncError as e: # Catch SyncError exceptions and output only the error message, # preventing from logging the stack trace output.error(e) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ) ] ) def sync_wals(args): """ Command that synchronises WAL files from a master to a passive node """ server = get_server(args) try: server.sync_wals() except SyncError as e: # Catch SyncError exceptions and output only the error message, # preventing from logging the stack trace output.error(e) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), ], cmd_aliases=["show-backups"], ) def show_backup(args): """ This method shows a single backup information """ server = get_server(args) # Retrieves the backup backup_info = parse_backup_id(server, args) with closing(server): server.show_backup(backup_info) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), argument( "--target", choices=("standalone", "data", "wal", "full"), default="standalone", help=""" Possible values are: data (just the data files), standalone (base backup files, including required WAL files), wal (just WAL files between the beginning of base backup and the following one (if any) or the end of the log) and full (same as data + wal). 
Defaults to %(default)s""", ), ] ) def list_files(args): """ List all the files for a single backup """ server = get_server(args) # Retrieves the backup backup_info = parse_backup_id(server, args) try: for line in backup_info.get_list_of_files(args.target): output.info(line, log=False) except BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" 'HINT: Please run "barman rebuild-xlogdb %s" ' "to solve this issue", force_str(e), server.config.name, ) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), ] ) def delete(args): """ Delete a backup """ server = get_server(args) # Retrieves the backup backup_id = parse_backup_id(server, args) with closing(server): if not server.delete_backup(backup_id): output.error( "Cannot delete backup (%s %s)" % (server.config.name, backup_id) ) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument("wal_name", help="the WAL file to get"), argument( "--output-directory", "-o", help="put the retrieved WAL file in this directory with the original name", default=SUPPRESS, ), argument( "--partial", "-P", help="retrieve also partial WAL files (.partial)", action="store_true", dest="partial", default=False, ), argument( "--gzip", "-z", "-x", help="compress the output with gzip", action="store_const", const="gzip", dest="compression", default=SUPPRESS, ), argument( "--bzip2", "-j", help="compress the output with bzip2", action="store_const", const="bzip2", dest="compression", default=SUPPRESS, ), argument( "--peek", "-p", help="peek from the WAL archive up to 'SIZE' WAL files, starting " "from the requested one. 'SIZE' must be an integer >= 1. " "When invoked with this option, get-wal returns a list of " "zero to 'SIZE' WAL segment names, one per row.", metavar="SIZE", type=check_positive, default=SUPPRESS, ), argument( "--test", "-t", help="test both the connection and the configuration of the requested " "PostgreSQL server in Barman for WAL retrieval. With this option, " "the 'wal_name' mandatory argument is ignored.", action="store_true", default=SUPPRESS, ), ] ) def get_wal(args): """ Retrieve WAL_NAME file from SERVER_NAME archive. The content will be streamed on standard output unless the --output-directory option is specified. """ server = get_server(args, inactive_is_error=True) if getattr(args, "test", None): output.info( "Ready to retrieve WAL files from the server %s", server.config.name ) return # Retrieve optional arguments. If an argument is not specified, # the namespace doesn't contain it due to SUPPRESS default. # In that case we pick 'None' using getattr third argument. 
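# Illustrative note (added for clarity, not part of the original source):
# because these optional arguments use `default=SUPPRESS`, argparse omits
# them from the namespace entirely when they are not given on the command
# line. For example, a plain `barman get-wal main 000000010000000000000001`
# yields a namespace with no `compression` attribute at all, so the
# `getattr(args, "compression", None)` calls below return None instead of
# raising AttributeError.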
compression = getattr(args, "compression", None) output_directory = getattr(args, "output_directory", None) peek = getattr(args, "peek", None) with closing(server): server.get_wal( args.wal_name, compression=compression, output_directory=output_directory, peek=peek, partial=args.partial, ) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "--test", "-t", help="test both the connection and the configuration of the requested " "PostgreSQL server in Barman to make sure it is ready to receive " "WAL files.", action="store_true", default=SUPPRESS, ), ] ) def put_wal(args): """ Receive a WAL file from SERVER_NAME and securely store it in the incoming directory. The file will be read from standard input in tar format. """ server = get_server(args, inactive_is_error=True) if getattr(args, "test", None): output.info("Ready to accept WAL files for the server %s", server.config.name) return try: # Python 3.x stream = sys.stdin.buffer except AttributeError: # Python 2.x stream = sys.stdin with closing(server): server.put_wal(stream) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ) ] ) def archive_wal(args): """ Execute maintenance operations on WAL files for a given server. This command processes any incoming WAL files for the server and archives them along the catalogue. """ server = get_server(args) with closing(server): server.archive_wal() output.close_and_exit() @command( [ argument( "--stop", help="stop the receive-wal subprocess for the server", action="store_true", ), argument( "--reset", help="reset the status of receive-wal removing any status files", action="store_true", ), argument( "--create-slot", help="create the replication slot, if it does not exist", action="store_true", ), argument( "--drop-slot", help="drop the replication slot, if it exists", action="store_true", ), argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), ] ) def receive_wal(args): """ Start a receive-wal process. The process uses the streaming protocol to receive WAL files from the PostgreSQL server. """ server = get_server(args) if args.stop and args.reset: output.error("--stop and --reset options are not compatible") # If the caller requested to shutdown the receive-wal process deliver the # termination signal, otherwise attempt to start it elif args.stop: server.kill("receive-wal") elif args.create_slot: with closing(server): server.create_physical_repslot() elif args.drop_slot: with closing(server): server.drop_repslot() else: with closing(server): server.receive_wal(reset=args.reset) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), ] ) def check_backup(args): """ Make sure that all the required WAL files to check the consistency of a physical backup (that is, from the beginning to the end of the full backup) are correctly archived. This command is automatically invoked by the cron command and at the end of every backup operation. 
""" server = get_server(args) # Retrieves the backup backup_info = parse_backup_id(server, args) with closing(server): server.check_backup(backup_info) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command ", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), ], cmd_aliases=["verify"], ) def verify_backup(args): """ verify a backup for the given server and backup id """ # get barman.server.Server server = get_server(args) # Raises an error if wrong backup backup_info = parse_backup_id(server, args) # get backup path output.info("Verifying backup %s on server %s" % (args.backup_id, args.server_name)) server.backup_manager.verify_backup(backup_info) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command ", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), ], ) def generate_manifest(args): """ Generate a manifest-backup for the given server and backup id """ server = get_server(args) # Raises an error if wrong backup backup_info = parse_backup_id(server, args) # know context (remote backup? local?) local_file_manager = LocalFileManager() backup_manifest = BackupManifest( backup_info.get_data_directory(), local_file_manager, SHA256() ) backup_manifest.create_backup_manifest() output.info("Backup %s is valid on server %s" % (args.backup_id, args.server_name)) output.close_and_exit() @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "backup_id", completer=backup_completer, help="specifies the backup ID" ), argument("--release", help="remove the keep annotation", action="store_true"), argument( "--status", help="return the keep status of the backup", action="store_true" ), argument( "--target", help="keep this backup with the specified recovery target", choices=[KeepManager.TARGET_FULL, KeepManager.TARGET_STANDALONE], ), ] ) def keep(args): """ Tag the specified backup so that it will never be deleted """ if not any((args.release, args.status, args.target)): output.error( "one of the arguments -r/--release -s/--status --target is required" ) output.close_and_exit() server = get_server(args) backup_info = parse_backup_id(server, args) backup_manager = server.backup_manager if args.status: output.init("status", server.config.name) target = backup_manager.get_keep_target(backup_info.backup_id) if target: output.result("status", server.config.name, "keep_status", "Keep", target) else: output.result("status", server.config.name, "keep_status", "Keep", "nokeep") elif args.release: backup_manager.release_keep(backup_info.backup_id) else: if backup_info.status != BackupInfo.DONE: msg = ( "Cannot add keep to backup %s because it has status %s. " "Only backups with status DONE can be kept." ) % (backup_info.backup_id, backup_info.status) output.error(msg) output.close_and_exit() backup_manager.keep_backup(backup_info.backup_id, args.target) @command( [ argument( "server_name", completer=server_completer, help="specifies the server name for the command", ), argument( "--timeline", help="the earliest timeline whose WALs should cause the check to fail", type=check_positive, ), ] ) def check_wal_archive(args): """ Check the WAL archive can be safely used for a new server. This will fail if there are any existing WALs in the archive. 
If the --timeline option is used then any WALs on earlier timelines than that specified will not cause the check to fail. """ server = get_server(args) output.init("check_wal_archive", server.config.name) with server.xlogdb() as fxlogdb: wals = [WalFileInfo.from_xlogdb_line(w).name for w in fxlogdb] try: check_archive_usable( wals, timeline=args.timeline, ) output.result("check_wal_archive", server.config.name) except WalArchiveContentError as err: msg = "WAL archive check failed for server %s: %s" % ( server.config.name, force_str(err), ) logging.error(msg) output.error(msg) output.close_and_exit() def pretty_args(args): """ Prettify the given argparse namespace to be human readable :type args: argparse.Namespace :return: the human readable content of the namespace """ values = dict(vars(args)) # Retrieve the command name with recent argh versions if "_functions_stack" in values: values["command"] = values["_functions_stack"][0].__name__ del values["_functions_stack"] # Older argh versions only have the matching function in the namespace elif "function" in values: values["command"] = values["function"].__name__ del values["function"] return "%r" % values def global_config(args): """ Set the configuration file """ if hasattr(args, "config"): filename = args.config else: try: filename = os.environ["BARMAN_CONFIG_FILE"] except KeyError: filename = None config = barman.config.Config(filename) barman.__config__ = config # change user if needed try: drop_privileges(config.user) except OSError: msg = "ERROR: please run barman as %r user" % config.user raise SystemExit(msg) except KeyError: msg = "ERROR: the configured user %r does not exists" % config.user raise SystemExit(msg) # configure logging if hasattr(args, "log_level"): config.log_level = args.log_level log_level = parse_log_level(config.log_level) configure_logging( config.log_file, log_level or barman.config.DEFAULT_LOG_LEVEL, config.log_format ) if log_level is None: _logger.warning("unknown log_level in config file: %s", config.log_level) # Configure output if args.format != output.DEFAULT_WRITER or args.quiet or args.debug: output.set_output_writer(args.format, quiet=args.quiet, debug=args.debug) # Configure color output if args.color == "auto": # Enable colored output if both stdout and stderr are TTYs output.ansi_colors_enabled = sys.stdout.isatty() and sys.stderr.isatty() else: output.ansi_colors_enabled = args.color == "always" # Load additional configuration files config.load_configuration_files_directory() # We must validate the configuration here in order to have # both output and logging configured config.validate_global_config() _logger.debug( "Initialised Barman version %s (config: %s, args: %s)", barman.__version__, config.config_file, pretty_args(args), ) def get_server( args, skip_inactive=True, skip_disabled=False, skip_passive=False, inactive_is_error=False, on_error_stop=True, suppress_error=False, ): """ Get a single server retrieving its configuration (wraps get_server_list()) Returns a Server object or None if the required server is unknown and on_error_stop is False. 
WARNING: this function modifies the 'args' parameter :param args: an argparse namespace containing a single server_name parameter WARNING: the function modifies the content of this parameter :param bool skip_inactive: do nothing if the server is inactive :param bool skip_disabled: do nothing if the server is disabled :param bool skip_passive: do nothing if the server is passive :param bool inactive_is_error: treat inactive server as error :param bool on_error_stop: stop if an error is found :param bool suppress_error: suppress display of errors (e.g. diagnose) :rtype: Server|None """ # This function must to be called with in a single-server context name = args.server_name assert isinstance(name, str) # The 'all' special name is forbidden in this context if name == "all": output.error("You cannot use 'all' in a single server context") output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return None # Builds a list from a single given name args.server_name = [name] # Skip_inactive is reset if inactive_is_error is set, because # it needs to retrieve the inactive server to emit the error. skip_inactive &= not inactive_is_error # Retrieve the requested server servers = get_server_list( args, skip_inactive, skip_disabled, skip_passive, on_error_stop, suppress_error ) # The requested server has been excluded from get_server_list result if len(servers) == 0: output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return None # retrieve the server object server = servers[name] # Apply standard validation control and skips # the server if inactive or disabled, displaying standard # error messages. If on_error_stop (default) exits if not manage_server_command(server, name, inactive_is_error) and on_error_stop: output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return None # Returns the filtered server return server def get_server_list( args=None, skip_inactive=False, skip_disabled=False, skip_passive=False, on_error_stop=True, suppress_error=False, ): """ Get the server list from the configuration If args the parameter is None or arg.server_name is ['all'] returns all defined servers :param args: an argparse namespace containing a list server_name parameter :param bool skip_inactive: skip inactive servers when 'all' is required :param bool skip_disabled: skip disabled servers when 'all' is required :param bool skip_passive: skip passive servers when 'all' is required :param bool on_error_stop: stop if an error is found :param bool suppress_error: suppress display of errors (e.g. 
diagnose) :rtype: dict[str,Server] """ server_dict = {} # This function must to be called with in a multiple-server context assert not args or isinstance(args.server_name, list) # Generate the list of servers (required for global errors) available_servers = barman.__config__.server_names() # Get a list of configuration errors from all the servers global_error_list = barman.__config__.servers_msg_list # Global errors have higher priority if global_error_list: # Output the list of global errors if not suppress_error: for error in global_error_list: output.error(error) # If requested, exit on first error if on_error_stop: output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return {} # Handle special 'all' server cases # - args is None # - 'all' special name if not args or "all" in args.server_name: # When 'all' is used, it must be the only specified argument if args and len(args.server_name) != 1: output.error("You cannot use 'all' with other server names") servers = available_servers else: # Put servers in a set, so multiple occurrences are counted only once servers = set(args.server_name) # Loop through all the requested servers for server in servers: conf = barman.__config__.get_server(server) if conf is None: # Unknown server server_dict[server] = None else: server_object = Server(conf) # Skip inactive servers, if requested if skip_inactive and not server_object.config.active: output.info("Skipping inactive server '%s'" % conf.name) continue # Skip disabled servers, if requested if skip_disabled and server_object.config.disabled: output.info("Skipping temporarily disabled server '%s'" % conf.name) continue # Skip passive nodes, if requested if skip_passive and server_object.passive_node: output.info("Skipping passive server '%s'", conf.name) continue server_dict[server] = server_object return server_dict def manage_server_command( server, name=None, inactive_is_error=False, disabled_is_error=True, skip_inactive=True, skip_disabled=True, ): """ Standard and consistent method for managing server errors within a server command execution. By default, suggests to skip any inactive and disabled server; it also emits errors for disabled servers by default. Returns True if the command has to be executed for this server. :param barman.server.Server server: server to be checked for errors :param str name: name of the server, in a multi-server command :param bool inactive_is_error: treat inactive server as error :param bool disabled_is_error: treat disabled server as error :param bool skip_inactive: skip if inactive :param bool skip_disabled: skip if disabled :return: True if the command has to be executed on this server :rtype: boolean """ # Unknown server (skip it) if not server: output.error("Unknown server '%s'" % name) return False if not server.config.active: # Report inactive server as error if inactive_is_error: output.error("Inactive server: %s" % server.config.name) if skip_inactive: return False # Report disabled server as error if server.config.disabled: # Output all the messages as errors, and exit terminating the run. if disabled_is_error: for message in server.config.msg_list: output.error(message) if skip_disabled: return False # All ok, execute the command return True def parse_backup_id(server, args): """ Parses backup IDs including special words such as latest, oldest, etc. Exit with error if the backup id doesn't exist. 
:param Server server: server object to search for the required backup :param args: command line arguments namespace :rtype: barman.infofile.LocalBackupInfo """ if args.backup_id in ("latest", "last"): backup_id = server.get_last_backup_id() elif args.backup_id in ("oldest", "first"): backup_id = server.get_first_backup_id() elif args.backup_id in ("last-failed",): backup_id = server.get_last_backup_id([BackupInfo.FAILED]) else: backup_id = args.backup_id backup_info = server.get_backup(backup_id) if backup_info is None: output.error( "Unknown backup '%s' for server '%s'", args.backup_id, server.config.name ) output.close_and_exit() return backup_info def main(): """ The main method of Barman """ # noinspection PyBroadException try: argcomplete.autocomplete(p) args = p.parse_args() global_config(args) if args.command is None: p.print_help() else: args.func(args) except KeyboardInterrupt: msg = "Process interrupted by user (KeyboardInterrupt)" output.error(msg) except Exception as e: msg = "%s\nSee log file for more details." % e output.exception(msg) # cleanup output API and exit honoring output.error_occurred and # output.error_exit_code output.close_and_exit() if __name__ == "__main__": # This code requires the mock module and allows us to test # bash completion inside the IDE debugger try: # noinspection PyUnresolvedReferences import mock sys.stdout = mock.Mock(wraps=sys.stdout) sys.stdout.isatty.return_value = True os.dup2(2, 8) except ImportError: pass main() barman-2.18/barman/recovery_executor.py0000644000621200062120000015310714172556763016475 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see <http://www.gnu.org/licenses/>.
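# Illustrative usage sketch (added for clarity, not part of the original
# source; `server`, `backup_info` and all literal values are assumed to be
# provided by the caller, typically barman.server.Server):
#
#     executor = RecoveryExecutor(server.backup_manager)
#     try:
#         recovery_info = executor.recover(
#             backup_info,
#             "/var/lib/pgsql/14/data",
#             remote_command="ssh postgres@target-host",
#             target_time="2022-01-01 00:00:00",
#         )
#     finally:
#         executor.close()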
""" This module contains the methods necessary to perform a recovery """ from __future__ import print_function import collections import datetime import logging import os import re import shutil import socket import tempfile import time from io import BytesIO import dateutil.parser import dateutil.tz from barman import output, xlog from barman.command_wrappers import RsyncPgData from barman.config import RecoveryOptions from barman.copy_controller import RsyncCopyController from barman.exceptions import ( BadXlogSegmentName, CommandFailedException, DataTransferFailure, FsOperationFailed, RecoveryInvalidTargetException, RecoveryStandbyModeException, RecoveryTargetActionException, ) from barman.fs import UnixLocalCommand, UnixRemoteCommand from barman.infofile import BackupInfo, LocalBackupInfo from barman.utils import force_str, mkpath # generic logger for this module _logger = logging.getLogger(__name__) # regexp matching a single value in Postgres configuration file PG_CONF_SETTING_RE = re.compile(r"^\s*([^\s=]+)\s*=?\s*(.*)$") # create a namedtuple object called Assertion # with 'filename', 'line', 'key' and 'value' as properties Assertion = collections.namedtuple("Assertion", "filename line key value") # noinspection PyMethodMayBeStatic class RecoveryExecutor(object): """ Class responsible of recovery operations """ # Potentially dangerous options list, which need to be revised by the user # after a recovery DANGEROUS_OPTIONS = [ "data_directory", "config_file", "hba_file", "ident_file", "external_pid_file", "ssl_cert_file", "ssl_key_file", "ssl_ca_file", "ssl_crl_file", "unix_socket_directory", "unix_socket_directories", "include", "include_dir", "include_if_exists", ] # List of options that, if present, need to be forced to a specific value # during recovery, to avoid data losses MANGLE_OPTIONS = { # Dangerous options "archive_command": "false", # Recovery options that may interfere with recovery targets "recovery_target": None, "recovery_target_name": None, "recovery_target_time": None, "recovery_target_xid": None, "recovery_target_lsn": None, "recovery_target_inclusive": None, "recovery_target_timeline": None, "recovery_target_action": None, } def __init__(self, backup_manager): """ Constructor :param barman.backup.BackupManager backup_manager: the BackupManager owner of the executor """ self.backup_manager = backup_manager self.server = backup_manager.server self.config = backup_manager.config self.temp_dirs = [] def recover( self, backup_info, dest, tablespaces=None, remote_command=None, target_tli=None, target_time=None, target_xid=None, target_lsn=None, target_name=None, target_immediate=False, exclusive=False, target_action=None, standby_mode=None, ): """ Performs a recovery of a backup This method should be called in a closing context :param barman.infofile.BackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None remote_command: The remote command to recover the base backup, in case of remote backup. 
:param str|None target_tli: the target timeline :param str|None target_time: the target time :param str|None target_xid: the target xid :param str|None target_lsn: the target LSN :param str|None target_name: the target name created previously with pg_create_restore_point() function call :param str|None target_immediate: end recovery as soon as consistency is reached :param bool exclusive: whether the recovery is exclusive or not :param str|None target_action: The recovery target action :param bool|None standby_mode: standby mode """ # Run the cron to be sure the wal catalog is up to date # Prepare a map that contains all the objects required for a recovery recovery_info = self._setup(backup_info, remote_command, dest) output.info( "Starting %s restore for server %s using backup %s", recovery_info["recovery_dest"], self.server.config.name, backup_info.backup_id, ) output.info("Destination directory: %s", dest) if remote_command: output.info("Remote command: %s", remote_command) # If the backup we are recovering is still not validated and we # haven't requested the get-wal feature, display a warning message if not recovery_info["get_wal"]: if backup_info.status == BackupInfo.WAITING_FOR_WALS: output.warning( "IMPORTANT: You have requested a recovery operation for " "a backup that does not have yet all the WAL files that " "are required for consistency." ) # Set targets for PITR self._set_pitr_targets( recovery_info, backup_info, dest, target_name, target_time, target_tli, target_xid, target_lsn, target_immediate, target_action, ) # Retrieve the safe_horizon for smart copy self._retrieve_safe_horizon(recovery_info, backup_info, dest) # check destination directory. If doesn't exist create it try: recovery_info["cmd"].create_dir_if_not_exists(dest) except FsOperationFailed as e: output.error("unable to initialise destination directory '%s': %s", dest, e) output.close_and_exit() # Initialize tablespace directories if backup_info.tablespaces: self._prepare_tablespaces( backup_info, recovery_info["cmd"], dest, tablespaces ) # Copy the base backup output.info("Copying the base backup.") try: self._backup_copy( backup_info, dest, tablespaces, remote_command, recovery_info["safe_horizon"], ) except DataTransferFailure as e: output.error("Failure copying base backup: %s", e) output.close_and_exit() # Copy the backup.info file in the destination as # ".barman-recover.info" if remote_command: try: recovery_info["rsync"]( backup_info.filename, ":%s/.barman-recover.info" % dest ) except CommandFailedException as e: output.error("copy of recovery metadata file failed: %s", e) output.close_and_exit() else: backup_info.save(os.path.join(dest, ".barman-recover.info")) # Standby mode is not available for PostgreSQL older than 9.0 if backup_info.version < 90000 and standby_mode: raise RecoveryStandbyModeException( "standby_mode is available only from PostgreSQL 9.0" ) # Restore the WAL segments. If GET_WAL option is set, skip this phase # as they will be retrieved using the wal-get command. if not recovery_info["get_wal"]: # If the backup we restored is still waiting for WALS, read the # backup info again and check whether it has been validated. # Notify the user if it is still not DONE. if backup_info.status == BackupInfo.WAITING_FOR_WALS: data = LocalBackupInfo(self.server, backup_info.filename) if data.status == BackupInfo.WAITING_FOR_WALS: output.warning( "IMPORTANT: The backup we have recovered IS NOT " "VALID. Required WAL files for consistency are " "missing. 
Please verify that WAL archiving is " "working correctly or evaluate using the 'get-wal' " "option for recovery" ) output.info("Copying required WAL segments.") required_xlog_files = () # Makes static analysers happy try: # TODO: Stop early if taget-immediate # Retrieve a list of required log files required_xlog_files = tuple( self.server.get_required_xlog_files( backup_info, target_tli, recovery_info["target_epoch"] ) ) # Restore WAL segments into the wal_dest directory self._xlog_copy( required_xlog_files, recovery_info["wal_dest"], remote_command ) except DataTransferFailure as e: output.error("Failure copying WAL files: %s", e) output.close_and_exit() except BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" 'HINT: Please run "barman rebuild-xlogdb %s" ' "to solve this issue", force_str(e), self.config.name, ) output.close_and_exit() # If WAL files are put directly in the pg_xlog directory, # avoid shipping of just recovered files # by creating the corresponding archive status file if not recovery_info["is_pitr"]: output.info("Generating archive status files") self._generate_archive_status( recovery_info, remote_command, required_xlog_files ) # Generate recovery.conf file (only if needed by PITR or get_wal) is_pitr = recovery_info["is_pitr"] get_wal = recovery_info["get_wal"] if is_pitr or get_wal or standby_mode: output.info("Generating recovery configuration") self._generate_recovery_conf( recovery_info, backup_info, dest, target_immediate, exclusive, remote_command, target_name, target_time, target_tli, target_xid, target_lsn, standby_mode, ) # Create archive_status directory if necessary archive_status_dir = os.path.join(recovery_info["wal_dest"], "archive_status") try: recovery_info["cmd"].create_dir_if_not_exists(archive_status_dir) except FsOperationFailed as e: output.error( "unable to create the archive_status directory '%s': %s", archive_status_dir, e, ) output.close_and_exit() # As last step, analyse configuration files in order to spot # harmful options. Barman performs automatic conversion of # some options as well as notifying users of their existence. # # This operation is performed in three steps: # 1) mapping # 2) analysis # 3) copy output.info("Identify dangerous settings in destination directory.") self._map_temporary_config_files(recovery_info, backup_info, remote_command) self._analyse_temporary_config_files(recovery_info) self._copy_temporary_config_files(dest, remote_command, recovery_info) return recovery_info def _setup(self, backup_info, remote_command, dest): """ Prepare the recovery_info dictionary for the recovery, as well as temporary working directory :param barman.infofile.LocalBackupInfo backup_info: representation of a backup :param str remote_command: ssh command for remote connection :return dict: recovery_info dictionary, holding the basic values for a recovery """ # Calculate the name of the WAL directory if backup_info.version < 100000: wal_dest = os.path.join(dest, "pg_xlog") else: wal_dest = os.path.join(dest, "pg_wal") tempdir = tempfile.mkdtemp(prefix="barman_recovery-") self.temp_dirs.append(tempdir) recovery_info = { "cmd": None, "recovery_dest": "local", "rsync": None, "configuration_files": [], "destination_path": dest, "temporary_configuration_files": [], "tempdir": tempdir, "is_pitr": False, "wal_dest": wal_dest, "get_wal": RecoveryOptions.GET_WAL in self.config.recovery_options, } # A map that will keep track of the results of the recovery. 
# Used for output generation results = { "changes": [], "warnings": [], "delete_barman_wal": False, "missing_files": [], "get_wal": False, "recovery_start_time": datetime.datetime.now(), } recovery_info["results"] = results # Set up a list of configuration files recovery_info["configuration_files"].append("postgresql.conf") if backup_info.version >= 90400: recovery_info["configuration_files"].append("postgresql.auto.conf") # Identify the file holding the recovery configuration results["recovery_configuration_file"] = "postgresql.auto.conf" if backup_info.version < 120000: results["recovery_configuration_file"] = "recovery.conf" # Handle remote recovery options if remote_command: recovery_info["recovery_dest"] = "remote" recovery_info["rsync"] = RsyncPgData( path=self.server.path, ssh=remote_command, bwlimit=self.config.bandwidth_limit, network_compression=self.config.network_compression, ) try: # create a UnixRemoteCommand obj if is a remote recovery recovery_info["cmd"] = UnixRemoteCommand( remote_command, path=self.server.path ) except FsOperationFailed: output.error( "Unable to connect to the target host using the command '%s'", remote_command, ) output.close_and_exit() else: # if is a local recovery create a UnixLocalCommand recovery_info["cmd"] = UnixLocalCommand() return recovery_info def _set_pitr_targets( self, recovery_info, backup_info, dest, target_name, target_time, target_tli, target_xid, target_lsn, target_immediate, target_action, ): """ Set PITR targets - as specified by the user :param dict recovery_info: Dictionary containing all the recovery parameters :param barman.infofile.LocalBackupInfo backup_info: representation of a backup :param str dest: destination directory of the recovery :param str|None target_name: recovery target name for PITR :param str|None target_time: recovery target time for PITR :param str|None target_tli: recovery target timeline for PITR :param str|None target_xid: recovery target transaction id for PITR :param str|None target_lsn: recovery target LSN for PITR :param bool|None target_immediate: end recovery as soon as consistency is reached :param str|None target_action: recovery target action for PITR """ target_epoch = None target_datetime = None d_immediate = backup_info.version >= 90400 and target_immediate d_lsn = backup_info.version >= 100000 and target_lsn d_tli = target_tli and target_tli != backup_info.timeline # Detect PITR if target_time or target_xid or d_tli or target_name or d_immediate or d_lsn: recovery_info["is_pitr"] = True targets = {} if target_time: try: target_datetime = dateutil.parser.parse(target_time) except ValueError as e: raise RecoveryInvalidTargetException( "Unable to parse the target time parameter %r: %s" % (target_time, e) ) except TypeError: # this should not happen, but there is a known bug in # dateutil.parser.parse() implementation # ref: https://bugs.launchpad.net/dateutil/+bug/1247643 raise RecoveryInvalidTargetException( "Unable to parse the target time parameter %r" % target_time ) # If the parsed timestamp is naive, forces it to local timezone if target_datetime.tzinfo is None: target_datetime = target_datetime.replace( tzinfo=dateutil.tz.tzlocal() ) # Check if the target time is reachable from the # selected backup if backup_info.end_time > target_datetime: raise RecoveryInvalidTargetException( "The requested target time %s " "is before the backup end time %s" % (target_datetime, backup_info.end_time) ) ms = target_datetime.microsecond / 1000000.0 target_epoch = time.mktime(target_datetime.timetuple()) + 
ms targets["time"] = str(target_datetime) if target_xid: targets["xid"] = str(target_xid) if d_lsn: targets["lsn"] = str(d_lsn) if d_tli and target_tli != backup_info.timeline: targets["timeline"] = str(d_tli) if target_name: targets["name"] = str(target_name) if d_immediate: targets["immediate"] = d_immediate # Manage the target_action option if backup_info.version < 90100: if target_action: raise RecoveryTargetActionException( "Illegal target action '%s' " "for this version of PostgreSQL" % target_action ) elif 90100 <= backup_info.version < 90500: if target_action == "pause": recovery_info["pause_at_recovery_target"] = "on" elif target_action: raise RecoveryTargetActionException( "Illegal target action '%s' " "for this version of PostgreSQL" % target_action ) else: if target_action in ("pause", "shutdown", "promote"): recovery_info["recovery_target_action"] = target_action elif target_action: raise RecoveryTargetActionException( "Illegal target action '%s' " "for this version of PostgreSQL" % target_action ) output.info( "Doing PITR. Recovery target %s", (", ".join(["%s: %r" % (k, v) for k, v in targets.items()])), ) recovery_info["wal_dest"] = os.path.join(dest, "barman_wal") # With a PostgreSQL version older than 8.4, it is the user's # responsibility to delete the "barman_wal" directory as the # restore_command option in recovery.conf is not supported if backup_info.version < 80400 and not recovery_info["get_wal"]: recovery_info["results"]["delete_barman_wal"] = True else: # Raise an error if target_lsn is used with a pgversion < 10 if backup_info.version < 100000: if target_lsn: raise RecoveryInvalidTargetException( "Illegal use of recovery_target_lsn '%s' " "for this version of PostgreSQL " "(version 10 minimum required)" % target_lsn ) if target_immediate: raise RecoveryInvalidTargetException( "Illegal use of recovery_target_immediate " "for this version of PostgreSQL " "(version 9.4 minimum required)" ) if target_action: raise RecoveryTargetActionException( "Can't enable recovery target action when PITR is not required" ) recovery_info["target_epoch"] = target_epoch recovery_info["target_datetime"] = target_datetime def _retrieve_safe_horizon(self, recovery_info, backup_info, dest): """ Retrieve the safe_horizon for smart copy If the target directory contains a previous recovery, it is safe to pick the least of the two backup "begin times" (the one we are recovering now and the one previously recovered in the target directory). Set the value in the given recovery_info dictionary. :param dict recovery_info: Dictionary containing all the recovery parameters :param barman.infofile.LocalBackupInfo backup_info: a backup representation :param str dest: recovery destination directory """ # noinspection PyBroadException try: backup_begin_time = backup_info.begin_time # Retrieve previously recovered backup metadata (if available) dest_info_txt = recovery_info["cmd"].get_file_content( os.path.join(dest, ".barman-recover.info") ) dest_info = LocalBackupInfo( self.server, info_file=BytesIO(dest_info_txt.encode("utf-8")) ) dest_begin_time = dest_info.begin_time # Pick the earlier begin time. Both are tz-aware timestamps because # BackupInfo class ensure it safe_horizon = min(backup_begin_time, dest_begin_time) output.info( "Using safe horizon time for smart rsync copy: %s", safe_horizon ) except FsOperationFailed as e: # Setting safe_horizon to None will effectively disable # the time-based part of smart_copy method. 
However it is still # faster than running all the transfers with checksum enabled. # # FsOperationFailed means the .barman-recover.info is not available # on destination directory safe_horizon = None _logger.warning( "Unable to retrieve safe horizon time for smart rsync copy: %s", e ) except Exception as e: # Same as above, but something failed decoding .barman-recover.info # or comparing times, so log the full traceback safe_horizon = None _logger.exception( "Error retrieving safe horizon time for smart rsync copy: %s", e ) recovery_info["safe_horizon"] = safe_horizon def _prepare_tablespaces(self, backup_info, cmd, dest, tablespaces): """ Prepare the directory structure for required tablespaces, taking care of tablespaces relocation, if requested. :param barman.infofile.LocalBackupInfo backup_info: backup representation :param barman.fs.UnixLocalCommand cmd: Object for filesystem interaction :param str dest: destination dir for the recovery :param dict tablespaces: dict of all the tablespaces and their location """ tblspc_dir = os.path.join(dest, "pg_tblspc") try: # check for pg_tblspc dir into recovery destination folder. # if it does not exists, create it cmd.create_dir_if_not_exists(tblspc_dir) except FsOperationFailed as e: output.error( "unable to initialise tablespace directory '%s': %s", tblspc_dir, e ) output.close_and_exit() for item in backup_info.tablespaces: # build the filename of the link under pg_tblspc directory pg_tblspc_file = os.path.join(tblspc_dir, str(item.oid)) # by default a tablespace goes in the same location where # it was on the source server when the backup was taken location = item.location # if a relocation has been requested for this tablespace, # use the target directory provided by the user if tablespaces and item.name in tablespaces: location = tablespaces[item.name] try: # remove the current link in pg_tblspc, if it exists cmd.delete_if_exists(pg_tblspc_file) # create tablespace location, if does not exist # (raise an exception if it is not possible) cmd.create_dir_if_not_exists(location) # check for write permissions on destination directory cmd.check_write_permission(location) # create symlink between tablespace and recovery folder cmd.create_symbolic_link(location, pg_tblspc_file) except FsOperationFailed as e: output.error( "unable to prepare '%s' tablespace (destination '%s'): %s", item.name, location, e, ) output.close_and_exit() output.info("\t%s, %s, %s", item.oid, item.name, location) def _backup_copy( self, backup_info, dest, tablespaces=None, remote_command=None, safe_horizon=None, ): """ Perform the actual copy of the base backup for recovery purposes First, it copies one tablespace at a time, then the PGDATA directory. Bandwidth limitation, according to configuration, is applied in the process. TODO: manage configuration files if outside PGDATA. :param barman.infofile.LocalBackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. :param datetime.datetime|None safe_horizon: anything after this time has to be checked with checksum """ # Set a ':' prefix to remote destinations dest_prefix = "" if remote_command: dest_prefix = ":" # Create the copy controller object, specific for rsync, # which will drive all the copy operations. 
Items to be # copied are added before executing the copy() method controller = RsyncCopyController( path=self.server.path, ssh_command=remote_command, network_compression=self.config.network_compression, safe_horizon=safe_horizon, retry_times=self.config.basebackup_retry_times, retry_sleep=self.config.basebackup_retry_sleep, workers=self.config.parallel_jobs, ) # Dictionary for paths to be excluded from rsync exclude_and_protect = [] # Process every tablespace if backup_info.tablespaces: for tablespace in backup_info.tablespaces: # By default a tablespace goes in the same location where # it was on the source server when the backup was taken location = tablespace.location # If a relocation has been requested for this tablespace # use the user provided target directory if tablespaces and tablespace.name in tablespaces: location = tablespaces[tablespace.name] # If the tablespace location is inside the data directory, # exclude and protect it from being deleted during # the data directory copy if location.startswith(dest): exclude_and_protect += [location[len(dest) :]] # Exclude and protect the tablespace from being deleted during # the data directory copy exclude_and_protect.append("/pg_tblspc/%s" % tablespace.oid) # Add the tablespace directory to the list of objects # to be copied by the controller controller.add_directory( label=tablespace.name, src="%s/" % backup_info.get_data_directory(tablespace.oid), dst=dest_prefix + location, bwlimit=self.config.get_bwlimit(tablespace), item_class=controller.TABLESPACE_CLASS, ) # Add the PGDATA directory to the list of objects to be copied # by the controller controller.add_directory( label="pgdata", src="%s/" % backup_info.get_data_directory(), dst=dest_prefix + dest, bwlimit=self.config.get_bwlimit(), exclude=[ "/pg_log/*", "/log/*", "/pg_xlog/*", "/pg_wal/*", "/postmaster.pid", "/recovery.conf", "/tablespace_map", ], exclude_and_protect=exclude_and_protect, item_class=controller.PGDATA_CLASS, ) # TODO: Manage different location for configuration files # TODO: that were not within the data directory # Execute the copy try: controller.copy() # TODO: Improve the exception output except CommandFailedException as e: msg = "data transfer failure" raise DataTransferFailure.from_command_error("rsync", e, msg) def _xlog_copy(self, required_xlog_files, wal_dest, remote_command): """ Restore WAL segments :param required_xlog_files: list of all required WAL files :param wal_dest: the destination directory for xlog recover :param remote_command: default None. The remote command to recover the xlog, in case of remote backup. 
""" # List of required WAL files partitioned by containing directory xlogs = collections.defaultdict(list) # add '/' suffix to ensure it is a directory wal_dest = "%s/" % wal_dest # Map of every compressor used with any WAL file in the archive, # to be used during this recovery compressors = {} compression_manager = self.backup_manager.compression_manager # Fill xlogs and compressors maps from required_xlog_files for wal_info in required_xlog_files: hashdir = xlog.hash_dir(wal_info.name) xlogs[hashdir].append(wal_info) # If a compressor is required, make sure it exists in the cache if ( wal_info.compression is not None and wal_info.compression not in compressors ): compressors[wal_info.compression] = compression_manager.get_compressor( compression=wal_info.compression ) rsync = RsyncPgData( path=self.server.path, ssh=remote_command, bwlimit=self.config.bandwidth_limit, network_compression=self.config.network_compression, ) # If compression is used and this is a remote recovery, we need a # temporary directory where to spool uncompressed files, # otherwise we either decompress every WAL file in the local # destination, or we ship the uncompressed file remotely if compressors: if remote_command: # Decompress to a temporary spool directory wal_decompression_dest = tempfile.mkdtemp(prefix="barman_wal-") else: # Decompress directly to the destination directory wal_decompression_dest = wal_dest # Make sure wal_decompression_dest exists mkpath(wal_decompression_dest) else: # If no compression wal_decompression_dest = None if remote_command: # If remote recovery tell rsync to copy them remotely # add ':' prefix to mark it as remote wal_dest = ":%s" % wal_dest total_wals = sum(map(len, xlogs.values())) partial_count = 0 for prefix in sorted(xlogs): batch_len = len(xlogs[prefix]) partial_count += batch_len source_dir = os.path.join(self.config.wals_directory, prefix) _logger.info( "Starting copy of %s WAL files %s/%s from %s to %s", batch_len, partial_count, total_wals, xlogs[prefix][0], xlogs[prefix][-1], ) # If at least one compressed file has been found, activate # compression check and decompression for each WAL files if compressors: for segment in xlogs[prefix]: dst_file = os.path.join(wal_decompression_dest, segment.name) if segment.compression is not None: compressors[segment.compression].decompress( os.path.join(source_dir, segment.name), dst_file ) else: shutil.copy2(os.path.join(source_dir, segment.name), dst_file) if remote_command: try: # Transfer the WAL files rsync.from_file_list( list(segment.name for segment in xlogs[prefix]), wal_decompression_dest, wal_dest, ) except CommandFailedException as e: msg = ( "data transfer failure while copying WAL files " "to directory '%s'" ) % (wal_dest[1:],) raise DataTransferFailure.from_command_error("rsync", e, msg) # Cleanup files after the transfer for segment in xlogs[prefix]: file_name = os.path.join(wal_decompression_dest, segment.name) try: os.unlink(file_name) except OSError as e: output.warning( "Error removing temporary file '%s': %s", file_name, e ) else: try: rsync.from_file_list( list(segment.name for segment in xlogs[prefix]), "%s/" % os.path.join(self.config.wals_directory, prefix), wal_dest, ) except CommandFailedException as e: msg = ( "data transfer failure while copying WAL files " "to directory '%s'" % (wal_dest[1:],) ) raise DataTransferFailure.from_command_error("rsync", e, msg) _logger.info("Finished copying %s WAL files.", total_wals) # Remove local decompression target directory if different from the # destination 
directory (it happens when compression is in use during a # remote recovery if wal_decompression_dest and wal_decompression_dest != wal_dest: shutil.rmtree(wal_decompression_dest) def _generate_archive_status( self, recovery_info, remote_command, required_xlog_files ): """ Populate the archive_status directory :param dict recovery_info: Dictionary containing all the recovery parameters :param str remote_command: ssh command for remote connection :param tuple required_xlog_files: list of required WAL segments """ if remote_command: status_dir = recovery_info["tempdir"] else: status_dir = os.path.join(recovery_info["wal_dest"], "archive_status") mkpath(status_dir) for wal_info in required_xlog_files: with open(os.path.join(status_dir, "%s.done" % wal_info.name), "a") as f: f.write("") if remote_command: try: recovery_info["rsync"]( "%s/" % status_dir, ":%s" % os.path.join(recovery_info["wal_dest"], "archive_status"), ) except CommandFailedException as e: output.error("unable to populate archive_status directory: %s", e) output.close_and_exit() def _generate_recovery_conf( self, recovery_info, backup_info, dest, immediate, exclusive, remote_command, target_name, target_time, target_tli, target_xid, target_lsn, standby_mode, ): """ Generate recovery configuration for PITR :param dict recovery_info: Dictionary containing all the recovery parameters :param barman.infofile.LocalBackupInfo backup_info: representation of a backup :param str dest: destination directory of the recovery :param bool|None immediate: end recovery as soon as consistency is reached :param boolean exclusive: exclusive backup or concurrent :param str remote_command: ssh command for remote connection :param str target_name: recovery target name for PITR :param str target_time: recovery target time for PITR :param str target_tli: recovery target timeline for PITR :param str target_xid: recovery target transaction id for PITR :param str target_lsn: recovery target LSN for PITR :param bool|None standby_mode: standby mode """ recovery_conf_lines = [] # If GET_WAL has been set, use the get-wal command to retrieve the # required wal files. Otherwise use the unix command "cp" to copy # them from the barman_wal directory if recovery_info["get_wal"]: partial_option = "" if not standby_mode: partial_option = "-P" # We need to create the right restore command. # If we are doing a remote recovery, # the barman-cli package is REQUIRED on the server that is hosting # the PostgreSQL server. # We use the machine FQDN and the barman_user # setting to call the barman-wal-restore correctly. # If local recovery, we use barman directly, assuming # the postgres process will be executed with the barman user. # It MUST to be reviewed by the user in any case. 
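# Illustrative examples (added for clarity, not part of the original
# source; "barman", "backup.example.com" and "main" stand in for the
# configured Barman user, the Barman host FQDN and the server name).
# The restore_command generated below typically looks like
#
#     restore_command = 'barman-wal-restore -P -U barman backup.example.com main %f %p'
#
# for a remote recovery, or
#
#     restore_command = 'sudo -u barman barman get-wal -P main %f > %p'
#
# for a local one (the -P option is omitted when standby_mode is requested).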
if remote_command: fqdn = socket.getfqdn() recovery_conf_lines.append( "# The 'barman-wal-restore' command " "is provided in the 'barman-cli' package" ) recovery_conf_lines.append( "restore_command = 'barman-wal-restore %s -U %s " "%s %s %%f %%p'" % (partial_option, self.config.config.user, fqdn, self.config.name) ) else: recovery_conf_lines.append( "# The 'barman get-wal' command " "must run as '%s' user" % self.config.config.user ) recovery_conf_lines.append( "restore_command = 'sudo -u %s " "barman get-wal %s %s %%f > %%p'" % (self.config.config.user, partial_option, self.config.name) ) recovery_info["results"]["get_wal"] = True else: recovery_conf_lines.append("restore_command = 'cp barman_wal/%f %p'") if backup_info.version >= 80400 and not recovery_info["get_wal"]: recovery_conf_lines.append("recovery_end_command = 'rm -fr barman_wal'") # Writes recovery target if target_time: recovery_conf_lines.append("recovery_target_time = '%s'" % target_time) if target_xid: recovery_conf_lines.append("recovery_target_xid = '%s'" % target_xid) if target_lsn: recovery_conf_lines.append("recovery_target_lsn = '%s'" % target_lsn) if target_name: recovery_conf_lines.append("recovery_target_name = '%s'" % target_name) # TODO: log a warning if PostgreSQL < 9.4 and --immediate if backup_info.version >= 90400 and immediate: recovery_conf_lines.append("recovery_target = 'immediate'") # Manage what happens after recovery target is reached if (target_xid or target_time or target_lsn) and exclusive: recovery_conf_lines.append( "recovery_target_inclusive = '%s'" % (not exclusive) ) if target_tli: recovery_conf_lines.append("recovery_target_timeline = %s" % target_tli) # Write recovery target action if "pause_at_recovery_target" in recovery_info: recovery_conf_lines.append( "pause_at_recovery_target = '%s'" % recovery_info["pause_at_recovery_target"] ) if "recovery_target_action" in recovery_info: recovery_conf_lines.append( "recovery_target_action = '%s'" % recovery_info["recovery_target_action"] ) # Set the standby mode if backup_info.version >= 120000: signal_file = "recovery.signal" if standby_mode: signal_file = "standby.signal" if remote_command: recovery_file = os.path.join(recovery_info["tempdir"], signal_file) else: recovery_file = os.path.join(dest, signal_file) open(recovery_file, "ab").close() recovery_info["auto_conf_append_lines"] = recovery_conf_lines else: if standby_mode: recovery_conf_lines.append("standby_mode = 'on'") if remote_command: recovery_file = os.path.join(recovery_info["tempdir"], "recovery.conf") else: recovery_file = os.path.join(dest, "recovery.conf") with open(recovery_file, "wb") as recovery: recovery.write(("\n".join(recovery_conf_lines) + "\n").encode("utf-8")) if remote_command: plain_rsync = RsyncPgData( path=self.server.path, ssh=remote_command, bwlimit=self.config.bandwidth_limit, network_compression=self.config.network_compression, ) try: plain_rsync.from_file_list( [os.path.basename(recovery_file)], recovery_info["tempdir"], ":%s" % dest, ) except CommandFailedException as e: output.error( "remote copy of %s failed: %s", os.path.basename(recovery_file), e ) output.close_and_exit() def _map_temporary_config_files(self, recovery_info, backup_info, remote_command): """ Map configuration files, by filling the 'temporary_configuration_files' array, depending on remote or local recovery. This array will be used by the subsequent methods of the class. 
:param dict recovery_info: Dictionary containing all the recovery parameters :param barman.infofile.LocalBackupInfo backup_info: a backup representation :param str remote_command: ssh command for remote recovery """ # Cycle over postgres configuration files which my be missing. # If a file is missing, we will be unable to restore it and # we will warn the user. # This can happen if we are using pg_basebackup and # a configuration file is located outside the data dir. # This is not an error condition, so we check also for # `pg_ident.conf` which is an optional file. hardcoded_files = ["pg_hba.conf", "pg_ident.conf"] conf_files = recovery_info["configuration_files"] + hardcoded_files for conf_file in conf_files: source_path = os.path.join(backup_info.get_data_directory(), conf_file) if not os.path.exists(source_path): recovery_info["results"]["missing_files"].append(conf_file) # Remove the file from the list of configuration files if conf_file in recovery_info["configuration_files"]: recovery_info["configuration_files"].remove(conf_file) for conf_file in recovery_info["configuration_files"]: if remote_command: # If the recovery is remote, copy the postgresql.conf # file in a temp dir # Otherwise we can modify the postgresql.conf file # in the destination directory. conf_file_path = os.path.join(recovery_info["tempdir"], conf_file) shutil.copy2( os.path.join(backup_info.get_data_directory(), conf_file), conf_file_path, ) else: # Otherwise use the local destination path. conf_file_path = os.path.join( recovery_info["destination_path"], conf_file ) recovery_info["temporary_configuration_files"].append(conf_file_path) if backup_info.version >= 120000: # Make sure 'postgresql.auto.conf' file exists in # recovery_info['temporary_configuration_files'] because # the recovery settings will end up there conf_file = "postgresql.auto.conf" if conf_file not in recovery_info["configuration_files"]: if remote_command: conf_file_path = os.path.join(recovery_info["tempdir"], conf_file) else: conf_file_path = os.path.join( recovery_info["destination_path"], conf_file ) # Touch the file into existence open(conf_file_path, "ab").close() recovery_info["temporary_configuration_files"].append(conf_file_path) def _analyse_temporary_config_files(self, recovery_info): """ Analyse temporary configuration files and identify dangerous options Mark all the dangerous options for the user to review. This procedure also changes harmful options such as 'archive_command'. 
:param dict recovery_info: dictionary holding all recovery parameters """ results = recovery_info["results"] # Check for dangerous options inside every config file for conf_file in recovery_info["temporary_configuration_files"]: append_lines = None if conf_file.endswith("postgresql.auto.conf"): append_lines = recovery_info.get("auto_conf_append_lines") # Identify and comment out dangerous options, replacing them with # the appropriate values results["changes"] += self._pg_config_mangle( conf_file, self.MANGLE_OPTIONS, "%s.origin" % conf_file, append_lines ) # Identify dangerous options and warn users about their presence results["warnings"] += self._pg_config_detect_possible_issues(conf_file) def _copy_temporary_config_files(self, dest, remote_command, recovery_info): """ Copy modified configuration files using rsync in case of remote recovery :param str dest: destination directory of the recovery :param str remote_command: ssh command for remote connection :param dict recovery_info: Dictionary containing all the recovery parameters """ if remote_command: # If this is a remote recovery, rsync the modified files from the # temporary local directory to the remote destination directory. file_list = [] for conf_file in recovery_info["configuration_files"]: file_list.append("%s" % conf_file) file_list.append("%s.origin" % conf_file) try: recovery_info["rsync"].from_file_list( file_list, recovery_info["tempdir"], ":%s" % dest ) except CommandFailedException as e: output.error("remote copy of configuration files failed: %s", e) output.close_and_exit() def close(self): """ Cleanup operations for a recovery """ # Remove the temporary directories for temp_dir in self.temp_dirs: shutil.rmtree(temp_dir, ignore_errors=True) self.temp_dirs = [] def _pg_config_mangle( self, filename, settings, backup_filename=None, append_lines=None ): """ This method modifies the given PostgreSQL configuration file, commenting out the given settings, and adding the ones generated by Barman. If backup_filename is passed, keep a backup copy. :param filename: the PostgreSQL configuration file :param settings: dictionary of settings to be mangled :param backup_filename: config file backup copy. Default is None. """ # Read the full content of the file in memory with open(filename, "rb") as f: content = f.readlines() # Rename the original file to backup_filename or to a temporary name # if backup_filename is missing. We need to keep it to preserve # permissions. 
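# Descriptive note (added for clarity, file names illustrative): when called by
# _analyse_temporary_config_files the backup copy is '<conf_file>.origin' and is
# kept for the user to review; when backup_filename is None the original file is
# parked as '<filename>.config_mangle.old' only for the duration of the rewrite
# and unlinked at the end.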
if backup_filename: orig_filename = backup_filename else: orig_filename = "%s.config_mangle.old" % filename shutil.move(filename, orig_filename) # Write the mangled content mangled = [] with open(filename, "wb") as f: last_line = None for l_number, line in enumerate(content): rm = PG_CONF_SETTING_RE.match(line.decode("utf-8")) if rm: key = rm.group(1) if key in settings: value = settings[key] f.write("#BARMAN#".encode("utf-8") + line) # If value is None, simply comment the old line if value is not None: changes = "%s = %s\n" % (key, value) f.write(changes.encode("utf-8")) mangled.append( Assertion._make( [os.path.basename(f.name), l_number, key, value] ) ) continue last_line = line f.write(line) # Append content of append_lines array if append_lines: # Ensure we have end of line character at the end of the file before adding new lines if last_line and last_line[-1] != "\n".encode("utf-8"): f.write("\n".encode("utf-8")) f.write(("\n".join(append_lines) + "\n").encode("utf-8")) # Restore original permissions shutil.copymode(orig_filename, filename) # If a backup copy of the file is not requested, # unlink the orig file if not backup_filename: os.unlink(orig_filename) return mangled def _pg_config_detect_possible_issues(self, filename): """ This method looks for any possible issue with PostgreSQL location options such as data_directory, config_file, etc. It returns a dictionary with the dangerous options that have been found. :param filename: the Postgres configuration file """ clashes = [] with open(filename) as f: content = f.readlines() # Read line by line and identify dangerous options for l_number, line in enumerate(content): rm = PG_CONF_SETTING_RE.match(line) if rm: key = rm.group(1) if key in self.DANGEROUS_OPTIONS: clashes.append( Assertion._make( [os.path.basename(f.name), l_number, key, rm.group(2)] ) ) return clashes barman-2.18/barman/copy_controller.py0000644000621200062120000013244714172556763016142 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ Copy controller module A copy controller will handle the copy between a series of files and directory, and their final destination. """ import collections import datetime import logging import os.path import re import shutil import signal import tempfile from functools import partial from multiprocessing import Lock, Pool import dateutil.tz from barman.command_wrappers import RsyncPgData from barman.exceptions import CommandFailedException, RsyncListFilesFailure from barman.utils import human_readable_timedelta, total_seconds _logger = logging.getLogger(__name__) _worker_callable = None """ Global variable containing a callable used to execute the jobs. Initialized by `_init_worker` and used by `_run_worker` function. This variable must be None outside a multiprocessing worker Process. 
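A minimal sketch of the intended pattern (mirroring RsyncCopyController.copy(); the argument names below are placeholders):

    pool = Pool(processes=workers, initializer=_init_worker, initargs=(execute_job,))
    for job_result in pool.imap_unordered(_run_worker, job_iterator):
        ...

so the callable is installed once in every worker process and then reused for each job pulled from the queue.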
""" # Parallel copy bucket size (10GB) BUCKET_SIZE = 1024 * 1024 * 1024 * 10 def _init_worker(func): """ Store the callable used to execute jobs passed to `_run_worker` function :param callable func: the callable to invoke for every job """ global _worker_callable _worker_callable = func def _run_worker(job): """ Execute a job using the callable set using `_init_worker` function :param _RsyncJob job: the job to be executed """ global _worker_callable assert ( _worker_callable is not None ), "Worker has not been initialized with `_init_worker`" # This is the entrypoint of the worker process. Since the KeyboardInterrupt # exceptions is handled by the main process, let's forget about Ctrl-C # here. # When the parent process will receive a KeyboardInterrupt, it will ask # the pool to terminate its workers and then terminate itself. signal.signal(signal.SIGINT, signal.SIG_IGN) return _worker_callable(job) class _RsyncJob(object): """ A job to be executed by a worker Process """ def __init__(self, item_idx, description, id=None, file_list=None, checksum=None): """ :param int item_idx: The index of copy item containing this job :param str description: The description of the job, used for logging :param int id: Job ID (as in bucket) :param list[RsyncCopyController._FileItem] file_list: Path to the file containing the file list :param bool checksum: Whether to force the checksum verification """ self.id = id self.item_idx = item_idx self.description = description self.file_list = file_list self.checksum = checksum # Statistics self.copy_start_time = None self.copy_end_time = None class _FileItem(collections.namedtuple("_FileItem", "mode size date path")): """ This named tuple is used to store the content each line of the output of a "rsync --list-only" call """ class _RsyncCopyItem(object): """ Internal data object that contains the information about one of the items that have to be copied during a RsyncCopyController run. """ def __init__( self, label, src, dst, exclude=None, exclude_and_protect=None, include=None, is_directory=False, bwlimit=None, reuse=None, item_class=None, optional=False, ): """ The "label" parameter is meant to be used for error messages and logging. If "src" or "dst" content begin with a ':' character, it is a remote path. Only local paths are supported in "reuse" argument. If "reuse" parameter is provided and is not None, it is used to implement the incremental copy. This only works if "is_directory" is True :param str label: a symbolic name for this item :param str src: source directory. :param str dst: destination directory. :param list[str] exclude: list of patterns to be excluded from the copy. The destination will be deleted if present. :param list[str] exclude_and_protect: list of patterns to be excluded from the copy. The destination will be preserved if present. :param list[str] include: list of patterns to be included in the copy even if excluded. :param bool is_directory: Whether the item points to a directory. :param bwlimit: bandwidth limit to be enforced. (KiB) :param str|None reuse: the reference path for incremental mode. :param str|None item_class: If specified carries a meta information about what the object to be copied is. :param bool optional: Whether a failure copying this object should be treated as a fatal failure. 
This only works if "is_directory" is False """ self.label = label self.src = src self.dst = dst self.exclude = exclude self.exclude_and_protect = exclude_and_protect self.include = include self.is_directory = is_directory self.bwlimit = bwlimit self.reuse = reuse self.item_class = item_class self.optional = optional # Attributes that will e filled during the analysis self.temp_dir = None self.dir_file = None self.exclude_and_protect_file = None self.safe_list = None self.check_list = None # Statistics self.analysis_start_time = None self.analysis_end_time = None # Ensure that the user specified the item class, since it is mandatory # to correctly handle the item assert self.item_class def __str__(self): # Prepare strings for messages formatted_class = self.item_class formatted_name = self.src if self.src.startswith(":"): formatted_class = "remote " + self.item_class formatted_name = self.src[1:] formatted_class += " directory" if self.is_directory else " file" # Log the operation that is being executed if self.item_class in ( RsyncCopyController.PGDATA_CLASS, RsyncCopyController.PGCONTROL_CLASS, ): return "%s: %s" % (formatted_class, formatted_name) else: return "%s '%s': %s" % (formatted_class, self.label, formatted_name) class RsyncCopyController(object): """ Copy a list of files and directory to their final destination. """ # Constants to be used as "item_class" values PGDATA_CLASS = "PGDATA" TABLESPACE_CLASS = "tablespace" PGCONTROL_CLASS = "pg_control" CONFIG_CLASS = "config" # This regular expression is used to parse each line of the output # of a "rsync --list-only" call. This regexp has been tested with any known # version of upstream rsync that is supported (>= 3.0.4) LIST_ONLY_RE = re.compile( r""" ^ # start of the line # capture the mode (es. "-rw-------") (?P[-\w]+) \s+ # size is an integer (?P\d+) \s+ # The date field can have two different form (?P # "2014/06/05 18:00:00" if the sending rsync is compiled # with HAVE_STRFTIME [\d/]+\s+[\d:]+ | # "Thu Jun 5 18:00:00 2014" otherwise \w+\s+\w+\s+\d+\s+[\d:]+\s+\d+ ) \s+ # all the remaining characters are part of filename (?P.+) $ # end of the line """, re.VERBOSE, ) # This regular expression is used to ignore error messages regarding # vanished files that are not really an error. 
It is used because # in some cases rsync reports it with exit code 23 which could also mean # a fatal error VANISHED_RE = re.compile( r""" ^ # start of the line ( # files which vanished before rsync start rsync:\ link_stat\ ".+"\ failed:\ No\ such\ file\ or\ directory\ \(2\) | # files which vanished after rsync start file\ has\ vanished:\ ".+" | # files which have been truncated during transfer rsync:\ read\ errors\ mapping\ ".+":\ No\ data\ available\ \(61\) | # final summary rsync\ error:\ .* \(code\ 23\)\ at\ main\.c\(\d+\) \ \[(generator|receiver|sender)=[^\]]+\] ) $ # end of the line """, re.VERBOSE + re.IGNORECASE, ) def __init__( self, path=None, ssh_command=None, ssh_options=None, network_compression=False, reuse_backup=None, safe_horizon=None, exclude=None, retry_times=0, retry_sleep=0, workers=1, ): """ :param str|None path: the PATH where rsync executable will be searched :param str|None ssh_command: the ssh executable to be used to access remote paths :param list[str]|None ssh_options: list of ssh options to be used to access remote paths :param boolean network_compression: whether to use the network compression :param str|None reuse_backup: if "link" or "copy" enables the incremental copy feature :param datetime.datetime|None safe_horizon: if set, assumes that every files older than it are save to copy without checksum verification. :param list[str]|None exclude: list of patterns to be excluded from the copy :param int retry_times: The number of times to retry a failed operation :param int retry_sleep: Sleep time between two retry :param int workers: The number of parallel copy workers """ super(RsyncCopyController, self).__init__() self.path = path self.ssh_command = ssh_command self.ssh_options = ssh_options self.network_compression = network_compression self.reuse_backup = reuse_backup self.safe_horizon = safe_horizon self.exclude = exclude self.retry_times = retry_times self.retry_sleep = retry_sleep self.workers = workers self._logger_lock = Lock() # Assume we are running with a recent rsync (>= 3.1) self.rsync_has_ignore_missing_args = True self.item_list = [] """List of items to be copied""" self.rsync_cache = {} """A cache of RsyncPgData objects""" # Attributes used for progress reporting self.total_steps = None """Total number of steps""" self.current_step = None """Current step number""" self.temp_dir = None """Temp dir used to store the status during the copy""" # Statistics self.jobs_done = None """Already finished jobs list""" self.copy_start_time = None """Copy start time""" self.copy_end_time = None """Copy end time""" def add_directory( self, label, src, dst, exclude=None, exclude_and_protect=None, include=None, bwlimit=None, reuse=None, item_class=None, ): """ Add a directory that we want to copy. If "src" or "dst" content begin with a ':' character, it is a remote path. Only local paths are supported in "reuse" argument. If "reuse" parameter is provided and is not None, it is used to implement the incremental copy. This only works if "is_directory" is True :param str label: symbolic name to be used for error messages and logging. :param str src: source directory. :param str dst: destination directory. :param list[str] exclude: list of patterns to be excluded from the copy. The destination will be deleted if present. :param list[str] exclude_and_protect: list of patterns to be excluded from the copy. The destination will be preserved if present. :param list[str] include: list of patterns to be included in the copy even if excluded. 
:param bwlimit: bandwidth limit to be enforced. (KiB) :param str|None reuse: the reference path for incremental mode. :param str item_class: If specified carries a meta information about what the object to be copied is. """ self.item_list.append( _RsyncCopyItem( label=label, src=src, dst=dst, is_directory=True, bwlimit=bwlimit, reuse=reuse, item_class=item_class, optional=False, exclude=exclude, exclude_and_protect=exclude_and_protect, include=include, ) ) def add_file(self, label, src, dst, item_class=None, optional=False): """ Add a file that we want to copy :param str label: symbolic name to be used for error messages and logging. :param str src: source directory. :param str dst: destination directory. :param str item_class: If specified carries a meta information about what the object to be copied is. :param bool optional: Whether a failure copying this object should be treated as a fatal failure. """ self.item_list.append( _RsyncCopyItem( label=label, src=src, dst=dst, is_directory=False, bwlimit=None, reuse=None, item_class=item_class, optional=optional, ) ) def _rsync_factory(self, item): """ Build the RsyncPgData object required for copying the provided item :param _RsyncCopyItem item: information about a copy operation :rtype: RsyncPgData """ # If the object already exists, use it if item in self.rsync_cache: return self.rsync_cache[item] # Prepare the command arguments args = self._reuse_args(item.reuse) # Merge the global exclude with the one into the item object if self.exclude and item.exclude: exclude = self.exclude + item.exclude else: exclude = self.exclude or item.exclude # Using `--ignore-missing-args` could fail in case # the local or the remote rsync is older than 3.1. # In that case we expect that during the analyze phase # we get an error. The analyze code must catch that error # and retry after flushing the rsync cache. if self.rsync_has_ignore_missing_args: args.append("--ignore-missing-args") # TODO: remove debug output or use it to progress tracking # By adding a double '--itemize-changes' option, the rsync # output will contain the full list of files that have been # touched, even those that have not changed args.append("--itemize-changes") args.append("--itemize-changes") # Build the rsync object that will execute the copy rsync = RsyncPgData( path=self.path, ssh=self.ssh_command, ssh_options=self.ssh_options, args=args, bwlimit=item.bwlimit, network_compression=self.network_compression, exclude=exclude, exclude_and_protect=item.exclude_and_protect, include=item.include, retry_times=self.retry_times, retry_sleep=self.retry_sleep, retry_handler=partial(self._retry_handler, item), ) self.rsync_cache[item] = rsync return rsync def _rsync_set_pre_31_mode(self): """ Stop using `--ignore-missing-args` and restore rsync < 3.1 compatibility """ _logger.info( "Detected rsync version less than 3.1. " "top using '--ignore-missing-args' argument." ) self.rsync_has_ignore_missing_args = False self.rsync_cache.clear() def copy(self): """ Execute the actual copy """ # Store the start time self.copy_start_time = datetime.datetime.now() # Create a temporary directory to hold the file lists. self.temp_dir = tempfile.mkdtemp(suffix="", prefix="barman-") # The following try block is to make sure the temporary directory # will be removed on exit and all the pool workers # have been terminated. 
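# Outline of the copy phase (descriptive comment): 1) analyze every directory
# item and build its file lists; 2) run the resulting jobs through a
# multiprocessing Pool, copying pg_control items last; 3) in all cases stop the
# workers and remove the temporary directory, even if an error occurred.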
pool = None try: # Initialize the counters used by progress reporting self._progress_init() _logger.info("Copy started (safe before %r)", self.safe_horizon) # Execute some preliminary steps for each item to be copied for item in self.item_list: # The initial preparation is necessary only for directories if not item.is_directory: continue # Store the analysis start time item.analysis_start_time = datetime.datetime.now() # Analyze the source and destination directory content _logger.info(self._progress_message("[global] analyze %s" % item)) self._analyze_directory(item) # Prepare the target directories, removing any unneeded file _logger.info( self._progress_message( "[global] create destination directories and delete " "unknown files for %s" % item ) ) self._create_dir_and_purge(item) # Store the analysis end time item.analysis_end_time = datetime.datetime.now() # Init the list of jobs done. Every job will be added to this list # once finished. The content will be used to calculate statistics # about the copy process. self.jobs_done = [] # The jobs are executed using a parallel processes pool # Each job is generated by `self._job_generator`, it is executed by # `_run_worker` using `self._execute_job`, which has been set # calling `_init_worker` function during the Pool initialization. pool = Pool( processes=self.workers, initializer=_init_worker, initargs=(self._execute_job,), ) for job in pool.imap_unordered( _run_worker, self._job_generator(exclude_classes=[self.PGCONTROL_CLASS]) ): # Store the finished job for further analysis self.jobs_done.append(job) # The PGCONTROL_CLASS items must always be copied last for job in pool.imap_unordered( _run_worker, self._job_generator(include_classes=[self.PGCONTROL_CLASS]) ): # Store the finished job for further analysis self.jobs_done.append(job) except KeyboardInterrupt: _logger.info( "Copy interrupted by the user (safe before %s)", self.safe_horizon ) raise except BaseException: _logger.info("Copy failed (safe before %s)", self.safe_horizon) raise else: _logger.info("Copy finished (safe before %s)", self.safe_horizon) finally: # The parent process may have finished naturally or have been # interrupted by an exception (i.e. due to a copy error or # the user pressing Ctrl-C). # At this point we must make sure that all the workers have been # correctly terminated before continuing. if pool: pool.terminate() pool.join() # Clean up the temp dir, any exception raised here is logged # and discarded to not clobber an eventual exception being handled. try: shutil.rmtree(self.temp_dir) except EnvironmentError as e: _logger.error("Error cleaning up '%s' (%s)", self.temp_dir, e) self.temp_dir = None # Store the end time self.copy_end_time = datetime.datetime.now() def _job_generator(self, include_classes=None, exclude_classes=None): """ Generate the jobs to be executed by the workers :param list[str]|None include_classes: If not none, copy only the items which have one of the specified classes. :param list[str]|None exclude_classes: If not none, skip all items which have one of the specified classes. 
:rtype: iter[_RsyncJob] """ for item_idx, item in enumerate(self.item_list): # Skip items of classes which are not required if include_classes and item.item_class not in include_classes: continue if exclude_classes and item.item_class in exclude_classes: continue # If the item is a directory then copy it in two stages, # otherwise copy it using a plain rsync if item.is_directory: # Copy the safe files using the default rsync algorithm msg = self._progress_message("[%%s] %%s copy safe files from %s" % item) phase_skipped = True for i, bucket in enumerate(self._fill_buckets(item.safe_list)): phase_skipped = False yield _RsyncJob( item_idx, id=i, description=msg, file_list=bucket, checksum=False, ) if phase_skipped: _logger.info(msg, "global", "skipping") # Copy the check files forcing rsync to verify the checksum msg = self._progress_message( "[%%s] %%s copy files with checksum from %s" % item ) phase_skipped = True for i, bucket in enumerate(self._fill_buckets(item.check_list)): phase_skipped = False yield _RsyncJob( item_idx, id=i, description=msg, file_list=bucket, checksum=True ) if phase_skipped: _logger.info(msg, "global", "skipping") else: # Copy the file using plain rsync msg = self._progress_message("[%%s] %%s copy %s" % item) yield _RsyncJob(item_idx, description=msg) def _fill_buckets(self, file_list): """ Generate buckets for parallel copy :param list[_FileItem] file_list: list of file to transfer :rtype: iter[list[_FileItem]] """ # If there is only one worker, fall back to copying all file at once if self.workers < 2: yield file_list return # Create `self.workers` buckets buckets = [[] for _ in range(self.workers)] bucket_sizes = [0 for _ in range(self.workers)] pos = -1 # Sort the list by size for entry in sorted(file_list, key=lambda item: item.size): # Try to fill the file in a bucket for i in range(self.workers): pos = (pos + 1) % self.workers new_size = bucket_sizes[pos] + entry.size if new_size < BUCKET_SIZE: bucket_sizes[pos] = new_size buckets[pos].append(entry) break else: # All the buckets are filled, so return them all for i in range(self.workers): if len(buckets[i]) > 0: yield buckets[i] # Clear the bucket buckets[i] = [] bucket_sizes[i] = 0 # Put the current file in the first bucket bucket_sizes[0] = entry.size buckets[0].append(entry) pos = 0 # Send all the remaining buckets for i in range(self.workers): if len(buckets[i]) > 0: yield buckets[i] def _execute_job(self, job): """ Execute a `_RsyncJob` in a worker process :type job: _RsyncJob """ item = self.item_list[job.item_idx] if job.id is not None: bucket = "bucket %s" % job.id else: bucket = "global" # Build the rsync object required for the copy rsync = self._rsync_factory(item) # Store the start time job.copy_start_time = datetime.datetime.now() # Write in the log that the job is starting with self._logger_lock: _logger.info(job.description, bucket, "starting") if item.is_directory: # A directory item must always have checksum and file_list set assert ( job.file_list is not None ), "A directory item must not have a None `file_list` attribute" assert ( job.checksum is not None ), "A directory item must not have a None `checksum` attribute" # Generate a unique name for the file containing the list of files file_list_path = os.path.join( self.temp_dir, "%s_%s_%s.list" % (item.label, "check" if job.checksum else "safe", os.getpid()), ) # Write the list, one path per line with open(file_list_path, "w") as file_list: for entry in job.file_list: assert isinstance(entry, _FileItem), ( "expect %r to be a _FileItem" % 
entry ) file_list.write(entry.path + "\n") self._copy( rsync, item.src, item.dst, file_list=file_list_path, checksum=job.checksum, ) else: # A file must never have checksum and file_list set assert ( job.file_list is None ), "A file item must have a None `file_list` attribute" assert ( job.checksum is None ), "A file item must have a None `checksum` attribute" rsync(item.src, item.dst, allowed_retval=(0, 23, 24)) if rsync.ret == 23: if item.optional: _logger.warning("Ignoring error reading %s", item) else: raise CommandFailedException( dict(ret=rsync.ret, out=rsync.out, err=rsync.err) ) # Store the stop time job.copy_end_time = datetime.datetime.now() # Write in the log that the job is finished with self._logger_lock: _logger.info( job.description, bucket, "finished (duration: %s)" % human_readable_timedelta(job.copy_end_time - job.copy_start_time), ) # Return the job to the caller, for statistics purpose return job def _progress_init(self): """ Init counters used by progress logging """ self.total_steps = 0 for item in self.item_list: # Directories require 4 steps, files only one if item.is_directory: self.total_steps += 4 else: self.total_steps += 1 self.current_step = 0 def _progress_message(self, msg): """ Log a message containing the progress :param str msg: the message :return srt: message to log """ self.current_step += 1 return "Copy step %s of %s: %s" % (self.current_step, self.total_steps, msg) def _reuse_args(self, reuse_directory): """ If reuse_backup is 'copy' or 'link', build the rsync option to enable the reuse, otherwise returns an empty list :param str reuse_directory: the local path with data to be reused :rtype: list[str] """ if self.reuse_backup in ("copy", "link") and reuse_directory is not None: return ["--%s-dest=%s" % (self.reuse_backup, reuse_directory)] else: return [] def _retry_handler(self, item, command, args, kwargs, attempt, exc): """ :param _RsyncCopyItem item: The item that is being processed :param RsyncPgData command: Command object being executed :param list args: command args :param dict kwargs: command kwargs :param int attempt: attempt number (starting from 0) :param CommandFailedException exc: the exception which caused the failure """ _logger.warn("Failure executing rsync on %s (attempt %s)", item, attempt) _logger.warn("Retrying in %s seconds", self.retry_sleep) def _analyze_directory(self, item): """ Analyzes the status of source and destination directories identifying the files that are safe from the point of view of a PostgreSQL backup. The safe_horizon value is the timestamp of the beginning of the older backup involved in copy (as source or destination). Any files updated after that timestamp, must be checked as they could have been modified during the backup - and we do not reply WAL files to update them. The destination directory must exist. If the "safe_horizon" parameter is None, we cannot make any assumptions about what can be considered "safe", so we must check everything with checksums enabled. If "ref" parameter is provided and is not None, it is looked up instead of the "dst" dir. This is useful when we are copying files using '--link-dest' and '--copy-dest' rsync options. In this case, both the "dst" and "ref" dir must exist and the "dst" dir must be empty. If source or destination path begin with a ':' character, it is a remote path. Only local paths are supported in "ref" argument. 
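Illustrative outcome (timestamps are made up): with safe_horizon set to 2014-06-05 18:00, a source file last modified at 17:00 goes straight to item.safe_list; a file modified at 19:00 whose size or mtime differs from the reference copy also ends up in item.safe_list, because rsync will spot the change anyway, while a 19:00 file that matches the reference exactly is put in item.check_list and re-verified with --checksum.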
:param _RsyncCopyItem item: information about a copy operation """ # If reference is not set we use dst as reference path ref = item.reuse if ref is None: ref = item.dst # Make sure the ref path ends with a '/' or rsync will add the # last path component to all the returned items during listing if ref[-1] != "/": ref += "/" # Build a hash containing all files present on reference directory. # Directories are not included try: ref_hash = {} ref_has_content = False for file_item in self._list_files(item, ref): if file_item.path != "." and not ( item.label == "pgdata" and file_item.path == "pg_tblspc" ): ref_has_content = True if file_item.mode[0] != "d": ref_hash[file_item.path] = file_item except (CommandFailedException, RsyncListFilesFailure) as e: # Here we set ref_hash to None, thus disable the code that marks as # "safe matching" those destination files with different time or # size, even if newer than "safe_horizon". As a result, all files # newer than "safe_horizon" will be checked through checksums. ref_hash = None _logger.error( "Unable to retrieve reference directory file list. " "Using only source file information to decide which files" " need to be copied with checksums enabled: %s" % e ) # The 'dir.list' file will contain every directory in the # source tree item.dir_file = os.path.join(self.temp_dir, "%s_dir.list" % item.label) dir_list = open(item.dir_file, "w+") # The 'protect.list' file will contain a filter rule to protect # each file present in the source tree. It will be used during # the first phase to delete all the extra files on destination. item.exclude_and_protect_file = os.path.join( self.temp_dir, "%s_exclude_and_protect.filter" % item.label ) exclude_and_protect_filter = open(item.exclude_and_protect_file, "w+") if not ref_has_content: # If the destination directory is empty then include all # directories and exclude all files. This stops the rsync # command which runs during the _create_dir_and_purge function # from copying the entire contents of the source directory and # ensures it only creates the directories. exclude_and_protect_filter.write("+ */\n") exclude_and_protect_filter.write("- *\n") # The `safe_list` will contain all items older than # safe_horizon, as well as files that we know rsync will # check anyway due to a difference in mtime or size item.safe_list = [] # The `check_list` will contain all items that need # to be copied with checksum option enabled item.check_list = [] for entry in self._list_files(item, item.src): # If item is a directory, we only need to save it in 'dir.list' if entry.mode[0] == "d": dir_list.write(entry.path + "\n") continue # Add every file in the source path to the list of files # to be protected from deletion ('exclude_and_protect.filter') # But only if we know the destination directory is non-empty if ref_has_content: exclude_and_protect_filter.write("P /" + entry.path + "\n") exclude_and_protect_filter.write("- /" + entry.path + "\n") # If source item is older than safe_horizon, # add it to 'safe.list' if self.safe_horizon and entry.date < self.safe_horizon: item.safe_list.append(entry) continue # If ref_hash is None, it means we failed to retrieve the # destination file list. We assume the only safe way is to # check every file that is older than safe_horizon if ref_hash is None: item.check_list.append(entry) continue # If source file differs by time or size from the matching # destination, rsync will discover the difference in any case. # It is then safe to skip checksum check here. 
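# Descriptive note: an entry reaching this point is newer than safe_horizon and
# a reference listing is available; if it is missing from the reference, or
# differs in size or mtime, the plain rsync pass is enough (safe_list),
# otherwise it must be re-checked with checksums (check_list).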
dst_item = ref_hash.get(entry.path, None) if dst_item is None: item.safe_list.append(entry) continue different_size = dst_item.size != entry.size different_date = dst_item.date != entry.date if different_size or different_date: item.safe_list.append(entry) continue # All remaining files must be checked with checksums enabled item.check_list.append(entry) # Close all the control files dir_list.close() exclude_and_protect_filter.close() def _create_dir_and_purge(self, item): """ Create destination directories and delete any unknown file :param _RsyncCopyItem item: information about a copy operation """ # Build the rsync object required for the analysis rsync = self._rsync_factory(item) # Create directories and delete any unknown file self._rsync_ignore_vanished_files( rsync, "--recursive", "--delete", "--files-from=%s" % item.dir_file, "--filter", "merge %s" % item.exclude_and_protect_file, item.src, item.dst, check=True, ) def _copy(self, rsync, src, dst, file_list, checksum=False): """ The method execute the call to rsync, using as source a a list of files, and adding the the checksum option if required by the caller. :param Rsync rsync: the Rsync object used to retrieve the list of files inside the directories for copy purposes :param str src: source directory :param str dst: destination directory :param str file_list: path to the file containing the sources for rsync :param bool checksum: if checksum argument for rsync is required """ # Build the rsync call args args = ["--files-from=%s" % file_list] if checksum: # Add checksum option if needed args.append("--checksum") self._rsync_ignore_vanished_files(rsync, src, dst, *args, check=True) def _list_files(self, item, path): """ This method recursively retrieves a list of files contained in a directory, either local or remote (if starts with ':') :param _RsyncCopyItem item: information about a copy operation :param str path: the path we want to inspect :except CommandFailedException: if rsync call fails :except RsyncListFilesFailure: if rsync output can't be parsed """ _logger.debug("list_files: %r", path) # Build the rsync object required for the analysis rsync = self._rsync_factory(item) try: # Use the --no-human-readable option to avoid digit groupings # in "size" field with rsync >= 3.1.0. # Ref: http://ftp.samba.org/pub/rsync/src/rsync-3.1.0-NEWS rsync.get_output( "--no-human-readable", "--list-only", "-r", path, check=True ) except CommandFailedException: # This could fail due to the local or the remote rsync # older than 3.1. IF so, fallback to pre 3.1 mode if self.rsync_has_ignore_missing_args and rsync.ret in ( 12, # Error in rsync protocol data stream (remote) 1, ): # Syntax or usage error (local) self._rsync_set_pre_31_mode() # Recursive call, uses the compatibility mode for item in self._list_files(item, path): yield item return else: raise # Cache tzlocal object we need to build dates tzinfo = dateutil.tz.tzlocal() for line in rsync.out.splitlines(): line = line.rstrip() match = self.LIST_ONLY_RE.match(line) if match: mode = match.group("mode") # no exceptions here: the regexp forces 'size' to be an integer size = int(match.group("size")) try: date_str = match.group("date") # The date format has been validated by LIST_ONLY_RE. 
# Use "2014/06/05 18:00:00" format if the sending rsync # is compiled with HAVE_STRFTIME, otherwise use # "Thu Jun 5 18:00:00 2014" format if date_str[0].isdigit(): date = datetime.datetime.strptime(date_str, "%Y/%m/%d %H:%M:%S") else: date = datetime.datetime.strptime( date_str, "%a %b %d %H:%M:%S %Y" ) date = date.replace(tzinfo=tzinfo) except (TypeError, ValueError): # This should not happen, due to the regexp msg = ( "Unable to parse rsync --list-only output line " "(date): '%s'" % line ) _logger.exception(msg) raise RsyncListFilesFailure(msg) path = match.group("path") yield _FileItem(mode, size, date, path) else: # This is a hard error, as we are unable to parse the output # of rsync. It can only happen with a modified or unknown # rsync version (perhaps newer than 3.1?) msg = "Unable to parse rsync --list-only output line: '%s'" % line _logger.error(msg) raise RsyncListFilesFailure(msg) def _rsync_ignore_vanished_files(self, rsync, *args, **kwargs): """ Wrap an Rsync.get_output() call and ignore missing args TODO: when rsync 3.1 will be widespread, replace this with --ignore-missing-args argument :param Rsync rsync: the Rsync object used to execute the copy """ kwargs["allowed_retval"] = (0, 23, 24) rsync.get_output(*args, **kwargs) # If return code is 23 and there is any error which doesn't match # the VANISHED_RE regexp raise an error if rsync.ret == 23 and rsync.err is not None: for line in rsync.err.splitlines(): match = self.VANISHED_RE.match(line.rstrip()) if match: continue else: _logger.error("First rsync error line: %s", line) raise CommandFailedException( dict(ret=rsync.ret, out=rsync.out, err=rsync.err) ) return rsync.out, rsync.err def statistics(self): """ Return statistics about the copy object. :rtype: dict """ # This method can only run at the end of a non empty copy assert self.copy_end_time assert self.item_list assert self.jobs_done # Initialise the result calculating the total runtime stat = { "total_time": total_seconds(self.copy_end_time - self.copy_start_time), "number_of_workers": self.workers, "analysis_time_per_item": {}, "copy_time_per_item": {}, "serialized_copy_time_per_item": {}, } # Calculate the time spent during the analysis of the items analysis_start = None analysis_end = None for item in self.item_list: # Some items don't require analysis if not item.analysis_end_time: continue # Build a human readable name to refer to an item in the output ident = item.label if not analysis_start: analysis_start = item.analysis_start_time elif analysis_start > item.analysis_start_time: analysis_start = item.analysis_start_time if not analysis_end: analysis_end = item.analysis_end_time elif analysis_end < item.analysis_end_time: analysis_end = item.analysis_end_time stat["analysis_time_per_item"][ident] = total_seconds( item.analysis_end_time - item.analysis_start_time ) stat["analysis_time"] = total_seconds(analysis_end - analysis_start) # Calculate the time spent per job # WARNING: this code assumes that every item is copied separately, # so it's strictly tied to the `_job_generator` method code item_data = {} for job in self.jobs_done: # WARNING: the item contained in the job is not the same object # contained in self.item_list, as it has gone through two # pickling/unpickling cycle # Build a human readable name to refer to an item in the output ident = self.item_list[job.item_idx].label # If this is the first time we see this item we just store the # values from the job if ident not in item_data: item_data[ident] = { "start": job.copy_start_time, "end": 
job.copy_end_time, "total_time": job.copy_end_time - job.copy_start_time, } else: data = item_data[ident] if data["start"] > job.copy_start_time: data["start"] = job.copy_start_time if data["end"] < job.copy_end_time: data["end"] = job.copy_end_time data["total_time"] += job.copy_end_time - job.copy_start_time # Calculate the time spent copying copy_start = None copy_end = None serialized_time = datetime.timedelta(0) for ident in item_data: data = item_data[ident] if copy_start is None or copy_start > data["start"]: copy_start = data["start"] if copy_end is None or copy_end < data["end"]: copy_end = data["end"] stat["copy_time_per_item"][ident] = total_seconds( data["end"] - data["start"] ) stat["serialized_copy_time_per_item"][ident] = total_seconds( data["total_time"] ) serialized_time += data["total_time"] # Store the total time spent by copying stat["copy_time"] = total_seconds(copy_end - copy_start) stat["serialized_copy_time"] = total_seconds(serialized_time) return stat barman-2.18/barman/cloud.py0000644000621200062120000020325414172556763014026 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2018-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import collections import copy import datetime import errno import json import logging import multiprocessing import operator import os import shutil import signal import tarfile from abc import ABCMeta, abstractmethod, abstractproperty from functools import partial from io import BytesIO, RawIOBase from tempfile import NamedTemporaryFile from barman.annotations import KeepManagerMixinCloud from barman.backup_executor import ConcurrentBackupStrategy, ExclusiveBackupStrategy from barman.clients import cloud_compression from barman.exceptions import BarmanException from barman.fs import path_allowed from barman.infofile import BackupInfo from barman.postgres_plumbing import EXCLUDE_LIST, PGDATA_EXCLUDE_LIST from barman.utils import ( BarmanEncoder, force_str, human_readable_timedelta, pretty_size, total_seconds, with_metaclass, ) from barman import xlog try: # Python 3.x from queue import Empty as EmptyQueue except ImportError: # Python 2.x from Queue import Empty as EmptyQueue BUFSIZE = 16 * 1024 LOGGING_FORMAT = "%(asctime)s [%(process)s] %(levelname)s: %(message)s" # Allowed compression algorithms ALLOWED_COMPRESSIONS = {".gz": "gzip", ".bz2": "bzip2", ".snappy": "snappy"} def configure_logging(config): """ Get a nicer output from the Python logging package """ verbosity = config.verbose - config.quiet log_level = max(logging.WARNING - verbosity * 10, logging.DEBUG) logging.basicConfig(format=LOGGING_FORMAT, level=log_level) def copyfileobj_pad_truncate(src, dst, length=None): """ Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. This method is used by the TarFileIgnoringTruncate.addfile(). 
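Illustrative behavior (numbers are made up): if length is 100 but src only yields 40 bytes because the file was truncated in the meantime, the remaining 60 bytes are written as NUL padding, so dst always receives exactly length bytes.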
""" if length == 0: return if length is None: shutil.copyfileobj(src, dst, BUFSIZE) return blocks, remainder = divmod(length, BUFSIZE) for _ in range(blocks): buf = src.read(BUFSIZE) dst.write(buf) if len(buf) < BUFSIZE: # End of file reached # The file must have been truncated, so pad with zeroes dst.write(tarfile.NUL * (BUFSIZE - len(buf))) if remainder != 0: buf = src.read(remainder) dst.write(buf) if len(buf) < remainder: # End of file reached # The file must have been truncated, so pad with zeroes dst.write(tarfile.NUL * (remainder - len(buf))) class CloudProviderError(BarmanException): """ This exception is raised when we get an error in the response from the cloud provider """ class CloudUploadingError(BarmanException): """ This exception is raised when there are upload errors """ class TarFileIgnoringTruncate(tarfile.TarFile): """ Custom TarFile class that ignore truncated or vanished files. """ format = tarfile.PAX_FORMAT # Use PAX format to better preserve metadata def addfile(self, tarinfo, fileobj=None): """ Add the provided fileobj to the tar ignoring truncated or vanished files. This method completely replaces TarFile.addfile() """ self._check("awx") tarinfo = copy.copy(tarinfo) buf = tarinfo.tobuf(self.format, self.encoding, self.errors) self.fileobj.write(buf) self.offset += len(buf) # If there's data to follow, append it. if fileobj is not None: copyfileobj_pad_truncate(fileobj, self.fileobj, tarinfo.size) blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE) if remainder > 0: self.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder)) blocks += 1 self.offset += blocks * tarfile.BLOCKSIZE self.members.append(tarinfo) class CloudTarUploader(object): # This is the method we use to create new buffers # We use named temporary files, so we can pass them by name to # other processes _buffer = partial( NamedTemporaryFile, delete=False, prefix="barman-upload-", suffix=".part" ) def __init__(self, cloud_interface, key, compression=None, chunk_size=None): """ A tar archive that resides on cloud storage :param CloudInterface cloud_interface: cloud interface instance :param str key: path inside the bucket :param str compression: required compression :param int chunk_size: the upload chunk size """ self.cloud_interface = cloud_interface self.key = key self.upload_metadata = None if chunk_size is None: self.chunk_size = cloud_interface.MIN_CHUNK_SIZE else: self.chunk_size = max(chunk_size, cloud_interface.MIN_CHUNK_SIZE) self.buffer = None self.counter = 0 self.compressor = None # Some supported compressions (e.g. snappy) require CloudTarUploader to apply # compression manually rather than relying on the tar file. self.compressor = cloud_compression.get_compressor(compression) # If the compression is supported by tar then it will be added to the filemode # passed to tar_mode. tar_mode = cloud_compression.get_streaming_tar_mode("w", compression) # The value of 65536 for the chunk size is based on comments in the python-snappy # library which suggest it should be good for almost every scenario. 
# See: https://github.com/andrix/python-snappy/blob/0.6.0/snappy/snappy.py#L282 self.tar = TarFileIgnoringTruncate.open( fileobj=self, mode=tar_mode, bufsize=64 << 10 ) self.size = 0 self.stats = None def write(self, buf): if self.buffer and self.buffer.tell() > self.chunk_size: self.flush() if not self.buffer: self.buffer = self._buffer() if self.compressor: # If we have a custom compressor we must use it here compressed_buf = self.compressor.add_chunk(buf) self.buffer.write(compressed_buf) self.size += len(compressed_buf) else: # If there is no custom compressor then we are either not using # compression or tar has already compressed it - in either case we # just write the data to the buffer self.buffer.write(buf) self.size += len(buf) def flush(self): if not self.upload_metadata: self.upload_metadata = self.cloud_interface.create_multipart_upload( self.key ) self.buffer.flush() self.buffer.seek(0, os.SEEK_SET) self.counter += 1 self.cloud_interface.async_upload_part( upload_metadata=self.upload_metadata, key=self.key, body=self.buffer, part_number=self.counter, ) self.buffer.close() self.buffer = None def close(self): if self.tar: self.tar.close() self.flush() self.cloud_interface.async_complete_multipart_upload( upload_metadata=self.upload_metadata, key=self.key, parts_count=self.counter, ) self.stats = self.cloud_interface.wait_for_multipart_upload(self.key) class CloudUploadController(object): def __init__(self, cloud_interface, key_prefix, max_archive_size, compression): """ Create a new controller that upload the backup in cloud storage :param CloudInterface cloud_interface: cloud interface instance :param str|None key_prefix: path inside the bucket :param int max_archive_size: the maximum size of an archive :param str|None compression: required compression """ self.cloud_interface = cloud_interface if key_prefix and key_prefix[0] == "/": key_prefix = key_prefix[1:] self.key_prefix = key_prefix if max_archive_size < self.cloud_interface.MAX_ARCHIVE_SIZE: self.max_archive_size = max_archive_size else: logging.warning( "max-archive-size too big. Capping it to to %s", pretty_size(self.cloud_interface.MAX_ARCHIVE_SIZE), ) self.max_archive_size = self.cloud_interface.MAX_ARCHIVE_SIZE # We aim to a maximum of MAX_CHUNKS_PER_FILE / 2 chinks per file self.chunk_size = 2 * int( max_archive_size / self.cloud_interface.MAX_CHUNKS_PER_FILE ) self.compression = compression self.tar_list = {} self.upload_stats = {} """Already finished uploads list""" self.copy_start_time = datetime.datetime.now() """Copy start time""" self.copy_end_time = None """Copy end time""" def _build_dest_name(self, name, count=0): """ Get the destination tar name :param str name: the name prefix :param int count: the part count :rtype: str """ components = [name] if count > 0: components.append("_%04d" % count) components.append(".tar") if self.compression == "gz": components.append(".gz") elif self.compression == "bz2": components.append(".bz2") elif self.compression == "snappy": components.append(".snappy") return "".join(components) def _get_tar(self, name): """ Get a named tar file from cloud storage. 
Subsequent call with the same name return the same name :param str name: tar name :rtype: tarfile.TarFile """ if name not in self.tar_list or not self.tar_list[name]: self.tar_list[name] = [ CloudTarUploader( cloud_interface=self.cloud_interface, key=os.path.join(self.key_prefix, self._build_dest_name(name)), compression=self.compression, chunk_size=self.chunk_size, ) ] # If the current uploading file size is over DEFAULT_MAX_TAR_SIZE # Close the current file and open the next part uploader = self.tar_list[name][-1] if uploader.size > self.max_archive_size: uploader.close() uploader = CloudTarUploader( cloud_interface=self.cloud_interface, key=os.path.join( self.key_prefix, self._build_dest_name(name, len(self.tar_list[name])), ), compression=self.compression, chunk_size=self.chunk_size, ) self.tar_list[name].append(uploader) return uploader.tar def upload_directory(self, label, src, dst, exclude=None, include=None): logging.info( "Uploading '%s' directory '%s' as '%s'", label, src, self._build_dest_name(dst), ) for root, dirs, files in os.walk(src): tar_root = os.path.relpath(root, src) if not path_allowed(exclude, include, tar_root, True): continue try: self._get_tar(dst).add(root, arcname=tar_root, recursive=False) except EnvironmentError as e: if e.errno == errno.ENOENT: # If a directory disappeared just skip it, # WAL reply will take care during recovery. continue else: raise for item in files: tar_item = os.path.join(tar_root, item) if not path_allowed(exclude, include, tar_item, False): continue logging.debug("Uploading %s", tar_item) try: self._get_tar(dst).add(os.path.join(root, item), arcname=tar_item) except EnvironmentError as e: if e.errno == errno.ENOENT: # If a file disappeared just skip it, # WAL reply will take care during recovery. continue else: raise def add_file(self, label, src, dst, path, optional=False): if optional and not os.path.exists(src): return logging.info( "Uploading '%s' file from '%s' to '%s' with path '%s'", label, src, self._build_dest_name(dst), path, ) tar = self._get_tar(dst) tar.add(src, arcname=path) def add_fileobj(self, label, fileobj, dst, path, mode=None, uid=None, gid=None): logging.info( "Uploading '%s' file to '%s' with path '%s'", label, self._build_dest_name(dst), path, ) tar = self._get_tar(dst) tarinfo = tar.tarinfo(path) fileobj.seek(0, os.SEEK_END) tarinfo.size = fileobj.tell() if mode is not None: tarinfo.mode = mode if uid is not None: tarinfo.gid = uid if gid is not None: tarinfo.gid = gid fileobj.seek(0, os.SEEK_SET) tar.addfile(tarinfo, fileobj) def close(self): logging.info("Marking all the uploaded archives as 'completed'") for name in self.tar_list: if self.tar_list[name]: # Tho only opened file is the last one, all the others # have been already closed self.tar_list[name][-1].close() self.upload_stats[name] = [tar.stats for tar in self.tar_list[name]] self.tar_list[name] = None # Store the end time self.copy_end_time = datetime.datetime.now() def statistics(self): """ Return statistics about the CloudUploadController object. 
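Sketch of the returned structure (keys as assigned below, item label and values illustrative):

    {'total_time': 42.0, 'number_of_workers': 2, 'analysis_time': 0,
     'analysis_time_per_item': {'data': 0}, 'copy_time': 40.5,
     'copy_time_per_item': {'data': 40.5}, 'serialized_copy_time': 71.2,
     'serialized_copy_time_per_item': {'data': 71.2}}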
:rtype: dict """ logging.info("Calculating backup statistics") # This method can only run at the end of a non empty copy assert self.copy_end_time assert self.upload_stats # Initialise the result calculating the total runtime stat = { "total_time": total_seconds(self.copy_end_time - self.copy_start_time), "number_of_workers": self.cloud_interface.worker_processes_count, # Cloud uploads have no analysis "analysis_time": 0, "analysis_time_per_item": {}, "copy_time_per_item": {}, "serialized_copy_time_per_item": {}, } # Calculate the time spent uploading upload_start = None upload_end = None serialized_time = datetime.timedelta(0) for name in self.upload_stats: name_start = None name_end = None total_time = datetime.timedelta(0) for index, data in enumerate(self.upload_stats[name]): logging.debug( "Calculating statistics for file %s, index %s, data: %s", name, index, json.dumps(data, indent=2, sort_keys=True, cls=BarmanEncoder), ) if upload_start is None or upload_start > data["start_time"]: upload_start = data["start_time"] if upload_end is None or upload_end < data["end_time"]: upload_end = data["end_time"] if name_start is None or name_start > data["start_time"]: name_start = data["start_time"] if name_end is None or name_end < data["end_time"]: name_end = data["end_time"] parts = data["parts"] for num in parts: part = parts[num] total_time += part["end_time"] - part["start_time"] stat["serialized_copy_time_per_item"][name] = total_seconds(total_time) serialized_time += total_time # Cloud uploads have no analysis stat["analysis_time_per_item"][name] = 0 stat["copy_time_per_item"][name] = total_seconds(name_end - name_start) # Store the total time spent by copying stat["copy_time"] = total_seconds(upload_end - upload_start) stat["serialized_copy_time"] = total_seconds(serialized_time) return stat class FileUploadStatistics(dict): def __init__(self, *args, **kwargs): super(FileUploadStatistics, self).__init__(*args, **kwargs) start_time = datetime.datetime.now() self.setdefault("status", "uploading") self.setdefault("start_time", start_time) self.setdefault("parts", {}) def set_part_end_time(self, part_number, end_time): part = self["parts"].setdefault(part_number, {"part_number": part_number}) part["end_time"] = end_time def set_part_start_time(self, part_number, start_time): part = self["parts"].setdefault(part_number, {"part_number": part_number}) part["start_time"] = start_time class DecompressingStreamingIO(RawIOBase): """ Provide an IOBase interface which decompresses streaming cloud responses. This is intended to wrap azure_blob_storage.StreamingBlobIO and aws_s3.StreamingBodyIO objects, transparently decompressing chunks while continuing to expose them via the read method of the IOBase interface. This allows TarFile to stream the uncompressed data directly from the cloud provider responses without requiring it to know anything about the compression. """ # The value of 65536 for the chunk size is based on comments in the python-snappy # library which suggest it should be good for almost every scenario. # See: https://github.com/andrix/python-snappy/blob/0.6.0/snappy/snappy.py#L300 COMPRESSED_CHUNK_SIZE = 65536 def __init__(self, streaming_response, decompressor): """ Create a new DecompressingStreamingIO object. A DecompressingStreamingIO object will be created which reads compressed bytes from streaming_response and decompresses them with the supplied decompressor. 
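Minimal usage sketch (illustrative only; streaming_response is any file-like cloud response and the decompressor stands for a ChunkedCompressor, e.g. the snappy one obtained via cloud_compression.get_compressor):

    wrapped = DecompressingStreamingIO(streaming_response, decompressor)
    tar = tarfile.open(fileobj=wrapped, mode='r|')

so that TarFile reads plain tar bytes without knowing anything about the compression.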
:param RawIOBase streaming_response: A file-like object which provides the data in the response streamed from the cloud provider. :param barman.clients.cloud_compression.ChunkedCompressor: A ChunkedCompressor object which provides a decompress(bytes) method to return the decompressed bytes. """ self.streaming_response = streaming_response self.decompressor = decompressor self.buffer = bytes() def _read_from_uncompressed_buffer(self, n): """ Read up to n bytes from the local buffer of uncompressed data. Removes up to n bytes from the local buffer and returns them. If n is greater than the length of the buffer then the entire buffer content is returned and the buffer is emptied. :param int n: The number of bytes to read :return: The bytes read from the local buffer :rtype: bytes """ if n <= len(self.buffer): return_bytes = self.buffer[:n] self.buffer = self.buffer[n:] return return_bytes else: return_bytes = self.buffer self.buffer = bytes() return return_bytes def read(self, n=-1): """ Read up to n bytes of uncompressed data from the wrapped IOBase. Bytes are initially read from the local buffer of uncompressed data. If more bytes are required then chunks of COMPRESSED_CHUNK_SIZE are read from the wrapped IOBase and decompressed in memory until >= n uncompressed bytes have been read. n bytes are then returned with any remaining bytes being stored in the local buffer for future requests. :param int n: The number of uncompressed bytes required :return: Up to n uncompressed bytes from the wrapped IOBase :rtype: bytes """ uncompressed_bytes = self._read_from_uncompressed_buffer(n) if len(uncompressed_bytes) == n: return uncompressed_bytes while len(uncompressed_bytes) < n: compressed_bytes = self.streaming_response.read(self.COMPRESSED_CHUNK_SIZE) uncompressed_bytes += self.decompressor.decompress(compressed_bytes) if len(compressed_bytes) < self.COMPRESSED_CHUNK_SIZE: # If we got fewer bytes than we asked for then we're done break return_bytes = uncompressed_bytes[:n] self.buffer = uncompressed_bytes[n:] return return_bytes class CloudInterface(with_metaclass(ABCMeta)): """ Abstract base class which provides the interface between barman and cloud storage providers. Support for individual cloud providers should be implemented by inheriting from this class and providing implementations for the abstract methods. This class provides generic boilerplate for the asynchronous and parallel upload of objects to cloud providers which support multipart uploads. These uploads are carried out by worker processes which are spawned by _ensure_async and consume upload jobs from a queue. The public async_upload_part and async_complete_multipart_upload methods add jobs to this queue. When the worker processes consume the jobs they execute the synchronous counterparts to the async_* methods (_upload_part and _complete_multipart_upload) which must be implemented in CloudInterface sub-classes. Additional boilerplate for creating buckets and streaming objects as tar files is also provided. """ @abstractproperty def MAX_CHUNKS_PER_FILE(self): """ Maximum number of chunks allowed in a single file in cloud storage. The exact definition of chunk depends on the cloud provider, for example in AWS S3 a chunk would be one part in a multipart upload. In Azure a chunk would be a single block of a block blob. :type: int """ pass @abstractproperty def MIN_CHUNK_SIZE(self): """ Minimum size in bytes of a single chunk. 
:type: int """ pass @abstractproperty def MAX_ARCHIVE_SIZE(self): """ Maximum size in bytes of a single file in cloud storage. :type: int """ pass def __init__(self, url, jobs=2, tags=None): """ Base constructor :param str url: url for the cloud storage resource :param int jobs: How many sub-processes to use for asynchronous uploading, defaults to 2. :param List[tuple] tags: List of tags as k,v tuples to be added to all uploaded objects """ self.url = url self.tags = tags # The worker process and the shared queue are created only when # needed self.queue = None self.result_queue = None self.errors_queue = None self.done_queue = None self.error = None self.abort_requested = False self.worker_processes_count = jobs self.worker_processes = [] # The parts DB is a dictionary mapping each bucket key name to a list # of uploaded parts. # This structure is updated by the _refresh_parts_db method call self.parts_db = collections.defaultdict(list) # Statistics about uploads self.upload_stats = collections.defaultdict(FileUploadStatistics) def close(self): """ Wait for all the asynchronous operations to be done """ if self.queue: for _ in self.worker_processes: self.queue.put(None) for process in self.worker_processes: process.join() def _abort(self): """ Abort all the operations """ if self.queue: for process in self.worker_processes: os.kill(process.pid, signal.SIGINT) self.close() def _ensure_async(self): """ Ensure that the asynchronous execution infrastructure is up and the worker process is running """ if self.queue: return self.queue = multiprocessing.JoinableQueue(maxsize=self.worker_processes_count) self.result_queue = multiprocessing.Queue() self.errors_queue = multiprocessing.Queue() self.done_queue = multiprocessing.Queue() # Delay assigning the worker_processes list to the object until we have # finished spawning the workers so they do not get pickled by multiprocessing # (pickling the worker process references will fail in Python >= 3.8) worker_processes = [] for process_number in range(self.worker_processes_count): process = multiprocessing.Process( target=self._worker_process_main, args=(process_number,) ) process.start() worker_processes.append(process) self.worker_processes = worker_processes def _retrieve_results(self): """ Receive the results from workers and update the local parts DB, making sure that each part list is sorted by part number """ # Wait for all the current jobs to be completed self.queue.join() touched_keys = [] while not self.result_queue.empty(): result = self.result_queue.get() touched_keys.append(result["key"]) self.parts_db[result["key"]].append(result["part"]) # Save the upload end time of the part stats = self.upload_stats[result["key"]] stats.set_part_end_time(result["part_number"], result["end_time"]) for key in touched_keys: self.parts_db[key] = sorted( self.parts_db[key], key=operator.itemgetter("PartNumber") ) # Read the results of completed uploads while not self.done_queue.empty(): result = self.done_queue.get() self.upload_stats[result["key"]].update(result) # Raise an error if a job failed self._handle_async_errors() def _handle_async_errors(self): """ If an upload error has been discovered, stop the upload process, stop all the workers and raise an exception :return: """ # If an error has already been reported, do nothing if self.error: return try: self.error = self.errors_queue.get_nowait() except EmptyQueue: return logging.error("Error received from upload worker: %s", self.error) self._abort() raise CloudUploadingError(self.error) def 
_worker_process_main(self, process_number): """ Repeatedly grab a task from the queue and execute it, until a task containing "None" is grabbed, indicating that the process must stop. :param int process_number: the process number, used in the logging output """ logging.info("Upload process started (worker %s)", process_number) # We create a new session instead of reusing the one # from the parent process to avoid any race condition self._reinit_session() while True: task = self.queue.get() if not task: self.queue.task_done() break try: self._worker_process_execute_job(task, process_number) except Exception as exc: logging.error( "Upload error: %s (worker %s)", force_str(exc), process_number ) logging.debug("Exception details:", exc_info=exc) self.errors_queue.put(force_str(exc)) except KeyboardInterrupt: if not self.abort_requested: logging.info( "Got abort request: upload cancelled (worker %s)", process_number, ) self.abort_requested = True finally: self.queue.task_done() logging.info("Upload process stopped (worker %s)", process_number) def _worker_process_execute_job(self, task, process_number): """ Exec a single task :param Dict task: task to execute :param int process_number: the process number, used in the logging output :return: """ if task["job_type"] == "upload_part": if self.abort_requested: logging.info( "Skipping '%s', part '%s' (worker %s)" % (task["key"], task["part_number"], process_number) ) os.unlink(task["body"]) return else: logging.info( "Uploading '%s', part '%s' (worker %s)" % (task["key"], task["part_number"], process_number) ) with open(task["body"], "rb") as fp: part = self._upload_part( task["upload_metadata"], task["key"], fp, task["part_number"] ) os.unlink(task["body"]) self.result_queue.put( { "key": task["key"], "part_number": task["part_number"], "end_time": datetime.datetime.now(), "part": part, } ) elif task["job_type"] == "complete_multipart_upload": if self.abort_requested: logging.info("Aborting %s (worker %s)" % (task["key"], process_number)) self._abort_multipart_upload(task["upload_metadata"], task["key"]) self.done_queue.put( { "key": task["key"], "end_time": datetime.datetime.now(), "status": "aborted", } ) else: logging.info( "Completing '%s' (worker %s)" % (task["key"], process_number) ) self._complete_multipart_upload( task["upload_metadata"], task["key"], task["parts_metadata"] ) self.done_queue.put( { "key": task["key"], "end_time": datetime.datetime.now(), "status": "done", } ) else: raise ValueError("Unknown task: %s", repr(task)) def async_upload_part(self, upload_metadata, key, body, part_number): """ Asynchronously upload a part into a multipart upload :param dict upload_metadata: Provider-specific metadata for this upload e.g. 
the multipart upload handle in AWS S3 :param str key: The key to use in the cloud service :param any body: A stream-like object to upload :param int part_number: Part number, starting from 1 """ # If an error has already been reported, do nothing if self.error: return self._ensure_async() self._handle_async_errors() # Save the upload start time of the part stats = self.upload_stats[key] stats.set_part_start_time(part_number, datetime.datetime.now()) # If the body is a named temporary file use it directly # WARNING: this imply that the file will be deleted after the upload if hasattr(body, "name") and hasattr(body, "delete") and not body.delete: fp = body else: # Write a temporary file with the part contents with NamedTemporaryFile(delete=False) as fp: shutil.copyfileobj(body, fp, BUFSIZE) # Pass the job to the uploader process self.queue.put( { "job_type": "upload_part", "upload_metadata": upload_metadata, "key": key, "body": fp.name, "part_number": part_number, } ) def async_complete_multipart_upload(self, upload_metadata, key, parts_count): """ Asynchronously finish a certain multipart upload. This method grant that the final call to the cloud storage will happen after all the already scheduled parts have been uploaded. :param dict upload_metadata: Provider-specific metadata for this upload e.g. the multipart upload handle in AWS S3 :param str key: The key to use in the cloud service :param int parts_count: Number of parts """ # If an error has already been reported, do nothing if self.error: return self._ensure_async() self._handle_async_errors() # If parts_db has less then expected parts for this upload, # wait for the workers to send the missing metadata while len(self.parts_db[key]) < parts_count: # Wait for all the current jobs to be completed and # receive all available updates on worker status self._retrieve_results() # Finish the job in the uploader process self.queue.put( { "job_type": "complete_multipart_upload", "upload_metadata": upload_metadata, "key": key, "parts_metadata": self.parts_db[key], } ) del self.parts_db[key] def wait_for_multipart_upload(self, key): """ Wait for a multipart upload to be completed and return the result :param str key: The key to use in the cloud service """ # The upload must exist assert key in self.upload_stats # async_complete_multipart_upload must have been called assert key not in self.parts_db # If status is still uploading the upload has not finished yet while self.upload_stats[key]["status"] == "uploading": # Wait for all the current jobs to be completed and # receive all available updates on worker status self._retrieve_results() return self.upload_stats[key] def setup_bucket(self): """ Search for the target bucket. 
Create it if not exists """ if self.bucket_exists is None: self.bucket_exists = self._check_bucket_existence() # Create the bucket if it doesn't exist if not self.bucket_exists: self._create_bucket() self.bucket_exists = True def extract_tar(self, key, dst): """ Extract a tar archive from cloud to the local directory :param str key: The key identifying the tar archive :param str dst: Path of the directory into which the tar archive should be extracted """ extension = os.path.splitext(key)[-1] compression = "" if extension == ".tar" else extension[1:] tar_mode = cloud_compression.get_streaming_tar_mode("r", compression) fileobj = self.remote_open(key, cloud_compression.get_compressor(compression)) with tarfile.open(fileobj=fileobj, mode=tar_mode) as tf: tf.extractall(path=dst) @abstractmethod def _reinit_session(self): """ Reinitialises any resources used to maintain a session with a cloud provider. This is called by child processes in order to avoid any potential race conditions around re-using the same session as the parent process. """ @abstractmethod def test_connectivity(self): """ Test that the cloud provider is reachable :return: True if the cloud provider is reachable, False otherwise :rtype: bool """ @abstractmethod def _check_bucket_existence(self): """ Check cloud storage for the target bucket :return: True if the bucket exists, False otherwise :rtype: bool """ @abstractmethod def _create_bucket(self): """ Create the bucket in cloud storage """ @abstractmethod def list_bucket(self, prefix="", delimiter="/"): """ List bucket content in a directory manner :param str prefix: :param str delimiter: :return: List of objects and dirs right under the prefix :rtype: List[str] """ @abstractmethod def download_file(self, key, dest_path, decompress): """ Download a file from cloud storage :param str key: The key identifying the file to download :param str dest_path: Where to put the destination file :param bool decompress: Whenever to decompress this file or not """ @abstractmethod def remote_open(self, key, decompressor=None): """ Open a remote object in cloud storage and returns a readable stream :param str key: The key identifying the object to open :param barman.clients.cloud_compression.ChunkedCompressor decompressor: A ChunkedCompressor object which will be used to decompress chunks of bytes as they are read from the stream :return: A file-like object from which the stream can be read or None if the key does not exist """ @abstractmethod def upload_fileobj(self, fileobj, key, override_tags=None): """ Synchronously upload the content of a file-like object to a cloud key :param fileobj IOBase: File-like object to upload :param str key: The key to identify the uploaded object :param List[tuple] override_tags: List of k,v tuples which should override any tags already defined in the cloud interface """ @abstractmethod def create_multipart_upload(self, key): """ Create a new multipart upload and return any metadata returned by the cloud provider. This metadata is treated as an opaque blob by CloudInterface and will be passed into the _upload_part, _complete_multipart_upload and _abort_multipart_upload methods. The implementations of these methods will need to handle this metadata in the way expected by the cloud provider. Some cloud services do not require multipart uploads to be explicitly created. In such cases the implementation can be a no-op which just returns None. 
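        A sketch of the intended call sequence from a caller's perspective
        (the key name and part body are placeholders)::

            upload_metadata = cloud_interface.create_multipart_upload("some/key")
            cloud_interface.async_upload_part(upload_metadata, "some/key", body, 1)
            cloud_interface.async_complete_multipart_upload(upload_metadata, "some/key", 1)
            stats = cloud_interface.wait_for_multipart_upload("some/key")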
:param key: The key to use in the cloud service :return: The multipart upload metadata :rtype: dict[str, str]|None """ @abstractmethod def _upload_part(self, upload_metadata, key, body, part_number): """ Upload a part into this multipart upload and return a dict of part metadata. The part metadata must contain the key "PartNumber" and can optionally contain any other metadata available (for example the ETag returned by S3). The part metadata will included in a list of metadata for all parts of the upload which is passed to the _complete_multipart_upload method. :param dict upload_metadata: Provider-specific metadata for this upload e.g. the multipart upload handle in AWS S3 :param str key: The key to use in the cloud service :param object body: A stream-like object to upload :param int part_number: Part number, starting from 1 :return: The part metadata :rtype: dict[str, None|str] """ @abstractmethod def _complete_multipart_upload(self, upload_metadata, key, parts_metadata): """ Finish a certain multipart upload :param dict upload_metadata: Provider-specific metadata for this upload e.g. the multipart upload handle in AWS S3 :param str key: The key to use in the cloud service :param List[dict] parts_metadata: The list of metadata for the parts composing the multipart upload. Each part is guaranteed to provide a PartNumber and may optionally contain additional metadata returned by the cloud provider such as ETags. """ @abstractmethod def _abort_multipart_upload(self, upload_metadata, key): """ Abort a certain multipart upload The implementation of this method should clean up any dangling resources left by the incomplete upload. :param dict upload_metadata: Provider-specific metadata for this upload e.g. the multipart upload handle in AWS S3 :param str key: The key to use in the cloud service """ @abstractmethod def delete_objects(self, paths): """ Delete the objects at the specified paths :param List[str] paths: """ class CloudBackupUploader(with_metaclass(ABCMeta)): """ Abstract base class which provides a client for uploading backups. This should be inherited from by specialised classes which have knowledge of the specific backup scenario. Initially two scenarios are implemented: * Upload of a backup on a live PostgreSQL server. * Upload of an existing backup on a Barman server. Code for uploading tablespaces and pgdata files is provided in _backup_copy. This will work for data located on a live PostgreSQL server or an existing backup on a Barman server. """ def __init__( self, server_name, cloud_interface, max_archive_size, compression=None, ): """ Base constructor. 
:param str server_name: The name of the server as configured in Barman :param CloudInterface cloud_interface: The interface to use to upload the backup :param int max_archive_size: the maximum size of an uploading archive :param str compression: Compression algorithm to use """ self.compression = compression self.server_name = server_name self.cloud_interface = cloud_interface self.max_archive_size = max_archive_size # Stats self.copy_start_time = None self.copy_end_time = None def _create_upload_controller(self, backup_id): """ Create an upload controller from the specified backup_id :param str backup_id: The backup identifier :return: The upload controller :rtype: CloudUploadController """ key_prefix = os.path.join( self.cloud_interface.path, self.server_name, "base", backup_id, ) return CloudUploadController( self.cloud_interface, key_prefix, self.max_archive_size, self.compression, ) @abstractmethod def _get_tablespace_location(self, tablespace): """ Return the on-disk location of the supplied tablespace This will vary depending on whether barman-cloud is running on a live PostgreSQL server or a Barman server """ def _backup_copy(self, controller, backup_info, pgdata_dir, server_major_version): """ Perform the actual copy of the backup uploading it to cloud storage. First, it copies one tablespace at a time, then the PGDATA directory, then pg_control. Bandwidth limitation, according to configuration, is applied in the process. :param barman.cloud.CloudUploadController controller: upload controller :param barman.infofile.BackupInfo backup_info: backup information :param str pgdata_dir: Path to pgdata directory :param str server_major_version: Major version of the postgres server being backed up """ # Store the start time self.copy_start_time = datetime.datetime.now() # List of paths to be excluded by the PGDATA copy exclude = [] # Process every tablespace if backup_info.tablespaces: for tablespace in backup_info.tablespaces: # If the tablespace location is inside the data directory, # exclude and protect it from being copied twice during # the data directory copy if tablespace.location.startswith(backup_info.pgdata + "/"): exclude += [tablespace.location[len(backup_info.pgdata) :]] # Exclude and protect the tablespace from being copied again # during the data directory copy exclude += ["/pg_tblspc/%s" % tablespace.oid] # Copy the tablespace directory. # NOTE: Barman should archive only the content of directory # "PG_" + PG_MAJORVERSION + "_" + CATALOG_VERSION_NO # but CATALOG_VERSION_NO is not easy to retrieve, so we copy # "PG_" + PG_MAJORVERSION + "_*" # It could select some spurious directory if a development or # a beta version have been used, but it's good enough for a # production system as it filters out other major versions. 
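            # For example (illustrative values): with server_major_version "13"
            # the include pattern below becomes "/PG_13_*", matching catalog
            # directories such as "PG_13_202007201" inside the tablespace
            # location.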
controller.upload_directory( label=tablespace.name, src=self._get_tablespace_location(tablespace), dst="%s" % tablespace.oid, exclude=["/*"] + EXCLUDE_LIST, include=["/PG_%s_*" % server_major_version], ) # Copy PGDATA directory (or if that is itself a symlink, just follow it # and copy whatever it points to; we won't store the symlink in the tar # file) if os.path.islink(pgdata_dir): pgdata_dir = os.path.realpath(pgdata_dir) controller.upload_directory( label="pgdata", src=pgdata_dir, dst="data", exclude=PGDATA_EXCLUDE_LIST + EXCLUDE_LIST + exclude, ) # At last copy pg_control controller.add_file( label="pg_control", src="%s/global/pg_control" % pgdata_dir, dst="data", path="global/pg_control", ) @abstractmethod def backup(self): """ Coordinate the upload of a Backup to cloud storage Any necessary coordination, such as calling pg_start_backup in PostgreSQL, should happen here. """ @abstractmethod def handle_backup_errors(self, action, exc): """ Perform appropriate cleanup actions and exit :param str action: the upload phase that has failed :param BaseException exc: the exception that caused the failure """ class CloudBackupUploaderBarman(CloudBackupUploader): """ A cloud storage upload client for a pre-existing backup on the Barman server. """ def __init__( self, server_name, cloud_interface, max_archive_size, backup_dir, backup_id, compression=None, ): """ Create the cloud storage upload client for a backup in the specified location with the specified backup_id. :param str server_name: The name of the server as configured in Barman :param CloudInterface cloud_interface: The interface to use to upload the backup :param int max_archive_size: the maximum size of an uploading archive :param str backup_dir: Path to the directory containing the backup to be uploaded :param str backup_id: The id of the backup to upload :param str compression: Compression algorithm to use """ super(CloudBackupUploaderBarman, self).__init__( server_name, cloud_interface, max_archive_size, compression=compression, ) self.backup_dir = backup_dir self.backup_id = backup_id def _get_tablespace_location(self, tablespace): """ Determines the tablespace location by combining the backup directory with the oid of the tablespace. 
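        For example (hypothetical values), a tablespace with oid 16384 and a
        backup_dir of "/var/lib/barman/main/base/20220101T000000" resolves to
        "/var/lib/barman/main/base/20220101T000000/16384".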
""" return os.path.join(self.backup_dir, str(tablespace.oid)) def backup(self): """ Upload a Backup to cloud storage """ # Read the backup_info file from disk as the backup has already been created backup_info = BackupInfo(self.backup_id) backup_info.load(filename=os.path.join(self.backup_dir, "backup.info")) controller = self._create_upload_controller(self.backup_id) try: self._backup_copy( controller, backup_info, os.path.join(self.backup_dir, "data"), backup_info.pg_major_version(), ) # Closing the controller will finalize all the running uploads controller.close() # Store the end time self.copy_end_time = datetime.datetime.now() # Manually add backup.info with open( os.path.join(self.backup_dir, "backup.info"), "rb" ) as backup_info_file: self.cloud_interface.upload_fileobj( backup_info_file, key=os.path.join(controller.key_prefix, "backup.info"), ) # Use BaseException instead of Exception to catch events like # KeyboardInterrupt (e.g.: CTRL-C) except BaseException as exc: # Mark the backup as failed and exit self.handle_backup_errors("uploading data", exc) raise SystemExit(1) logging.info( "Upload of backup completed (start time: %s, elapsed time: %s)", self.copy_start_time, human_readable_timedelta(datetime.datetime.now() - self.copy_start_time), ) def handle_backup_errors(self, action, exc): """ Log that the backup upload has failed and exit :param str action: the upload phase that has failed :param BaseException exc: the exception that caused the failure """ msg_lines = force_str(exc).strip().splitlines() # If the exception has no attached message use the raw # type name if len(msg_lines) == 0: msg_lines = [type(exc).__name__] logging.error("Backup upload failed %s (%s)", action, msg_lines[0]) logging.debug("Exception details:", exc_info=exc) class CloudBackupUploaderPostgres(CloudBackupUploader): """ A cloud storage upload client for a live PostgreSQL server. """ def __init__( self, server_name, cloud_interface, max_archive_size, postgres, compression=None, ): super(CloudBackupUploaderPostgres, self).__init__( server_name, cloud_interface, max_archive_size, compression=compression, ) self.postgres = postgres def _get_tablespace_location(self, tablespace): """ Just returns the location of the tablespace as we are running on the PostgreSQL server. """ return tablespace.location def _backup_copy(self, controller, backup_info, pgdata_dir, server_major_version): """ Perform the actual copy of the backup uploading it to cloud storage. The core implementation of _backup_copy is extended here so that we include external config files on the PostgreSQL server. """ super(CloudBackupUploaderPostgres, self)._backup_copy( controller, backup_info, pgdata_dir, server_major_version ) # Copy configuration files (if not inside PGDATA) external_config_files = backup_info.get_external_config_files() included_config_files = [] for config_file in external_config_files: # Add included files to a list, they will be handled later if config_file.file_type == "include": included_config_files.append(config_file) continue # If the ident file is missing, it isn't an error condition # for PostgreSQL. # Barman is consistent with this behavior. 
optional = False if config_file.file_type == "ident_file": optional = True # Create the actual copy jobs in the controller controller.add_file( label=config_file.file_type, src=config_file.path, dst="data", path=os.path.basename(config_file.path), optional=optional, ) # Check for any include directives in PostgreSQL configuration # Currently, include directives are not supported for files that # reside outside PGDATA. These files must be manually backed up. # Barman will emit a warning and list those files if any(included_config_files): msg = ( "The usage of include directives is not supported " "for files that reside outside PGDATA.\n" "Please manually backup the following files:\n" "\t%s\n" % "\n\t".join(icf.path for icf in included_config_files) ) logging.warning(msg) def backup(self): """ Upload a Backup to cloud storage directly from a live PostgreSQL server. """ server_name = "cloud" backup_info = BackupInfo( backup_id=datetime.datetime.now().strftime("%Y%m%dT%H%M%S"), server_name=server_name, ) backup_info.set_attribute("systemid", self.postgres.get_systemid()) controller = self._create_upload_controller(backup_info.backup_id) if self.postgres.server_version >= 90600 or self.postgres.has_pgespresso: strategy = ConcurrentBackupStrategy(self.postgres, server_name) else: strategy = ExclusiveBackupStrategy(self.postgres, server_name) logging.info("Starting backup '%s'", backup_info.backup_id) strategy.start_backup(backup_info) try: self._backup_copy( controller, backup_info, backup_info.pgdata, self.postgres.server_major_version, ) logging.info("Stopping backup '%s'", backup_info.backup_id) strategy.stop_backup(backup_info) # Create a restore point after a backup target_name = "barman_%s" % backup_info.backup_id self.postgres.create_restore_point(target_name) # Free the Postgres connection self.postgres.close() # Eventually, add the backup_label from the backup_info if backup_info.backup_label: pgdata_stat = os.stat(backup_info.pgdata) controller.add_fileobj( label="backup_label", fileobj=BytesIO(backup_info.backup_label.encode("UTF-8")), dst="data", path="backup_label", uid=pgdata_stat.st_uid, gid=pgdata_stat.st_gid, ) # Closing the controller will finalize all the running uploads controller.close() # Store the end time self.copy_end_time = datetime.datetime.now() # Store statistics about the copy backup_info.set_attribute("copy_stats", controller.statistics()) # Set the backup status as DONE backup_info.set_attribute("status", BackupInfo.DONE) # Use BaseException instead of Exception to catch events like # KeyboardInterrupt (e.g.: CTRL-C) except BaseException as exc: # Mark the backup as failed and exit self.handle_backup_errors("uploading data", exc, backup_info) raise SystemExit(1) finally: try: with BytesIO() as backup_info_file: backup_info.save(file_object=backup_info_file) backup_info_file.seek(0, os.SEEK_SET) key = os.path.join(controller.key_prefix, "backup.info") logging.info("Uploading '%s'", key) self.cloud_interface.upload_fileobj(backup_info_file, key) except BaseException as exc: # Mark the backup as failed and exit self.handle_backup_errors( "uploading backup.info file", exc, backup_info ) raise SystemExit(1) logging.info( "Backup end at LSN: %s (%s, %08X)", backup_info.end_xlog, backup_info.end_wal, backup_info.end_offset, ) logging.info( "Backup completed (start time: %s, elapsed time: %s)", self.copy_start_time, human_readable_timedelta(datetime.datetime.now() - self.copy_start_time), ) def handle_backup_errors(self, action, exc, backup_info): """ Mark the backup as 
failed and exit :param str action: the upload phase that has failed :param barman.infofile.BackupInfo backup_info: the backup info file :param BaseException exc: the exception that caused the failure """ msg_lines = force_str(exc).strip().splitlines() # If the exception has no attached message use the raw # type name if len(msg_lines) == 0: msg_lines = [type(exc).__name__] if backup_info: # Use only the first line of exception message # in backup_info error field backup_info.set_attribute("status", BackupInfo.FAILED) backup_info.set_attribute( "error", "failure %s (%s)" % (action, msg_lines[0]) ) logging.error("Backup failed %s (%s)", action, msg_lines[0]) logging.debug("Exception details:", exc_info=exc) class BackupFileInfo(object): def __init__(self, oid=None, base=None, path=None, compression=None): self.oid = oid self.base = base self.path = path self.compression = compression self.additional_files = [] class CloudBackupCatalog(KeepManagerMixinCloud): """ Cloud storage backup catalog """ def __init__(self, cloud_interface, server_name): """ Object responsible for retrievin backup catalog from cloud storage :param CloudInterface cloud_interface: The interface to use to upload the backup :param str server_name: The name of the server as configured in Barman """ super(CloudBackupCatalog, self).__init__( cloud_interface=cloud_interface, server_name=server_name ) self.cloud_interface = cloud_interface self.server_name = server_name self.prefix = os.path.join(self.cloud_interface.path, self.server_name, "base") self.wal_prefix = os.path.join( self.cloud_interface.path, self.server_name, "wals" ) self._backup_list = None self._wal_paths = None self.unreadable_backups = [] def get_backup_list(self): """ Retrieve the list of available backup from cloud storage :rtype: Dict[str,BackupInfo] """ if self._backup_list is None: backup_list = {} # get backups metadata for backup_dir in self.cloud_interface.list_bucket(self.prefix + "/"): # We want only the directories if backup_dir[-1] != "/": continue backup_id = os.path.basename(backup_dir.rstrip("/")) try: backup_info = self.get_backup_info(backup_id) except Exception as exc: logging.warning( "Unable to open backup.info file for %s: %s" % (backup_id, exc) ) self.unreadable_backups.append(backup_id) continue if backup_info: backup_list[backup_id] = backup_info self._backup_list = backup_list return self._backup_list def remove_backup_from_cache(self, backup_id): """ Remove backup with backup_id from the cached list. This is intended for cases where we want to update the state without firing lots of requests at the bucket. """ if self._backup_list: self._backup_list.pop(backup_id) def get_wal_paths(self): """ Retrieve a dict of WAL paths keyed by the WAL name from cloud storage """ if self._wal_paths is None: wal_paths = {} for wal in self.cloud_interface.list_bucket( self.wal_prefix + "/", delimiter="" ): wal_basename = os.path.basename(wal) if xlog.is_any_xlog_file(wal_basename): # We have an uncompressed xlog of some kind wal_paths[wal_basename] = wal else: # Allow one suffix for compression and try again wal_name, suffix = os.path.splitext(wal_basename) if suffix in ALLOWED_COMPRESSIONS and xlog.is_any_xlog_file( wal_name ): wal_paths[wal_name] = wal else: # If it still doesn't look like an xlog file, ignore continue self._wal_paths = wal_paths return self._wal_paths def remove_wal_from_cache(self, wal_name): """ Remove named wal from the cached list. 
This is intended for cases where we want to update the state without firing lots of requests at the bucket. """ if self._wal_paths: self._wal_paths.pop(wal_name) def get_backup_info(self, backup_id): """ Load a BackupInfo from cloud storage :param str backup_id: The backup id to load :rtype: BackupInfo """ backup_info_path = os.path.join(self.prefix, backup_id, "backup.info") backup_info_file = self.cloud_interface.remote_open(backup_info_path) if backup_info_file is None: return None backup_info = BackupInfo(backup_id) backup_info.load(file_object=backup_info_file) return backup_info def get_backup_files(self, backup_info, allow_missing=False): """ Get the list of expected files part of a backup :param BackupInfo backup_info: the backup information :param bool allow_missing: True if missing backup files are allowed, False otherwise. A value of False will cause a SystemExit to be raised if any files expected due to the `backup_info` content cannot be found. :rtype: dict[int, BackupFileInfo] """ # Correctly format the source path source_dir = os.path.join(self.prefix, backup_info.backup_id) base_path = os.path.join(source_dir, "data") backup_files = {None: BackupFileInfo(None, base_path)} if backup_info.tablespaces: for tblspc in backup_info.tablespaces: base_path = os.path.join(source_dir, "%s" % tblspc.oid) backup_files[tblspc.oid] = BackupFileInfo(tblspc.oid, base_path) for item in self.cloud_interface.list_bucket(source_dir + "/"): for backup_file in backup_files.values(): if item.startswith(backup_file.base): # Automatically detect additional files suffix = item[len(backup_file.base) :] # Avoid to match items that are prefix of other items if not suffix or suffix[0] not in (".", "_"): logging.debug( "Skipping spurious prefix match: %s|%s", backup_file.base, suffix, ) continue # If this file have a suffix starting with `_`, # it is an additional file and we add it to the main # BackupFileInfo ... if suffix[0] == "_": info = BackupFileInfo(backup_file.oid, base_path) backup_file.additional_files.append(info) ext = suffix.split(".", 1)[-1] # ... otherwise this is the main file else: info = backup_file ext = suffix[1:] # Infer the compression from the file extension if ext == "tar": info.compression = None elif ext == "tar.gz": info.compression = "gzip" elif ext == "tar.bz2": info.compression = "bzip2" elif ext == "tar.snappy": info.compression = "snappy" else: logging.warning("Skipping unknown extension: %s", ext) continue info.path = item logging.info( "Found file from backup '%s' of server '%s': %s", backup_info.backup_id, self.server_name, info.path, ) break for backup_file in backup_files.values(): logging_fun = logging.warning if allow_missing else logging.error if backup_file.path is None: logging_fun( "Missing file %s.* for server %s", backup_file.base, self.server_name, ) if not allow_missing: raise SystemExit(1) return backup_files barman-2.18/barman/infofile.py0000644000621200062120000006441714172556763014521 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import ast import collections import inspect import logging import os import dateutil.parser import dateutil.tz from barman import xlog from barman.exceptions import BackupInfoBadInitialisation from barman.utils import fsync_dir # Named tuple representing a Tablespace with 'name' 'oid' and 'location' # as property. Tablespace = collections.namedtuple("Tablespace", "name oid location") # Named tuple representing a file 'path' with an associated 'file_type' TypedFile = collections.namedtuple("ConfFile", "file_type path") _logger = logging.getLogger(__name__) def output_tablespace_list(tablespaces): """ Return the literal representation of tablespaces as a Python string :param tablespaces tablespaces: list of Tablespaces objects :return str: Literal representation of tablespaces """ if tablespaces: return repr([tuple(item) for item in tablespaces]) else: return None def load_tablespace_list(string): """ Load the tablespaces as a Python list of namedtuple Uses ast to evaluate information about tablespaces. The returned list is used to create a list of namedtuple :param str string: :return list: list of namedtuple representing all the tablespaces """ obj = ast.literal_eval(string) if obj: return [Tablespace._make(item) for item in obj] else: return None def null_repr(obj): """ Return the literal representation of an object :param object obj: object to represent :return str|None: Literal representation of an object or None """ return repr(obj) if obj else None def load_datetime_tz(time_str): """ Load datetime and ensure the result is timezone-aware. If the parsed timestamp is naive, transform it into a timezone-aware one using the local timezone. :param str time_str: string representing a timestamp :return datetime: the parsed timezone-aware datetime """ # dateutil parser returns naive or tz-aware string depending on the format # of the input string timestamp = dateutil.parser.parse(time_str) # if the parsed timestamp is naive, forces it to local timezone if timestamp.tzinfo is None: timestamp = timestamp.replace(tzinfo=dateutil.tz.tzlocal()) return timestamp class Field(object): def __init__(self, name, dump=None, load=None, default=None, doc=None): """ Field descriptor to be used with a FieldListFile subclass. 
The resulting field is like a normal attribute with two optional associated function: to_str and from_str The Field descriptor can also be used as a decorator class C(FieldListFile): x = Field('x') @x.dump def x(val): return '0x%x' % val @x.load def x(val): return int(val, 16) :param str name: the name of this attribute :param callable dump: function used to dump the content to a disk :param callable load: function used to reload the content from disk :param default: default value for the field :param str doc: docstring of the filed """ self.name = name self.to_str = dump self.from_str = load self.default = default self.__doc__ = doc # noinspection PyUnusedLocal def __get__(self, obj, objtype=None): if obj is None: return self if not hasattr(obj, "_fields"): obj._fields = {} return obj._fields.setdefault(self.name, self.default) def __set__(self, obj, value): if not hasattr(obj, "_fields"): obj._fields = {} obj._fields[self.name] = value def __delete__(self, obj): raise AttributeError("can't delete attribute") def dump(self, to_str): return type(self)(self.name, to_str, self.from_str, self.__doc__) def load(self, from_str): return type(self)(self.name, self.to_str, from_str, self.__doc__) class FieldListFile(object): __slots__ = ("_fields", "filename") def __init__(self, **kwargs): """ Represent a predefined set of keys with the associated value. The constructor build the object assigning every keyword argument to the corresponding attribute. If a provided keyword argument doesn't has a corresponding attribute an AttributeError exception is raised. The values provided to the constructor must be of the appropriate type for the corresponding attribute. The constructor will not attempt any validation or conversion on them. This class is meant to be an abstract base class. :raises: AttributeError """ self._fields = {} self.filename = None for name in kwargs: field = getattr(type(self), name, None) if isinstance(field, Field): setattr(self, name, kwargs[name]) else: raise AttributeError("unknown attribute %s" % name) @classmethod def from_meta_file(cls, filename): """ Factory method that read the specified file and build an object with its content. :param str filename: the file to read """ o = cls() o.load(filename) return o def save(self, filename=None, file_object=None): """ Serialize the object to the specified file or file object If a file_object is specified it will be used. If the filename is not specified it uses the one memorized in the filename attribute. If neither the filename attribute and parameter are set a ValueError exception is raised. :param str filename: path of the file to write :param file file_object: a file like object to write in :param str filename: the file to write :raises: ValueError """ if file_object: info = file_object else: filename = filename or self.filename if filename: info = open(filename + ".tmp", "wb") else: info = None if not info: raise ValueError( "either a valid filename or a file_object must be specified" ) try: for name, field in sorted(inspect.getmembers(type(self))): value = getattr(self, name, None) if isinstance(field, Field): if callable(field.to_str): value = field.to_str(value) info.write(("%s=%s\n" % (name, value)).encode("UTF-8")) finally: if not file_object: info.close() if not file_object: os.rename(filename + ".tmp", filename) fsync_dir(os.path.normpath(os.path.dirname(filename))) def load(self, filename=None, file_object=None): """ Replaces the current object content with the one deserialized from the provided file. 
This method set the filename attribute. A ValueError exception is raised if the provided file contains any invalid line. :param str filename: path of the file to read :param file file_object: a file like object to read from :param str filename: the file to read :raises: ValueError """ if file_object: info = file_object elif filename: info = open(filename, "rb") else: raise ValueError("either filename or file_object must be specified") # detect the filename if a file_object is passed if not filename and file_object: if hasattr(file_object, "name"): filename = file_object.name # canonicalize filename if filename: self.filename = os.path.abspath(filename) else: self.filename = None filename = "" # This is only for error reporting with info: for line in info: line = line.decode("UTF-8") # skip spaces and comments if line.isspace() or line.rstrip().startswith("#"): continue # parse the line of form "key = value" try: name, value = [x.strip() for x in line.split("=", 1)] except ValueError: raise ValueError( "invalid line %s in file %s" % (line.strip(), filename) ) # use the from_str function to parse the value field = getattr(type(self), name, None) if value == "None": value = None elif isinstance(field, Field) and callable(field.from_str): value = field.from_str(value) setattr(self, name, value) def items(self): """ Return a generator returning a list of (key, value) pairs. If a filed has a dump function defined, it will be used. """ for name, field in sorted(inspect.getmembers(type(self))): value = getattr(self, name, None) if isinstance(field, Field): if callable(field.to_str): value = field.to_str(value) yield (name, value) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, ", ".join(["%s=%r" % x for x in self.items()]), ) class WalFileInfo(FieldListFile): """ Metadata of a WAL file. """ __slots__ = ("orig_filename",) name = Field("name", doc="base name of WAL file") size = Field("size", load=int, doc="WAL file size after compression") time = Field( "time", load=float, doc="WAL file modification time (seconds since epoch)" ) compression = Field("compression", doc="compression type") @classmethod def from_file( cls, filename, compression_manager=None, unidentified_compression=None, **kwargs ): """ Factory method to generate a WalFileInfo from a WAL file. Every keyword argument will override any attribute from the provided file. If a keyword argument doesn't has a corresponding attribute an AttributeError exception is raised. :param str filename: the file to inspect :param Compressionmanager compression_manager: a compression manager which will be used to identify the compression :param str unidentified_compression: the compression to set if the current schema is not identifiable """ stat = os.stat(filename) kwargs.setdefault("name", os.path.basename(filename)) kwargs.setdefault("size", stat.st_size) kwargs.setdefault("time", stat.st_mtime) if "compression" not in kwargs: kwargs["compression"] = ( compression_manager.identify_compression(filename) or unidentified_compression ) obj = cls(**kwargs) obj.filename = "%s.meta" % filename obj.orig_filename = filename return obj def to_xlogdb_line(self): """ Format the content of this object as a xlogdb line. 
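        For example (illustrative values), a WAL file named
        "000000010000000000000001" of 16777216 bytes, modified at epoch
        1609459200.0 and compressed with gzip is rendered as the
        tab-separated line "000000010000000000000001\t16777216\t1609459200.0\tgzip\n".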
""" return "%s\t%s\t%s\t%s\n" % (self.name, self.size, self.time, self.compression) @classmethod def from_xlogdb_line(cls, line): """ Parse a line from xlog catalogue :param str line: a line in the wal database to parse :rtype: WalFileInfo """ try: name, size, time, compression = line.split() except ValueError: # Old format compatibility (no compression) compression = None try: name, size, time = line.split() except ValueError: raise ValueError("cannot parse line: %r" % (line,)) # The to_xlogdb_line method writes None values as literal 'None' if compression == "None": compression = None size = int(size) time = float(time) return cls(name=name, size=size, time=time, compression=compression) def to_json(self): """ Return an equivalent dictionary that can be encoded in json """ return dict(self.items()) def relpath(self): """ Returns the WAL file path relative to the server's wals_directory """ return os.path.join(xlog.hash_dir(self.name), self.name) def fullpath(self, server): """ Returns the WAL file full path :param barman.server.Server server: the server that owns the wal file """ return os.path.join(server.config.wals_directory, self.relpath()) class BackupInfo(FieldListFile): #: Conversion to string EMPTY = "EMPTY" STARTED = "STARTED" FAILED = "FAILED" WAITING_FOR_WALS = "WAITING_FOR_WALS" DONE = "DONE" SYNCING = "SYNCING" STATUS_COPY_DONE = (WAITING_FOR_WALS, DONE) STATUS_ALL = (EMPTY, STARTED, WAITING_FOR_WALS, DONE, SYNCING, FAILED) STATUS_NOT_EMPTY = (STARTED, WAITING_FOR_WALS, DONE, SYNCING, FAILED) STATUS_ARCHIVING = (STARTED, WAITING_FOR_WALS, DONE, SYNCING) #: Status according to retention policies OBSOLETE = "OBSOLETE" VALID = "VALID" POTENTIALLY_OBSOLETE = "OBSOLETE*" NONE = "-" KEEP_FULL = "KEEP:FULL" KEEP_STANDALONE = "KEEP:STANDALONE" RETENTION_STATUS = ( OBSOLETE, VALID, POTENTIALLY_OBSOLETE, KEEP_FULL, KEEP_STANDALONE, NONE, ) version = Field("version", load=int) pgdata = Field("pgdata") # Parse the tablespaces as a literal Python list of namedtuple # Output the tablespaces as a literal Python list of tuple tablespaces = Field( "tablespaces", load=load_tablespace_list, dump=output_tablespace_list ) # Timeline is an integer timeline = Field("timeline", load=int) begin_time = Field("begin_time", load=load_datetime_tz) begin_xlog = Field("begin_xlog") begin_wal = Field("begin_wal") begin_offset = Field("begin_offset", load=int) size = Field("size", load=int) deduplicated_size = Field("deduplicated_size", load=int) end_time = Field("end_time", load=load_datetime_tz) end_xlog = Field("end_xlog") end_wal = Field("end_wal") end_offset = Field("end_offset", load=int) status = Field("status", default=EMPTY) server_name = Field("server_name") error = Field("error") mode = Field("mode") config_file = Field("config_file") hba_file = Field("hba_file") ident_file = Field("ident_file") included_files = Field("included_files", load=ast.literal_eval, dump=null_repr) backup_label = Field("backup_label", load=ast.literal_eval, dump=null_repr) copy_stats = Field("copy_stats", load=ast.literal_eval, dump=null_repr) xlog_segment_size = Field( "xlog_segment_size", load=int, default=xlog.DEFAULT_XLOG_SEG_SIZE ) systemid = Field("systemid") __slots__ = "backup_id", "backup_version" def __init__(self, backup_id, **kwargs): """ Stores meta information about a single backup :param str,None backup_id: """ self.backup_version = 2 self.backup_id = backup_id super(BackupInfo, self).__init__(**kwargs) def get_required_wal_segments(self): """ Get the list of required WAL segments for the current backup 
""" return xlog.generate_segment_names( self.begin_wal, self.end_wal, self.version, self.xlog_segment_size ) def get_external_config_files(self): """ Identify all the configuration files that reside outside the PGDATA. Returns a list of TypedFile objects. :rtype: list[TypedFile] """ config_files = [] for file_type in ("config_file", "hba_file", "ident_file"): config_file = getattr(self, file_type, None) if config_file: # Consider only those that reside outside of the original # PGDATA directory if config_file.startswith(self.pgdata): _logger.debug( "Config file '%s' already in PGDATA", config_file[len(self.pgdata) + 1 :], ) continue config_files.append(TypedFile(file_type, config_file)) # Check for any include directives in PostgreSQL configuration # Currently, include directives are not supported for files that # reside outside PGDATA. These files must be manually backed up. # Barman will emit a warning and list those files if self.included_files: for included_file in self.included_files: if not included_file.startswith(self.pgdata): config_files.append(TypedFile("include", included_file)) return config_files def set_attribute(self, key, value): """ Set a value for a given key """ setattr(self, key, value) def to_dict(self): """ Return the backup_info content as a simple dictionary :return dict: """ result = dict(self.items()) result.update( backup_id=self.backup_id, server_name=self.server_name, mode=self.mode, tablespaces=self.tablespaces, included_files=self.included_files, copy_stats=self.copy_stats, ) return result def to_json(self): """ Return an equivalent dictionary that uses only json-supported types """ data = self.to_dict() # Convert fields which need special types not supported by json if data.get("tablespaces") is not None: data["tablespaces"] = [list(item) for item in data["tablespaces"]] if data.get("begin_time") is not None: data["begin_time"] = data["begin_time"].ctime() if data.get("end_time") is not None: data["end_time"] = data["end_time"].ctime() return data @classmethod def from_json(cls, server, json_backup_info): """ Factory method that builds a BackupInfo object from a json dictionary :param barman.Server server: the server related to the Backup :param dict json_backup_info: the data set containing values from json """ data = dict(json_backup_info) # Convert fields which need special types not supported by json if data.get("tablespaces") is not None: data["tablespaces"] = [ Tablespace._make(item) for item in data["tablespaces"] ] if data.get("begin_time") is not None: data["begin_time"] = load_datetime_tz(data["begin_time"]) if data.get("end_time") is not None: data["end_time"] = load_datetime_tz(data["end_time"]) # Instantiate a BackupInfo object using the converted fields return cls(server, **data) def pg_major_version(self): """ Returns the major version of the PostgreSQL instance from which the backup was made taking into account the change in versioning scheme between PostgreSQL < 10.0 and PostgreSQL >= 10.0. 
""" major = int(self.version / 10000) if major < 10: minor = int(self.version / 100 % 100) return "%d.%d" % (major, minor) else: return str(major) def wal_directory(self): """ Returns "pg_wal" (v10 and above) or "pg_xlog" (v9.6 and below) based on the Postgres version represented by this backup """ return "pg_wal" if self.version >= 100000 else "pg_xlog" class LocalBackupInfo(BackupInfo): __slots__ = "server", "config", "backup_manager" def __init__(self, server, info_file=None, backup_id=None, **kwargs): """ Stores meta information about a single backup :param Server server: :param file,str,None info_file: :param str,None backup_id: :raise BackupInfoBadInitialisation: if the info_file content is invalid or neither backup_info or """ # Initialises the attributes for the object # based on the predefined keys super(LocalBackupInfo, self).__init__(backup_id=backup_id, **kwargs) self.server = server self.config = server.config self.backup_manager = self.server.backup_manager self.server_name = self.config.name self.mode = self.backup_manager.mode if backup_id: # Cannot pass both info_file and backup_id if info_file: raise BackupInfoBadInitialisation( "both info_file and backup_id parameters are set" ) self.backup_id = backup_id self.filename = self.get_filename() # Check if a backup info file for a given server and a given ID # already exists. If so load the values from the file. if os.path.exists(self.filename): self.load(filename=self.filename) elif info_file: if hasattr(info_file, "read"): # We have been given a file-like object self.load(file_object=info_file) else: # Just a file name self.load(filename=info_file) self.backup_id = self.detect_backup_id() elif not info_file: raise BackupInfoBadInitialisation( "backup_id and info_file parameters are both unset" ) # Manage backup version for new backup structure try: # the presence of pgdata directory is the marker of version 1 if self.backup_id is not None and os.path.exists( os.path.join(self.get_basebackup_directory(), "pgdata") ): self.backup_version = 1 except Exception as e: _logger.warning( "Error detecting backup_version, use default: 2. 
Failure reason: %s", e, ) def get_list_of_files(self, target): """ Get the list of files for the current backup """ # Walk down the base backup directory if target in ("data", "standalone", "full"): for root, _, files in os.walk(self.get_basebackup_directory()): files.sort() for f in files: yield os.path.join(root, f) if target in "standalone": # List all the WAL files for this backup for x in self.get_required_wal_segments(): yield self.server.get_wal_full_path(x) if target in ("wal", "full"): for wal_info in self.server.get_wal_until_next_backup( self, include_history=True ): yield wal_info.fullpath(self.server) def detect_backup_id(self): """ Detect the backup ID from the name of the parent dir of the info file """ if self.filename: return os.path.basename(os.path.dirname(self.filename)) else: return None def get_basebackup_directory(self): """ Get the default filename for the backup.info file based on backup ID and server directory for base backups """ return os.path.join(self.config.basebackups_directory, self.backup_id) def get_data_directory(self, tablespace_oid=None): """ Get path to the backup data dir according with the backup version If tablespace_oid is passed, build the path to the tablespace base directory, according with the backup version :param int tablespace_oid: the oid of a valid tablespace """ # Check if a tablespace oid is passed and if is a valid oid if tablespace_oid is not None: if self.tablespaces is None: raise ValueError("Invalid tablespace OID %s" % tablespace_oid) invalid_oid = all( str(tablespace_oid) != str(tablespace.oid) for tablespace in self.tablespaces ) if invalid_oid: raise ValueError("Invalid tablespace OID %s" % tablespace_oid) # Build the requested path according to backup_version value path = [self.get_basebackup_directory()] # Check the version of the backup if self.backup_version == 2: # If an oid has been provided, we are looking for a tablespace if tablespace_oid is not None: # Append the oid to the basedir of the backup path.append(str(tablespace_oid)) else: # Looking for the data dir path.append("data") else: # Backup v1, use pgdata as base path.append("pgdata") # If a oid has been provided, we are looking for a tablespace. if tablespace_oid is not None: # Append the path to pg_tblspc/oid folder inside pgdata path.extend(("pg_tblspc", str(tablespace_oid))) # Return the built path return os.path.join(*path) def get_filename(self): """ Get the default filename for the backup.info file based on backup ID and server directory for base backups """ return os.path.join(self.get_basebackup_directory(), "backup.info") def save(self, filename=None, file_object=None): if not file_object: # Make sure the containing directory exists filename = filename or self.filename dir_name = os.path.dirname(filename) if not os.path.exists(dir_name): os.makedirs(dir_name) super(LocalBackupInfo, self).save(filename=filename, file_object=file_object) barman-2.18/barman/backup_executor.py0000644000621200062120000021537014172556763016105 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ Backup Executor module A Backup Executor is a class responsible for the execution of a backup. Specific implementations of backups are defined by classes that derive from BackupExecutor (e.g.: backup with rsync through Ssh). A BackupExecutor is invoked by the BackupManager for backup operations. """ import datetime import logging import os import re import shutil from abc import ABCMeta, abstractmethod from functools import partial import dateutil.parser from distutils.version import LooseVersion as Version from barman import output, xlog from barman.command_wrappers import PgBaseBackup from barman.config import BackupOptions from barman.copy_controller import RsyncCopyController from barman.exceptions import ( CommandFailedException, DataTransferFailure, FsOperationFailed, PostgresConnectionError, PostgresIsInRecovery, SshCommandException, ) from barman.fs import UnixLocalCommand, UnixRemoteCommand from barman.infofile import BackupInfo from barman.postgres_plumbing import EXCLUDE_LIST, PGDATA_EXCLUDE_LIST from barman.remote_status import RemoteStatusMixin from barman.utils import ( force_str, human_readable_timedelta, mkpath, total_seconds, with_metaclass, ) _logger = logging.getLogger(__name__) class BackupExecutor(with_metaclass(ABCMeta, RemoteStatusMixin)): """ Abstract base class for any backup executors. """ def __init__(self, backup_manager, mode=None): """ Base constructor :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the executor """ super(BackupExecutor, self).__init__() self.backup_manager = backup_manager self.server = backup_manager.server self.config = backup_manager.config self.strategy = None self._mode = mode self.copy_start_time = None self.copy_end_time = None # Holds the action being executed. Used for error messages. self.current_action = None def init(self): """ Initialise the internal state of the backup executor """ self.current_action = "starting backup" @property def mode(self): """ Property that defines the mode used for the backup. If a strategy is present, the returned string is a combination of the mode of the executor and the mode of the strategy (eg: rsync-exclusive) :return str: a string describing the mode used for the backup """ strategy_mode = self.strategy.mode if strategy_mode: return "%s-%s" % (self._mode, strategy_mode) else: return self._mode @abstractmethod def backup(self, backup_info): """ Perform a backup for the server - invoked by BackupManager.backup() :param barman.infofile.LocalBackupInfo backup_info: backup information """ def check(self, check_strategy): """ Perform additional checks - invoked by BackupManager.check() :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ def status(self): """ Set additional status info - invoked by BackupManager.status() """ def fetch_remote_status(self): """ Get additional remote status info - invoked by BackupManager.get_remote_status() This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. 
:rtype: dict[str, None|str] """ return {} def _purge_unused_wal_files(self, backup_info): """ It the provided backup is the first, purge all WAL files before the backup start. :param barman.infofile.LocalBackupInfo backup_info: the backup to check """ # Do nothing if the begin_wal is not defined yet if backup_info.begin_wal is None: return # If this is the first backup, purge unused WAL files previous_backup = self.backup_manager.get_previous_backup(backup_info.backup_id) if not previous_backup: output.info("This is the first backup for server %s", self.config.name) removed = self.backup_manager.remove_wal_before_backup(backup_info) if removed: # report the list of the removed WAL files output.info( "WAL segments preceding the current backup have been found:", log=False, ) for wal_name in removed: output.info( "\t%s from server %s has been removed", wal_name, self.config.name, ) def _start_backup_copy_message(self, backup_info): """ Output message for backup start :param barman.infofile.LocalBackupInfo backup_info: backup information """ output.info("Copying files for %s", backup_info.backup_id) def _stop_backup_copy_message(self, backup_info): """ Output message for backup end :param barman.infofile.LocalBackupInfo backup_info: backup information """ output.info( "Copy done (time: %s)", human_readable_timedelta( datetime.timedelta(seconds=backup_info.copy_stats["copy_time"]) ), ) def _parse_ssh_command(ssh_command): """ Parse a user provided ssh command to a single command and a list of arguments In case of error, the first member of the result (the command) will be None :param ssh_command: a ssh command provided by the user :return tuple[str,list[str]]: the command and a list of options """ try: ssh_options = ssh_command.split() except AttributeError: return None, [] ssh_command = ssh_options.pop(0) ssh_options.extend("-o BatchMode=yes -o StrictHostKeyChecking=no".split()) return ssh_command, ssh_options class PostgresBackupExecutor(BackupExecutor): """ Concrete class for backup via pg_basebackup (plain format). Relies on pg_basebackup command to copy data files from the PostgreSQL cluster using replication protocol. """ def __init__(self, backup_manager): """ Constructor :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the executor """ super(PostgresBackupExecutor, self).__init__(backup_manager, "postgres") self.validate_configuration() self.strategy = PostgresBackupStrategy(self.server.postgres, self.config.name) def validate_configuration(self): """ Validate the configuration for this backup executor. If the configuration is not compatible this method will disable the server. """ # Check for the correct backup options if BackupOptions.EXCLUSIVE_BACKUP in self.config.backup_options: self.config.backup_options.remove(BackupOptions.EXCLUSIVE_BACKUP) output.warning( "'exclusive_backup' is not a valid backup_option " "using postgres backup_method. " "Overriding with 'concurrent_backup'." ) # Apply the default backup strategy if BackupOptions.CONCURRENT_BACKUP not in self.config.backup_options: self.config.backup_options.add(BackupOptions.CONCURRENT_BACKUP) output.debug( "The default backup strategy for " "postgres backup_method is: concurrent_backup" ) # Forbid tablespace_bandwidth_limit option. # It works only with rsync based backups. 
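# ---------------------------------------------------------------------------
# Illustrative note (not part of Barman): what the module-level
# _parse_ssh_command() helper defined above returns for a typical
# ssh_command value. The sample command line is hypothetical.
#
#   cmd, opts = _parse_ssh_command("ssh -p 2222 postgres@pg.example.com")
#   # cmd  -> "ssh"
#   # opts -> ["-p", "2222", "postgres@pg.example.com",
#   #          "-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no"]
#
#   cmd, opts = _parse_ssh_command(None)
#   # cmd  -> None
#   # opts -> []
# ---------------------------------------------------------------------------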
if self.config.tablespace_bandwidth_limit: self.server.config.disabled = True # Report the error in the configuration errors message list self.server.config.msg_list.append( "tablespace_bandwidth_limit option is not supported by " "postgres backup_method" ) # Forbid reuse_backup option. # It works only with rsync based backups. if self.config.reuse_backup in ("copy", "link"): self.server.config.disabled = True # Report the error in the configuration errors message list self.server.config.msg_list.append( "reuse_backup option is not supported by postgres backup_method" ) # Forbid network_compression option. # It works only with rsync based backups. if self.config.network_compression: self.server.config.disabled = True # Report the error in the configuration errors message list self.server.config.msg_list.append( "network_compression option is not supported by " "postgres backup_method" ) # bandwidth_limit option is supported by pg_basebackup executable # starting from Postgres 9.4 if self.server.config.bandwidth_limit: # This method is invoked too early to have a working streaming # connection. So we avoid caching the result by directly # invoking fetch_remote_status() instead of get_remote_status() remote_status = self.fetch_remote_status() # If pg_basebackup is present and it doesn't support bwlimit # disable the server. if remote_status["pg_basebackup_bwlimit"] is False: self.server.config.disabled = True # Report the error in the configuration errors message list self.server.config.msg_list.append( "bandwidth_limit option is not supported by " "pg_basebackup version (current: %s, required: 9.4)" % remote_status["pg_basebackup_version"] ) def backup(self, backup_info): """ Perform a backup for the server - invoked by BackupManager.backup() through the generic interface of a BackupExecutor. This implementation is responsible for performing a backup through the streaming protocol. The connection must be made with a superuser or a user having REPLICATION permissions (see PostgreSQL documentation, Section 20.2), and pg_hba.conf must explicitly permit the replication connection. The server must also be configured with enough max_wal_senders to leave at least one session available for the backup. 
:param barman.infofile.LocalBackupInfo backup_info: backup information """ try: # Set data directory and server version self.strategy.start_backup(backup_info) backup_info.save() if backup_info.begin_wal is not None: output.info( "Backup start at LSN: %s (%s, %08X)", backup_info.begin_xlog, backup_info.begin_wal, backup_info.begin_offset, ) else: output.info("Backup start at LSN: %s", backup_info.begin_xlog) # Start the copy self.current_action = "copying files" self._start_backup_copy_message(backup_info) self.backup_copy(backup_info) self._stop_backup_copy_message(backup_info) self.strategy.stop_backup(backup_info) # If this is the first backup, purge eventually unused WAL files self._purge_unused_wal_files(backup_info) except CommandFailedException as e: _logger.exception(e) raise def check(self, check_strategy): """ Perform additional checks for PostgresBackupExecutor :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("pg_basebackup") remote_status = self.get_remote_status() # Check for the presence of pg_basebackup check_strategy.result( self.config.name, remote_status["pg_basebackup_installed"] ) # remote_status['pg_basebackup_compatible'] is None if # pg_basebackup cannot be executed and False if it is # not compatible. hint = None check_strategy.init_check("pg_basebackup compatible") if not remote_status["pg_basebackup_compatible"]: pg_version = "Unknown" basebackup_version = "Unknown" if self.server.streaming is not None: pg_version = self.server.streaming.server_txt_version if remote_status["pg_basebackup_version"] is not None: basebackup_version = remote_status["pg_basebackup_version"] hint = "PostgreSQL version: %s, pg_basebackup version: %s" % ( pg_version, basebackup_version, ) check_strategy.result( self.config.name, remote_status["pg_basebackup_compatible"], hint=hint ) # Skip further checks if the postgres connection doesn't work. # We assume that this error condition will be reported by # another check. postgres = self.server.postgres if postgres is None or postgres.server_txt_version is None: return check_strategy.init_check("pg_basebackup supports tablespaces mapping") # We can't backup a cluster with tablespaces if the tablespace # mapping option is not available in the installed version # of pg_basebackup. pg_version = Version(postgres.server_txt_version) tablespaces_list = postgres.get_tablespaces() # pg_basebackup supports the tablespace-mapping option, # so there are no problems in this case if remote_status["pg_basebackup_tbls_mapping"]: hint = None check_result = True # pg_basebackup doesn't support the tablespace-mapping option # and the data directory contains tablespaces, we can't correctly # backup it. elif tablespaces_list: check_result = False if pg_version < "9.3": hint = ( "pg_basebackup can't be used with tablespaces " "and PostgreSQL older than 9.3" ) else: hint = "pg_basebackup 9.4 or higher is required for tablespaces support" # Even if pg_basebackup doesn't support the tablespace-mapping # option, this location can be correctly backed up as doesn't # have any tablespaces else: check_result = True if pg_version < "9.3": hint = ( "pg_basebackup can be used as long as tablespaces " "support is not required" ) else: hint = "pg_basebackup 9.4 or higher is required for tablespaces support" check_strategy.result(self.config.name, check_result, hint=hint) def fetch_remote_status(self): """ Gather info from the remote server. 
This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. """ remote_status = dict.fromkeys( ( "pg_basebackup_compatible", "pg_basebackup_installed", "pg_basebackup_tbls_mapping", "pg_basebackup_path", "pg_basebackup_bwlimit", "pg_basebackup_version", ), None, ) # Test pg_basebackup existence version_info = PgBaseBackup.get_version_info(self.server.path) if version_info["full_path"]: remote_status["pg_basebackup_installed"] = True remote_status["pg_basebackup_path"] = version_info["full_path"] remote_status["pg_basebackup_version"] = version_info["full_version"] pgbasebackup_version = version_info["major_version"] else: remote_status["pg_basebackup_installed"] = False return remote_status # Is bandwidth limit supported? if ( remote_status["pg_basebackup_version"] is not None and remote_status["pg_basebackup_version"] < "9.4" ): remote_status["pg_basebackup_bwlimit"] = False else: remote_status["pg_basebackup_bwlimit"] = True # Is the tablespace mapping option supported? if pgbasebackup_version >= "9.4": remote_status["pg_basebackup_tbls_mapping"] = True else: remote_status["pg_basebackup_tbls_mapping"] = False # Retrieve the PostgreSQL version pg_version = None if self.server.streaming is not None: pg_version = self.server.streaming.server_major_version # If any of the two versions is unknown, we can't compare them if pgbasebackup_version is None or pg_version is None: # Return here. We are unable to retrieve # pg_basebackup or PostgreSQL versions return remote_status # pg_version is not None so transform into a Version object # for easier comparison between versions pg_version = Version(pg_version) # pg_basebackup 9.2 is compatible only with PostgreSQL 9.2. if "9.2" == pg_version == pgbasebackup_version: remote_status["pg_basebackup_compatible"] = True # other versions are compatible with lesser versions of PostgreSQL # WARNING: The development versions of `pg_basebackup` are considered # higher than the stable versions here, but this is not an issue # because it accepts everything that is less than # the `pg_basebackup` version(e.g. '9.6' is less than '9.6devel') elif "9.2" < pg_version <= pgbasebackup_version: remote_status["pg_basebackup_compatible"] = True else: remote_status["pg_basebackup_compatible"] = False return remote_status def backup_copy(self, backup_info): """ Perform the actual copy of the backup using pg_basebackup. First, manages tablespaces, then copies the base backup using the streaming protocol. In case of failure during the execution of the pg_basebackup command the method raises a DataTransferFailure, this trigger the retrying mechanism when necessary. 
:param barman.infofile.LocalBackupInfo backup_info: backup information """ # Make sure the destination directory exists, ensure the # right permissions to the destination dir backup_dest = backup_info.get_data_directory() dest_dirs = [backup_dest] # Store the start time self.copy_start_time = datetime.datetime.now() # Manage tablespaces, we need to handle them now in order to # be able to relocate them inside the # destination directory of the basebackup tbs_map = {} if backup_info.tablespaces: for tablespace in backup_info.tablespaces: source = tablespace.location destination = backup_info.get_data_directory(tablespace.oid) tbs_map[source] = destination dest_dirs.append(destination) # Prepare the destination directories for pgdata and tablespaces self._prepare_backup_destination(dest_dirs) # Retrieve pg_basebackup version information remote_status = self.get_remote_status() # If pg_basebackup supports --max-rate set the bandwidth_limit bandwidth_limit = None if remote_status["pg_basebackup_bwlimit"]: bandwidth_limit = self.config.bandwidth_limit # Make sure we are not wasting precious PostgreSQL resources # for the whole duration of the copy self.server.close() pg_basebackup = PgBaseBackup( connection=self.server.streaming, destination=backup_dest, command=remote_status["pg_basebackup_path"], version=remote_status["pg_basebackup_version"], app_name=self.config.streaming_backup_name, tbs_mapping=tbs_map, bwlimit=bandwidth_limit, immediate=self.config.immediate_checkpoint, path=self.server.path, retry_times=self.config.basebackup_retry_times, retry_sleep=self.config.basebackup_retry_sleep, retry_handler=partial(self._retry_handler, dest_dirs), ) # Do the actual copy try: pg_basebackup() except CommandFailedException as e: msg = ( "data transfer failure on directory '%s'" % backup_info.get_data_directory() ) raise DataTransferFailure.from_command_error("pg_basebackup", e, msg) # Store the end time self.copy_end_time = datetime.datetime.now() # Store statistics about the copy copy_time = total_seconds(self.copy_end_time - self.copy_start_time) backup_info.copy_stats = { "copy_time": copy_time, "total_time": copy_time, } # Check for the presence of configuration files outside the PGDATA external_config = backup_info.get_external_config_files() if any(external_config): msg = ( "pg_basebackup does not copy the PostgreSQL " "configuration files that reside outside PGDATA. " "Please manually backup the following files:\n" "\t%s\n" % "\n\t".join(ecf.path for ecf in external_config) ) # Show the warning only if the EXTERNAL_CONFIGURATION option # is not specified in the backup_options. if BackupOptions.EXTERNAL_CONFIGURATION not in self.config.backup_options: output.warning(msg) else: _logger.debug(msg) def _retry_handler(self, dest_dirs, command, args, kwargs, attempt, exc): """ Handler invoked during a backup in case of retry. The method simply warn the user of the failure and remove the already existing directories of the backup. 
:param list[str] dest_dirs: destination directories :param RsyncPgData command: Command object being executed :param list args: command args :param dict kwargs: command kwargs :param int attempt: attempt number (starting from 0) :param CommandFailedException exc: the exception which caused the failure """ output.warning( "Failure executing a backup using pg_basebackup (attempt %s)", attempt ) output.warning( "The files copied so far will be removed and " "the backup process will restart in %s seconds", self.config.basebackup_retry_sleep, ) # Remove all the destination directories and reinit the backup self._prepare_backup_destination(dest_dirs) def _prepare_backup_destination(self, dest_dirs): """ Prepare the destination of the backup, including tablespaces. This method is also responsible for removing a directory if it already exists and for ensuring the correct permissions for the created directories :param list[str] dest_dirs: destination directories """ for dest_dir in dest_dirs: # Remove a dir if exists. Ignore eventual errors shutil.rmtree(dest_dir, ignore_errors=True) # create the dir mkpath(dest_dir) # Ensure the right permissions to the destination directory # chmod 0700 octal os.chmod(dest_dir, 448) def _start_backup_copy_message(self, backup_info): output.info( "Starting backup copy via pg_basebackup for %s", backup_info.backup_id ) class FsBackupExecutor(with_metaclass(ABCMeta, BackupExecutor)): """ Abstract base class file system level backup executors that can operate remotely via SSH or locally: - remote mode (default), operates via SSH - local mode, operates as the same user that Barman runs with It is also a factory for exclusive/concurrent backup strategy objects. Raises a SshCommandException if 'ssh_command' is not set and not operating in local mode. """ def __init__(self, backup_manager, mode, local_mode=False): """ Constructor of the abstract class for backups via Ssh :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the executor :param bool local_mode: if set to False (default), the class is able to operate on remote servers using SSH. Operates only locally if set to True. """ super(FsBackupExecutor, self).__init__(backup_manager, mode) # Set local/remote mode for copy self.local_mode = local_mode # Retrieve the ssh command and the options necessary for the # remote ssh access. self.ssh_command, self.ssh_options = _parse_ssh_command( backup_manager.config.ssh_command ) if not self.local_mode: # Remote copy requires ssh_command to be set if not self.ssh_command: raise SshCommandException( "Missing or invalid ssh_command in barman configuration " "for server %s" % backup_manager.config.name ) else: # Local copy requires ssh_command not to be set if self.ssh_command: raise SshCommandException( "Local copy requires ssh_command in barman configuration " "to be empty for server %s" % backup_manager.config.name ) # Apply the default backup strategy backup_options = self.config.backup_options concurrent_backup = BackupOptions.CONCURRENT_BACKUP in backup_options exclusive_backup = BackupOptions.EXCLUSIVE_BACKUP in backup_options if not concurrent_backup and not exclusive_backup: self.config.backup_options.add(BackupOptions.EXCLUSIVE_BACKUP) output.warning( "No backup strategy set for server '%s' " "(using default 'exclusive_backup').", self.config.name, ) output.warning( "The default backup strategy will change " "to 'concurrent_backup' in the future. " "Explicitly set 'backup_options' to silence this warning." 
) # Depending on the backup options value, create the proper strategy if BackupOptions.CONCURRENT_BACKUP in self.config.backup_options: # Concurrent backup strategy self.strategy = LocalConcurrentBackupStrategy( self.server.postgres, self.config.name ) else: # Exclusive backup strategy self.strategy = ExclusiveBackupStrategy( self.server.postgres, self.config.name ) def _update_action_from_strategy(self): """ Update the executor's current action with the one of the strategy. This is used during exception handling to let the caller know where the failure occurred. """ action = getattr(self.strategy, "current_action", None) if action: self.current_action = action @abstractmethod def backup_copy(self, backup_info): """ Performs the actual copy of a backup for the server :param barman.infofile.LocalBackupInfo backup_info: backup information """ def backup(self, backup_info): """ Perform a backup for the server - invoked by BackupManager.backup() through the generic interface of a BackupExecutor. This implementation is responsible for performing a backup through a remote connection to the PostgreSQL server via Ssh. The specific set of instructions depends on both the specific class that derives from FsBackupExecutor and the selected strategy (e.g. exclusive backup through Rsync). :param barman.infofile.LocalBackupInfo backup_info: backup information """ # Start the backup, all the subsequent code must be wrapped in a # try except block which finally issues a stop_backup command try: self.strategy.start_backup(backup_info) except BaseException: self._update_action_from_strategy() raise try: # save any metadata changed by start_backup() call # This must be inside the try-except, because it could fail backup_info.save() if backup_info.begin_wal is not None: output.info( "Backup start at LSN: %s (%s, %08X)", backup_info.begin_xlog, backup_info.begin_wal, backup_info.begin_offset, ) else: output.info("Backup start at LSN: %s", backup_info.begin_xlog) # If this is the first backup, purge eventually unused WAL files self._purge_unused_wal_files(backup_info) # Start the copy self.current_action = "copying files" self._start_backup_copy_message(backup_info) self.backup_copy(backup_info) self._stop_backup_copy_message(backup_info) # Try again to purge eventually unused WAL files. At this point # the begin_wal value is surely known. Doing it twice is safe # because this function is useful only during the first backup. self._purge_unused_wal_files(backup_info) except BaseException: # we do not need to do anything here besides re-raising the # exception. It will be handled in the external try block. 
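# ---------------------------------------------------------------------------
# Illustrative note (not part of Barman): summary of how the backup_options
# value maps to the strategy object chosen by FsBackupExecutor above.
#
#   backup_options contains concurrent_backup -> LocalConcurrentBackupStrategy
#   backup_options contains exclusive_backup  -> ExclusiveBackupStrategy
#   backup_options not set                    -> exclusive_backup is added with a
#                                                warning, so ExclusiveBackupStrategy
# ---------------------------------------------------------------------------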
output.error("The backup has failed %s", self.current_action) raise else: self.current_action = "issuing stop of the backup" finally: output.info("Asking PostgreSQL server to finalize the backup.") try: self.strategy.stop_backup(backup_info) except BaseException: self._update_action_from_strategy() raise def _local_check(self, check_strategy): """ Specific checks for local mode of FsBackupExecutor (same user) :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ cmd = UnixLocalCommand(path=self.server.path) pgdata = self.server.postgres.get_setting("data_directory") # Check that PGDATA is accessible check_strategy.init_check("local PGDATA") hint = "Access to local PGDATA" try: cmd.check_directory_exists(pgdata) except FsOperationFailed as e: hint = force_str(e).strip() # Output the result check_strategy.result(self.config.name, cmd is not None, hint=hint) def _remote_check(self, check_strategy): """ Specific checks for remote mode of FsBackupExecutor, via SSH. :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Check the SSH connection check_strategy.init_check("ssh") hint = "PostgreSQL server" cmd = None minimal_ssh_output = None try: cmd = UnixRemoteCommand( self.ssh_command, self.ssh_options, path=self.server.path ) minimal_ssh_output = "".join(cmd.get_last_output()) except FsOperationFailed as e: hint = force_str(e).strip() # Output the result check_strategy.result(self.config.name, cmd is not None, hint=hint) # Check that the communication channel is "clean" if minimal_ssh_output: check_strategy.init_check("ssh output clean") check_strategy.result( self.config.name, False, hint="the configured ssh_command must not add anything to " "the remote command output", ) # If SSH works but PostgreSQL is not responding server_txt_version = self.server.get_remote_status().get("server_txt_version") if cmd is not None and server_txt_version is None: # Check for 'backup_label' presence last_backup = self.server.get_backup( self.server.get_last_backup_id(BackupInfo.STATUS_NOT_EMPTY) ) # Look for the latest backup in the catalogue if last_backup: check_strategy.init_check("backup_label") # Get PGDATA and build path to 'backup_label' backup_label = os.path.join(last_backup.pgdata, "backup_label") # Verify that backup_label exists in the remote PGDATA. # If so, send an alert. Do not show anything if OK. exists = cmd.exists(backup_label) if exists: hint = ( "Check that the PostgreSQL server is up " "and no 'backup_label' file is in PGDATA." ) check_strategy.result(self.config.name, False, hint=hint) def check(self, check_strategy): """ Perform additional checks for FsBackupExecutor, including Ssh connection (executing a 'true' command on the remote server) and specific checks for the given backup strategy. :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ if self.local_mode: # Perform checks for the local case self._local_check(check_strategy) else: # Perform checks for the remote case self._remote_check(check_strategy) try: # Invoke specific checks for the backup strategy self.strategy.check(check_strategy) except BaseException: self._update_action_from_strategy() raise def status(self): """ Set additional status info for FsBackupExecutor using remote commands via Ssh, as well as those defined by the given backup strategy. 
""" try: # Invoke the status() method for the given strategy self.strategy.status() except BaseException: self._update_action_from_strategy() raise def fetch_remote_status(self): """ Get remote information on PostgreSQL using Ssh, such as last archived WAL file This method does not raise any exception in case of errors, but set the missing values to None in the resulting dictionary. :rtype: dict[str, None|str] """ remote_status = {} # Retrieve the last archived WAL using a Ssh connection on # the remote server and executing an 'ls' command. Only # for pre-9.4 versions of PostgreSQL. try: if self.server.postgres and self.server.postgres.server_version < 90400: remote_status["last_archived_wal"] = None if self.server.postgres.get_setting( "data_directory" ) and self.server.postgres.get_setting("archive_command"): if not self.local_mode: cmd = UnixRemoteCommand( self.ssh_command, self.ssh_options, path=self.server.path ) else: cmd = UnixLocalCommand(path=self.server.path) # Here the name of the PostgreSQL WALs directory is # hardcoded, but that doesn't represent a problem as # this code runs only for PostgreSQL < 9.4 archive_dir = os.path.join( self.server.postgres.get_setting("data_directory"), "pg_xlog", "archive_status", ) out = str(cmd.list_dir_content(archive_dir, ["-t"])) for line in out.splitlines(): if line.endswith(".done"): name = line[:-5] if xlog.is_any_xlog_file(name): remote_status["last_archived_wal"] = name break except (PostgresConnectionError, FsOperationFailed) as e: _logger.warning("Error retrieving PostgreSQL status: %s", e) return remote_status def _start_backup_copy_message(self, backup_info): number_of_workers = self.config.parallel_jobs via = "rsync/SSH" if self.local_mode: via = "local rsync" message = "Starting backup copy via %s for %s" % ( via, backup_info.backup_id, ) if number_of_workers > 1: message += " (%s jobs)" % number_of_workers output.info(message) class PassiveBackupExecutor(BackupExecutor): """ Dummy backup executors for Passive servers. Raises a SshCommandException if 'primary_ssh_command' is not set. """ def __init__(self, backup_manager): """ Constructor of Dummy backup executors for Passive servers. :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the executor """ super(PassiveBackupExecutor, self).__init__(backup_manager) # Retrieve the ssh command and the options necessary for the # remote ssh access. self.ssh_command, self.ssh_options = _parse_ssh_command( backup_manager.config.primary_ssh_command ) # Requires ssh_command to be set if not self.ssh_command: raise SshCommandException( "Invalid primary_ssh_command in barman configuration " "for server %s" % backup_manager.config.name ) def backup(self, backup_info): """ This method should never be called, because this is a passive server :param barman.infofile.LocalBackupInfo backup_info: backup information """ # The 'backup' command is not available on a passive node. # If we get here, there is a programming error assert False def check(self, check_strategy): """ Perform additional checks for PassiveBackupExecutor, including Ssh connection to the primary (executing a 'true' command on the remote server). 
:param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("ssh") hint = "Barman primary node" cmd = None minimal_ssh_output = None try: cmd = UnixRemoteCommand( self.ssh_command, self.ssh_options, path=self.server.path ) minimal_ssh_output = "".join(cmd.get_last_output()) except FsOperationFailed as e: hint = force_str(e).strip() # Output the result check_strategy.result(self.config.name, cmd is not None, hint=hint) # Check if the communication channel is "clean" if minimal_ssh_output: check_strategy.init_check("ssh output clean") check_strategy.result( self.config.name, False, hint="the configured ssh_command must not add anything to " "the remote command output", ) def status(self): """ Set additional status info for PassiveBackupExecutor. """ # On passive nodes show the primary_ssh_command output.result( "status", self.config.name, "primary_ssh_command", "SSH command to primary server", self.config.primary_ssh_command, ) @property def mode(self): """ Property that defines the mode used for the backup. :return str: a string describing the mode used for the backup """ return "passive" class RsyncBackupExecutor(FsBackupExecutor): """ Concrete class for backup via Rsync+Ssh. It invokes PostgreSQL commands to start and stop the backup, depending on the defined strategy. Data files are copied using Rsync via Ssh. It heavily relies on methods defined in the FsBackupExecutor class from which it derives. """ def __init__(self, backup_manager, local_mode=False): """ Constructor :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the strategy """ super(RsyncBackupExecutor, self).__init__(backup_manager, "rsync", local_mode) def backup_copy(self, backup_info): """ Perform the actual copy of the backup using Rsync. First, it copies one tablespace at a time, then the PGDATA directory, and finally configuration files (if outside PGDATA). Bandwidth limitation, according to configuration, is applied in the process. This method is the core of base backup copy using Rsync+Ssh. :param barman.infofile.LocalBackupInfo backup_info: backup information """ # Retrieve the previous backup metadata, then calculate safe_horizon previous_backup = self.backup_manager.get_previous_backup(backup_info.backup_id) safe_horizon = None reuse_backup = None # Store the start time self.copy_start_time = datetime.datetime.now() if previous_backup: # safe_horizon is a tz-aware timestamp because BackupInfo class # ensures that property reuse_backup = self.config.reuse_backup safe_horizon = previous_backup.begin_time # Create the copy controller object, specific for rsync, # which will drive all the copy operations. 
Items to be # copied are added before executing the copy() method controller = RsyncCopyController( path=self.server.path, ssh_command=self.ssh_command, ssh_options=self.ssh_options, network_compression=self.config.network_compression, reuse_backup=reuse_backup, safe_horizon=safe_horizon, retry_times=self.config.basebackup_retry_times, retry_sleep=self.config.basebackup_retry_sleep, workers=self.config.parallel_jobs, ) # List of paths to be excluded by the PGDATA copy exclude_and_protect = [] # Process every tablespace if backup_info.tablespaces: for tablespace in backup_info.tablespaces: # If the tablespace location is inside the data directory, # exclude and protect it from being copied twice during # the data directory copy if tablespace.location.startswith(backup_info.pgdata + "/"): exclude_and_protect += [ tablespace.location[len(backup_info.pgdata) :] ] # Exclude and protect the tablespace from being copied again # during the data directory copy exclude_and_protect += ["/pg_tblspc/%s" % tablespace.oid] # Make sure the destination directory exists in order for # smart copy to detect that no file is present there tablespace_dest = backup_info.get_data_directory(tablespace.oid) mkpath(tablespace_dest) # Add the tablespace directory to the list of objects # to be copied by the controller. # NOTE: Barman should archive only the content of directory # "PG_" + PG_MAJORVERSION + "_" + CATALOG_VERSION_NO # but CATALOG_VERSION_NO is not easy to retrieve, so we copy # "PG_" + PG_MAJORVERSION + "_*" # It could select some spurious directory if a development or # a beta version have been used, but it's good enough for a # production system as it filters out other major versions. controller.add_directory( label=tablespace.name, src="%s/" % self._format_src(tablespace.location), dst=tablespace_dest, exclude=["/*"] + EXCLUDE_LIST, include=["/PG_%s_*" % self.server.postgres.server_major_version], bwlimit=self.config.get_bwlimit(tablespace), reuse=self._reuse_path(previous_backup, tablespace), item_class=controller.TABLESPACE_CLASS, ) # Make sure the destination directory exists in order for smart copy # to detect that no file is present there backup_dest = backup_info.get_data_directory() mkpath(backup_dest) # Add the PGDATA directory to the list of objects to be copied # by the controller controller.add_directory( label="pgdata", src="%s/" % self._format_src(backup_info.pgdata), dst=backup_dest, exclude=PGDATA_EXCLUDE_LIST + EXCLUDE_LIST, exclude_and_protect=exclude_and_protect, bwlimit=self.config.get_bwlimit(), reuse=self._reuse_path(previous_backup), item_class=controller.PGDATA_CLASS, ) # At last copy pg_control controller.add_file( label="pg_control", src="%s/global/pg_control" % self._format_src(backup_info.pgdata), dst="%s/global/pg_control" % (backup_dest,), item_class=controller.PGCONTROL_CLASS, ) # Copy configuration files (if not inside PGDATA) external_config_files = backup_info.get_external_config_files() included_config_files = [] for config_file in external_config_files: # Add included files to a list, they will be handled later if config_file.file_type == "include": included_config_files.append(config_file) continue # If the ident file is missing, it isn't an error condition # for PostgreSQL. # Barman is consistent with this behavior. 
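# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Barman): the exclude_and_protect entries
# built earlier in this method for a tablespace that lives inside PGDATA.
# The paths and the OID below are hypothetical sample values.
example_pgdata = "/var/lib/pgsql/13/data"
example_tbs_location = "/var/lib/pgsql/13/data/my_tbs"
example_tbs_oid = 16395
example_exclude_and_protect = []
if example_tbs_location.startswith(example_pgdata + "/"):
    # Relative path of the tablespace inside PGDATA: "/my_tbs"
    example_exclude_and_protect.append(example_tbs_location[len(example_pgdata):])
# The pg_tblspc entry is always excluded and protected: "/pg_tblspc/16395"
example_exclude_and_protect.append("/pg_tblspc/%s" % example_tbs_oid)
# ---------------------------------------------------------------------------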
optional = False if config_file.file_type == "ident_file": optional = True # Create the actual copy jobs in the controller controller.add_file( label=config_file.file_type, src=self._format_src(config_file.path), dst=backup_dest, optional=optional, item_class=controller.CONFIG_CLASS, ) # Execute the copy try: controller.copy() # TODO: Improve the exception output except CommandFailedException as e: msg = "data transfer failure" raise DataTransferFailure.from_command_error("rsync", e, msg) # Store the end time self.copy_end_time = datetime.datetime.now() # Store statistics about the copy backup_info.copy_stats = controller.statistics() # Check for any include directives in PostgreSQL configuration # Currently, include directives are not supported for files that # reside outside PGDATA. These files must be manually backed up. # Barman will emit a warning and list those files if any(included_config_files): msg = ( "The usage of include directives is not supported " "for files that reside outside PGDATA.\n" "Please manually backup the following files:\n" "\t%s\n" % "\n\t".join(icf.path for icf in included_config_files) ) # Show the warning only if the EXTERNAL_CONFIGURATION option # is not specified in the backup_options. if BackupOptions.EXTERNAL_CONFIGURATION not in self.config.backup_options: output.warning(msg) else: _logger.debug(msg) def _reuse_path(self, previous_backup_info, tablespace=None): """ If reuse_backup is 'copy' or 'link', builds the path of the directory to reuse, otherwise always returns None. If oid is None, it returns the full path of PGDATA directory of the previous_backup otherwise it returns the path to the specified tablespace using it's oid. :param barman.infofile.LocalBackupInfo previous_backup_info: backup to be reused :param barman.infofile.Tablespace tablespace: the tablespace to copy :returns: a string containing the local path with data to be reused or None :rtype: str|None """ oid = None if tablespace: oid = tablespace.oid if ( self.config.reuse_backup in ("copy", "link") and previous_backup_info is not None ): try: return previous_backup_info.get_data_directory(oid) except ValueError: return None def _format_src(self, path): """ If the executor is operating in remote mode, add a `:` in front of the path for rsync to work via SSH. :param string path: the path to format :return str: the formatted path string """ if not self.local_mode: return ":%s" % path return path class BackupStrategy(with_metaclass(ABCMeta, object)): """ Abstract base class for a strategy to be used by a backup executor. """ #: Regex for START WAL LOCATION info START_TIME_RE = re.compile(r"^START TIME: (.*)", re.MULTILINE) #: Regex for START TIME info WAL_RE = re.compile(r"^START WAL LOCATION: (.*) \(file (.*)\)", re.MULTILINE) def __init__(self, postgres, server_name, mode=None): """ Constructor :param barman.postgres.PostgreSQLConnection postgres: the PostgreSQL connection :param str server_name: The name of the server """ self.postgres = postgres self.server_name = server_name # Holds the action being executed. Used for error messages. 
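# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Barman): what the WAL_RE and START_TIME_RE
# patterns declared above extract from a backup_label file. The label content
# is a hypothetical sample in the format written by PostgreSQL.
import re

_EXAMPLE_WAL_RE = re.compile(r"^START WAL LOCATION: (.*) \(file (.*)\)", re.MULTILINE)
_EXAMPLE_START_TIME_RE = re.compile(r"^START TIME: (.*)", re.MULTILINE)
_EXAMPLE_LABEL = (
    "START WAL LOCATION: 0/2000028 (file 000000010000000000000002)\n"
    "CHECKPOINT LOCATION: 0/2000060\n"
    "START TIME: 2022-01-01 00:00:00 UTC\n"
)
_wal_match = _EXAMPLE_WAL_RE.search(_EXAMPLE_LABEL)
# _wal_match.group(1) -> "0/2000028" (begin_xlog)
# _wal_match.group(2) -> "000000010000000000000002" (begin_wal)
# int(_wal_match.group(2)[0:8], 16) -> 1, the timeline encoded in the file name
_time_match = _EXAMPLE_START_TIME_RE.search(_EXAMPLE_LABEL)
# _time_match.group(1) -> "2022-01-01 00:00:00 UTC" (begin_time, once parsed)
# ---------------------------------------------------------------------------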
self.current_action = None self.mode = mode def start_backup(self, backup_info): """ Issue a start of a backup - invoked by BackupExecutor.backup() :param barman.infofile.BackupInfo backup_info: backup information """ # Retrieve PostgreSQL server metadata self._pg_get_metadata(backup_info) # Record that we are about to start the backup self.current_action = "issuing start backup command" _logger.debug(self.current_action) @abstractmethod def stop_backup(self, backup_info): """ Issue a stop of a backup - invoked by BackupExecutor.backup() :param barman.infofile.LocalBackupInfo backup_info: backup information """ @abstractmethod def check(self, check_strategy): """ Perform additional checks - invoked by BackupExecutor.check() :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # noinspection PyMethodMayBeStatic def status(self): """ Set additional status info - invoked by BackupExecutor.status() """ def _pg_get_metadata(self, backup_info): """ Load PostgreSQL metadata into the backup_info parameter :param barman.infofile.BackupInfo backup_info: backup information """ # Get the PostgreSQL data directory location self.current_action = "detecting data directory" output.debug(self.current_action) data_directory = self.postgres.get_setting("data_directory") backup_info.set_attribute("pgdata", data_directory) # Set server version backup_info.set_attribute("version", self.postgres.server_version) # Set XLOG segment size backup_info.set_attribute("xlog_segment_size", self.postgres.xlog_segment_size) # Set configuration files location cf = self.postgres.get_configuration_files() for key in cf: backup_info.set_attribute(key, cf[key]) # Get tablespaces information self.current_action = "detecting tablespaces" output.debug(self.current_action) tablespaces = self.postgres.get_tablespaces() if tablespaces and len(tablespaces) > 0: backup_info.set_attribute("tablespaces", tablespaces) for item in tablespaces: msg = "\t%s, %s, %s" % (item.oid, item.name, item.location) _logger.info(msg) @staticmethod def _backup_info_from_start_location(backup_info, start_info): """ Fill a backup info with information from a start_backup :param barman.infofile.BackupInfo backup_info: object representing a backup :param DictCursor start_info: the result of the pg_start_backup command """ backup_info.set_attribute("status", BackupInfo.STARTED) backup_info.set_attribute("begin_time", start_info["timestamp"]) backup_info.set_attribute("begin_xlog", start_info["location"]) # PostgreSQL 9.6+ directly provides the timeline if start_info.get("timeline") is not None: backup_info.set_attribute("timeline", start_info["timeline"]) # Take a copy of stop_info because we are going to update it start_info = start_info.copy() start_info.update( xlog.location_to_xlogfile_name_offset( start_info["location"], start_info["timeline"], backup_info.xlog_segment_size, ) ) # If file_name and file_offset are available, use them file_name = start_info.get("file_name") file_offset = start_info.get("file_offset") if file_name is not None and file_offset is not None: backup_info.set_attribute("begin_wal", start_info["file_name"]) backup_info.set_attribute("begin_offset", start_info["file_offset"]) # If the timeline is still missing, extract it from the file_name if backup_info.timeline is None: backup_info.set_attribute( "timeline", int(start_info["file_name"][0:8], 16) ) @staticmethod def _backup_info_from_stop_location(backup_info, stop_info): """ Fill a backup info with information from a 
backup stop location :param barman.infofile.BackupInfo backup_info: object representing a backup :param DictCursor stop_info: location info of stop backup """ # If file_name or file_offset are missing build them using the stop # location and the timeline. file_name = stop_info.get("file_name") file_offset = stop_info.get("file_offset") if file_name is None or file_offset is None: # Take a copy of stop_info because we are going to update it stop_info = stop_info.copy() # Get the timeline from the stop_info if available, otherwise # Use the one from the backup_label timeline = stop_info.get("timeline") if timeline is None: timeline = backup_info.timeline stop_info.update( xlog.location_to_xlogfile_name_offset( stop_info["location"], timeline, backup_info.xlog_segment_size ) ) backup_info.set_attribute("end_time", stop_info["timestamp"]) backup_info.set_attribute("end_xlog", stop_info["location"]) backup_info.set_attribute("end_wal", stop_info["file_name"]) backup_info.set_attribute("end_offset", stop_info["file_offset"]) def _backup_info_from_backup_label(self, backup_info): """ Fill a backup info with information from the backup_label file :param barman.infofile.BackupInfo backup_info: object representing a backup """ # The backup_label must be already loaded assert backup_info.backup_label # Parse backup label wal_info = self.WAL_RE.search(backup_info.backup_label) start_time = self.START_TIME_RE.search(backup_info.backup_label) if wal_info is None or start_time is None: raise ValueError( "Failure parsing backup_label for backup %s" % backup_info.backup_id ) # Set data in backup_info from backup_label backup_info.set_attribute("timeline", int(wal_info.group(2)[0:8], 16)) backup_info.set_attribute("begin_xlog", wal_info.group(1)) backup_info.set_attribute("begin_wal", wal_info.group(2)) backup_info.set_attribute( "begin_offset", xlog.parse_lsn(wal_info.group(1)) % backup_info.xlog_segment_size, ) # If we have already obtained a begin_time then it takes precedence over the # begin time in the backup label if not backup_info.begin_time: backup_info.set_attribute( "begin_time", dateutil.parser.parse(start_time.group(1)) ) def _read_backup_label(self, backup_info): """ Read the backup_label file :param barman.infofile.LocalBackupInfo backup_info: backup information """ self.current_action = "reading the backup label" label_path = os.path.join(backup_info.get_data_directory(), "backup_label") output.debug("Reading backup label: %s" % label_path) with open(label_path, "r") as f: backup_info.set_attribute("backup_label", f.read()) class PostgresBackupStrategy(BackupStrategy): """ Concrete class for postgres backup strategy. This strategy is for PostgresBackupExecutor only and is responsible for executing pre e post backup operations during a physical backup executed using pg_basebackup. """ def check(self, check_strategy): """ Perform additional checks for the Postgres backup strategy """ def start_backup(self, backup_info): """ Manage the start of an pg_basebackup backup The method performs all the preliminary operations required for a backup executed using pg_basebackup to start, gathering information from postgres and filling the backup_info. 
:param barman.infofile.LocalBackupInfo backup_info: backup information """ self.current_action = "initialising postgres backup_method" super(PostgresBackupStrategy, self).start_backup(backup_info) current_xlog_info = self.postgres.current_xlog_info self._backup_info_from_start_location(backup_info, current_xlog_info) def stop_backup(self, backup_info): """ Manage the stop of an pg_basebackup backup The method retrieves the information necessary for the backup.info file reading the backup_label file. Due of the nature of the pg_basebackup, information that are gathered during the start of a backup performed using rsync, are retrieved here :param barman.infofile.LocalBackupInfo backup_info: backup information """ self._read_backup_label(backup_info) self._backup_info_from_backup_label(backup_info) # Set data in backup_info from current_xlog_info self.current_action = "stopping postgres backup_method" output.info("Finalising the backup.") # Get the current xlog position current_xlog_info = self.postgres.current_xlog_info if current_xlog_info: self._backup_info_from_stop_location(backup_info, current_xlog_info) # Ask PostgreSQL to switch to another WAL file. This is needed # to archive the transaction log file containing the backup # end position, which is required to recover from the backup. try: self.postgres.switch_wal() except PostgresIsInRecovery: # Skip switching XLOG if a standby server pass class ExclusiveBackupStrategy(BackupStrategy): """ Concrete class for exclusive backup strategy. This strategy is for FsBackupExecutor only and is responsible for coordinating Barman with PostgreSQL on standard physical backup operations (known as 'exclusive' backup), such as invoking pg_start_backup() and pg_stop_backup() on the master server. """ def __init__(self, postgres, server_name): """ Constructor :param barman.postgres.PostgreSQLConnection postgres: the PostgreSQL connection :param str server_name: The name of the server """ super(ExclusiveBackupStrategy, self).__init__( postgres, server_name, "exclusive" ) def start_backup(self, backup_info): """ Manage the start of an exclusive backup The method performs all the preliminary operations required for an exclusive physical backup to start, as well as preparing the information on the backup for Barman. :param barman.infofile.LocalBackupInfo backup_info: backup information """ super(ExclusiveBackupStrategy, self).start_backup(backup_info) label = "Barman backup %s %s" % (backup_info.server_name, backup_info.backup_id) # Issue an exclusive start backup command _logger.debug("Start of exclusive backup") start_info = self.postgres.start_exclusive_backup(label) self._backup_info_from_start_location(backup_info, start_info) def stop_backup(self, backup_info): """ Manage the stop of an exclusive backup The method informs the PostgreSQL server that the physical exclusive backup is finished, as well as preparing the information returned by PostgreSQL for Barman. :param barman.infofile.LocalBackupInfo backup_info: backup information """ self.current_action = "issuing stop backup command" _logger.debug("Stop of exclusive backup") stop_info = self.postgres.stop_exclusive_backup() self._backup_info_from_stop_location(backup_info, stop_info) def check(self, check_strategy): """ Perform additional checks for ExclusiveBackupStrategy :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Make sure PostgreSQL is not in recovery (i.e. 
is a master) check_strategy.init_check("not in recovery") if self.postgres: is_in_recovery = self.postgres.is_in_recovery if not is_in_recovery: check_strategy.result(self.server_name, True) else: check_strategy.result( self.server_name, False, hint="cannot perform exclusive backup on a standby", ) class ConcurrentBackupStrategy(BackupStrategy): """ Concrete class for concurrent backup strategy. This strategy is responsible for coordinating Barman with PostgreSQL on concurrent physical backup operations through concurrent backup PostgreSQL api or the pgespresso extension. """ def __init__(self, postgres, server_name): """ Constructor :param barman.postgres.PostgreSQLConnection postgres: the PostgreSQL connection :param str server_name: The name of the server """ super(ConcurrentBackupStrategy, self).__init__( postgres, server_name, "concurrent" ) def check(self, check_strategy): """ Perform additional checks for ConcurrentBackupStrategy :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ check_strategy.init_check("pgespresso extension") try: # We execute this check only if the postgres connection is non None # and the server version is lower than 9.6. On latest PostgreSQL # there is a native API for concurrent backups. if self.postgres and self.postgres.server_version < 90600: if self.postgres.has_pgespresso: check_strategy.result(self.server_name, True) else: check_strategy.result( self.server_name, False, hint="required for concurrent " "backups on PostgreSQL %s" % self.postgres.server_major_version, ) except PostgresConnectionError: # Skip the check if the postgres connection doesn't work. # We assume that this error condition will be reported by # another check. pass def start_backup(self, backup_info): """ Start of the backup. The method performs all the preliminary operations required for a backup to start. :param barman.infofile.BackupInfo backup_info: backup information """ super(ConcurrentBackupStrategy, self).start_backup(backup_info) label = "Barman backup %s %s" % (backup_info.server_name, backup_info.backup_id) pg_version = self.postgres.server_version if pg_version >= 90600: # On 9.6+ execute native concurrent start backup _logger.debug("Start of native concurrent backup") self._concurrent_start_backup(backup_info, label) else: # On older Postgres use pgespresso _logger.debug("Start of concurrent backup with pgespresso") self._pgespresso_start_backup(backup_info, label) def stop_backup(self, backup_info): """ Stop backup wrapper :param barman.infofile.BackupInfo backup_info: backup information """ pg_version = self.postgres.server_version self.current_action = "issuing stop backup command" if pg_version >= 90600: # On 9.6+ execute native concurrent stop backup self.current_action += " (native concurrent)" _logger.debug("Stop of native concurrent backup") self._concurrent_stop_backup(backup_info) else: # On older Postgres use pgespresso self.current_action += " (pgespresso)" _logger.debug("Stop of concurrent backup with pgespresso") self._pgespresso_stop_backup(backup_info) # Write backup_label retrieved from postgres connection self.current_action = "writing backup label" # Ask PostgreSQL to switch to another WAL file. This is needed # to archive the transaction log file containing the backup # end position, which is required to recover from the backup. 
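# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Barman): the version check used above by
# ConcurrentBackupStrategy to choose between the native concurrent backup API
# and the pgespresso extension. 90600 is the integer form of PostgreSQL 9.6.0
# as reported by server_version; the helper name is hypothetical.
def _example_concurrent_api(server_version):
    """Return which implementation the strategy would use."""
    return "native" if server_version >= 90600 else "pgespresso"

# _example_concurrent_api(100015) -> "native"     (PostgreSQL 10.15)
# _example_concurrent_api(90409)  -> "pgespresso" (PostgreSQL 9.4.9)
# ---------------------------------------------------------------------------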
try: self.postgres.switch_wal() except PostgresIsInRecovery: # Skip switching XLOG if a standby server pass def _pgespresso_start_backup(self, backup_info, label): """ Start a concurrent backup using pgespresso :param barman.infofile.BackupInfo backup_info: backup information """ backup_info.set_attribute("status", BackupInfo.STARTED) start_info = self.postgres.pgespresso_start_backup(label) backup_info.set_attribute("backup_label", start_info["backup_label"]) self._backup_info_from_backup_label(backup_info) def _pgespresso_stop_backup(self, backup_info): """ Stop a concurrent backup using pgespresso :param barman.infofile.BackupInfo backup_info: backup information """ stop_info = self.postgres.pgespresso_stop_backup(backup_info.backup_label) # Obtain a modifiable copy of stop_info object stop_info = stop_info.copy() # We don't know the exact backup stop location, # so we include the whole segment. stop_info["location"] = xlog.location_from_xlogfile_name_offset( stop_info["end_wal"], self.postgres.xlog_segment_size - 1, self.postgres.xlog_segment_size, ) self._backup_info_from_stop_location(backup_info, stop_info) def _concurrent_start_backup(self, backup_info, label): """ Start a concurrent backup using the PostgreSQL 9.6 concurrent backup api :param barman.infofile.BackupInfo backup_info: backup information :param str label: the backup label """ start_info = self.postgres.start_concurrent_backup(label) self.postgres.allow_reconnect = False self._backup_info_from_start_location(backup_info, start_info) def _concurrent_stop_backup(self, backup_info): """ Stop a concurrent backup using the PostgreSQL 9.6 concurrent backup api :param barman.infofile.BackupInfo backup_info: backup information """ stop_info = self.postgres.stop_concurrent_backup() self.postgres.allow_reconnect = True backup_info.set_attribute("backup_label", stop_info["backup_label"]) self._backup_info_from_stop_location(backup_info, stop_info) class LocalConcurrentBackupStrategy(ConcurrentBackupStrategy): """ Concrete class for concurrent backup strategy writing data locally. This strategy is for FsBackupExecutor only and is responsible for coordinating Barman with PostgreSQL on concurrent physical backup operations through the pgespresso extension. """ # noinspection PyMethodMayBeStatic def _write_backup_label(self, backup_info): """ Write the backup_label file inside local data directory :param barman.infofile.LocalBackupInfo backup_info: backup information """ label_file = os.path.join(backup_info.get_data_directory(), "backup_label") output.debug("Writing backup label: %s" % label_file) with open(label_file, "w") as f: f.write(backup_info.backup_label) def stop_backup(self, backup_info): """ Stop backup wrapper :param barman.infofile.LocalBackupInfo backup_info: backup information """ super(LocalConcurrentBackupStrategy, self).stop_backup(backup_info) self._write_backup_label(backup_info) barman-2.18/barman/xlog.py0000644000621200062120000004003414172556763013664 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains functions to retrieve information about xlog files """ import collections import os import re from functools import partial from tempfile import NamedTemporaryFile from barman.exceptions import ( BadHistoryFileContents, BadXlogSegmentName, CommandException, WalArchiveContentError, ) # xlog file segment name parser (regular expression) _xlog_re = re.compile( r""" ^ ([\dA-Fa-f]{8}) # everything has a timeline (?: ([\dA-Fa-f]{8})([\dA-Fa-f]{8}) # segment name, if a wal file (?: # and optional \.[\dA-Fa-f]{8}\.backup # offset, if a backup label | \.partial # partial, if a partial file )? | \.history # or only .history, if a history file ) $ """, re.VERBOSE, ) # xlog location parser for concurrent backup (regular expression) _location_re = re.compile(r"^([\dA-F]+)/([\dA-F]+)$") # Taken from xlog_internal.h from PostgreSQL sources #: XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2 #: and larger than XLOG_BLCKSZ (preferably, a great deal larger than #: XLOG_BLCKSZ). DEFAULT_XLOG_SEG_SIZE = 1 << 24 #: This namedtuple is a container for the information #: contained inside history files HistoryFileData = collections.namedtuple( "HistoryFileData", "tli parent_tli switchpoint reason" ) def is_any_xlog_file(path): """ Return True if the xlog is either a WAL segment, a .backup file or a .history file, False otherwise. It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.match(os.path.basename(path)) if match: return True return False def is_history_file(path): """ Return True if the xlog is a .history file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith(".history"): return True return False def is_backup_file(path): """ Return True if the xlog is a .backup file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith(".backup"): return True return False def is_partial_file(path): """ Return True if the xlog is a .partial file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith(".partial"): return True return False def is_wal_file(path): """ Return True if the xlog is a regular xlog file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if not match: return False ends_with_backup = match.group(0).endswith(".backup") ends_with_history = match.group(0).endswith(".history") ends_with_partial = match.group(0).endswith(".partial") if ends_with_backup: return False if ends_with_history: return False if ends_with_partial: return False return True def decode_segment_name(path): """ Retrieve the timeline, log ID and segment ID from the name of a xlog segment It can handle either a full file path or a simple file name. 
:param str path: the file name to decode :rtype: list[int] """ name = os.path.basename(path) match = _xlog_re.match(name) if not match: raise BadXlogSegmentName(name) return [int(x, 16) if x else None for x in match.groups()] def encode_segment_name(tli, log, seg): """ Build the xlog segment name based on timeline, log ID and segment ID :param int tli: timeline number :param int log: log number :param int seg: segment number :return str: segment file name """ return "%08X%08X%08X" % (tli, log, seg) def encode_history_file_name(tli): """ Build the history file name based on timeline :return str: history file name """ return "%08X.history" % (tli,) def xlog_segments_per_file(xlog_segment_size): """ Given that WAL files are named using the following pattern: this is the number of XLOG segments in an XLOG file. By XLOG file we don't mean an actual file on the filesystem, but the definition used in the PostgreSQL sources: meaning a set of files containing the same file number. :param int xlog_segment_size: The XLOG segment size in bytes :return int: The number of segments in an XLOG file """ return 0xFFFFFFFF // xlog_segment_size def xlog_segment_mask(xlog_segment_size): """ Given that WAL files are named using the following pattern: this is the bitmask of segment part of an XLOG file. See the documentation of `xlog_segments_per_file` for a commentary on the definition of `XLOG` file. :param int xlog_segment_size: The XLOG segment size in bytes :return int: The size of an XLOG file """ return xlog_segment_size * xlog_segments_per_file(xlog_segment_size) def generate_segment_names(begin, end=None, version=None, xlog_segment_size=None): """ Generate a sequence of XLOG segments starting from ``begin`` If an ``end`` segment is provided the sequence will terminate after returning it, otherwise the sequence will never terminate. If the XLOG segment size is known, this generator is precise, switching to the next file when required. It the XLOG segment size is unknown, this generator will generate all the possible XLOG file names. The size of an XLOG segment can be every power of 2 between the XLOG block size (8Kib) and the size of a log segment (4Gib) :param str begin: begin segment name :param str|None end: optional end segment name :param int|None version: optional postgres version as an integer (e.g. 90301 for 9.3.1) :param int xlog_segment_size: the size of a XLOG segment :rtype: collections.Iterable[str] :raise: BadXlogSegmentName """ begin_tli, begin_log, begin_seg = decode_segment_name(begin) end_tli, end_log, end_seg = None, None, None if end: end_tli, end_log, end_seg = decode_segment_name(end) # this method doesn't support timeline changes assert begin_tli == end_tli, ( "Begin segment (%s) and end segment (%s) " "must have the same timeline part" % (begin, end) ) # If version is less than 9.3 the last segment must be skipped skip_last_segment = version is not None and version < 90300 # This is the number of XLOG segments in an XLOG file. By XLOG file # we don't mean an actual file on the filesystem, but the definition # used in the PostgreSQL sources: a set of files containing the # same file number. if xlog_segment_size: # The generator is operating is precise and correct mode: # knowing exactly when a switch to the next file is required xlog_seg_per_file = xlog_segments_per_file(xlog_segment_size) else: # The generator is operating only in precise mode: generating every # possible XLOG file name. 
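# 0x7FFFF corresponds to the smallest possible segment size, the 8KiB
# XLOG block size (0xFFFFFFFF // 8192 == 0x7FFFF), so no valid segment
# name can ever be skipped.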
xlog_seg_per_file = 0x7FFFF # Start from the first xlog and generate the segments sequentially # If ``end`` has been provided, the while condition ensure the termination # otherwise this generator will never stop cur_log, cur_seg = begin_log, begin_seg while ( end is None or cur_log < end_log or (cur_log == end_log and cur_seg <= end_seg) ): yield encode_segment_name(begin_tli, cur_log, cur_seg) cur_seg += 1 if cur_seg > xlog_seg_per_file or ( skip_last_segment and cur_seg == xlog_seg_per_file ): cur_seg = 0 cur_log += 1 def hash_dir(path): """ Get the directory where the xlog segment will be stored It can handle either a full file path or a simple file name. :param str|unicode path: xlog file name :return str: directory name """ tli, log, _ = decode_segment_name(path) # tli is always not None if log is not None: return "%08X%08X" % (tli, log) else: return "" def parse_lsn(lsn_string): """ Transform a string XLOG location, formatted as %X/%X, in the corresponding numeric representation :param str lsn_string: the string XLOG location, i.e. '2/82000168' :rtype: int """ lsn_list = lsn_string.split("/") if len(lsn_list) != 2: raise ValueError("Invalid LSN: %s", lsn_string) return (int(lsn_list[0], 16) << 32) + int(lsn_list[1], 16) def diff_lsn(lsn_string1, lsn_string2): """ Calculate the difference in bytes between two string XLOG location, formatted as %X/%X Tis function is a Python implementation of the ``pg_xlog_location_diff(str, str)`` PostgreSQL function. :param str lsn_string1: the string XLOG location, i.e. '2/82000168' :param str lsn_string2: the string XLOG location, i.e. '2/82000168' :rtype: int """ # If one the input is None returns None if lsn_string1 is None or lsn_string2 is None: return None return parse_lsn(lsn_string1) - parse_lsn(lsn_string2) def format_lsn(lsn): """ Transform a numeric XLOG location, in the corresponding %X/%X string representation :param int lsn: numeric XLOG location :rtype: str """ return "%X/%X" % (lsn >> 32, lsn & 0xFFFFFFFF) def location_to_xlogfile_name_offset(location, timeline, xlog_segment_size): """ Convert transaction log location string to file_name and file_offset This is a reimplementation of pg_xlogfile_name_offset PostgreSQL function This method returns a dictionary containing the following data: * file_name * file_offset :param str location: XLOG location :param int timeline: timeline :param int xlog_segment_size: the size of a XLOG segment :rtype: dict """ lsn = parse_lsn(location) log = lsn >> 32 seg = (lsn & xlog_segment_mask(xlog_segment_size)) // xlog_segment_size offset = lsn & (xlog_segment_size - 1) return { "file_name": encode_segment_name(timeline, log, seg), "file_offset": offset, } def location_from_xlogfile_name_offset(file_name, file_offset, xlog_segment_size): """ Convert file_name and file_offset to a transaction log location. This is the inverted function of PostgreSQL's pg_xlogfile_name_offset function. :param str file_name: a WAL file name :param int file_offset: a numeric offset :param int xlog_segment_size: the size of a XLOG segment :rtype: str """ decoded_segment = decode_segment_name(file_name) location = decoded_segment[1] << 32 location += decoded_segment[2] * xlog_segment_size location += file_offset return format_lsn(location) def decode_history_file(wal_info, comp_manager): """ Read an history file and parse its contents. Each line in the file represents a timeline switch, each field is separated by tab, empty lines are ignored and lines starting with '#' are comments. 
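For example: ``1    0/30000D8    no recovery target specified`` (fields are tab-separated; the values here are purely illustrative).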
Each line is composed by three fields: parentTLI, switchpoint and reason. "parentTLI" is the ID of the parent timeline. "switchpoint" is the WAL position where the switch happened "reason" is an human-readable explanation of why the timeline was changed The method requires a CompressionManager object to handle the eventual compression of the history file. :param barman.infofile.WalFileInfo wal_info: history file obj :param comp_manager: compression manager used in case of history file compression :return List[HistoryFileData]: information from the history file """ path = wal_info.orig_filename # Decompress the file if needed if wal_info.compression: # Use a NamedTemporaryFile to avoid explicit cleanup uncompressed_file = NamedTemporaryFile( dir=os.path.dirname(path), prefix=".%s." % wal_info.name, suffix=".uncompressed", ) path = uncompressed_file.name comp_manager.get_compressor(wal_info.compression).decompress( wal_info.orig_filename, path ) # Extract the timeline from history file name tli, _, _ = decode_segment_name(wal_info.name) lines = [] with open(path) as fp: for line in fp: line = line.strip() # Skip comments and empty lines if line.startswith("#"): continue # Skip comments and empty lines if len(line) == 0: continue # Use tab as separator contents = line.split("\t") if len(contents) != 3: # Invalid content of the line raise BadHistoryFileContents(path) history = HistoryFileData( tli=tli, parent_tli=int(contents[0]), switchpoint=parse_lsn(contents[1]), reason=contents[2], ) lines.append(history) # Empty history file or containing invalid content if len(lines) == 0: raise BadHistoryFileContents(path) else: return lines def _validate_timeline(timeline): """Check that timeline is a valid timeline value.""" try: # Explicitly check the type becauase python 2 will allow < to be used # between strings and ints if type(timeline) is not int or timeline < 1: raise ValueError() return True except Exception: raise CommandException( "Cannot check WAL archive with malformed timeline %s" % timeline ) def _wal_archive_filter_fun(timeline, wal): try: if not is_any_xlog_file(wal): raise ValueError() except Exception: raise WalArchiveContentError("Unexpected file %s found in WAL archive" % wal) wal_timeline, _, _ = decode_segment_name(wal) return timeline <= wal_timeline def check_archive_usable(existing_wals, timeline=None): """ Carry out pre-flight checks on the existing content of a WAL archive to determine if it is safe to archive WALs from the supplied timeline. """ if timeline is None: if len(existing_wals) > 0: raise WalArchiveContentError("Expected empty archive") else: _validate_timeline(timeline) filter_fun = partial(_wal_archive_filter_fun, timeline) unexpected_wals = [wal for wal in existing_wals if filter_fun(wal)] num_unexpected_wals = len(unexpected_wals) if num_unexpected_wals > 0: raise WalArchiveContentError( "Found %s file%s in WAL archive equal to or newer than " "timeline %s" % ( num_unexpected_wals, num_unexpected_wals > 1 and "s" or "", timeline, ) ) barman-2.18/barman/compression.py0000644000621200062120000003110514172556763015253 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2011-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module is responsible to manage the compression features of Barman """ import binascii import bz2 import gzip import logging import shutil import sys from abc import ABCMeta, abstractmethod from contextlib import closing import barman.infofile from barman.command_wrappers import Command from barman.exceptions import CommandFailedException, CompressionIncompatibility from barman.utils import force_str, with_metaclass _logger = logging.getLogger(__name__) class CompressionManager(object): def __init__(self, config, path): """ Compression manager """ self.config = config self.path = path self.unidentified_compression = None # If Barman is set to use the custom compression, it assumes that # every unidentified file is custom compressed if self.config.compression == "custom": self.unidentified_compression = self.config.compression def check(self, compression=None): """ This method returns True if the compression specified in the configuration file is present in the register, otherwise False """ if not compression: compression = self.config.compression if compression not in compression_registry: return False return True def get_default_compressor(self): """ Returns a new default compressor instance """ return self.get_compressor(self.config.compression) def get_compressor(self, compression): """ Returns a new compressor instance :param str compression: Compression name or none """ # Check if the requested compression mechanism is allowed if compression and self.check(compression): return compression_registry[compression]( config=self.config, compression=compression, path=self.path ) return None def get_wal_file_info(self, filename): """ Populate a WalFileInfo object taking into account the server configuration. Set compression to 'custom' if no compression is identified and Barman is configured to use custom compression. :param str filename: the path of the file to identify :rtype: barman.infofile.WalFileInfo """ return barman.infofile.WalFileInfo.from_file( filename, compression_manager=self, unidentified_compression=self.unidentified_compression, ) def identify_compression(self, filename): """ Try to guess the compression algorithm of a file :param str filename: the path of the file to identify :rtype: str """ # TODO: manage multiple decompression methods for the same # compression algorithm (e.g. what to do when gzip is detected? # should we use gzip or pigz?) 
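# Read at most MAGIC_MAX_LENGTH bytes from the file and compare them with
# the MAGIC prefix of every registered compressor. The 'custom' compressor
# is validated through an instance, since its magic bytes (if any) are only
# known after the configuration has been read.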
with open(filename, "rb") as f: file_start = f.read(MAGIC_MAX_LENGTH) for file_type, cls in sorted(compression_registry.items()): if file_type == "custom": try: compressor = self.get_compressor(file_type) if compressor.validate(file_start): return file_type except CompressionIncompatibility: # ignore exceptions that might happen when creating # a custom compressor pass elif cls.validate(file_start): return file_type return None class Compressor(with_metaclass(ABCMeta, object)): """ Base class for all the compressors """ MAGIC = None def __init__(self, config, compression, path=None): self.config = config self.compression = compression self.path = path @classmethod def validate(cls, file_start): """ Guess if the first bytes of a file are compatible with the compression implemented by this class :param file_start: a binary string representing the first few bytes of a file :rtype: bool """ return cls.MAGIC and file_start.startswith(cls.MAGIC) @abstractmethod def compress(self, src, dst): """ Abstract Method for compression method :param str src: source file path :param str dst: destination file path """ @abstractmethod def decompress(self, src, dst): """ Abstract method for decompression method :param str src: source file path :param str dst: destination file path """ class CommandCompressor(Compressor): """ Base class for compressors built on external commands """ def __init__(self, config, compression, path=None): super(CommandCompressor, self).__init__(config, compression, path) self._compress = None self._decompress = None def compress(self, src, dst): """ Compress using the specific command defined in the sublcass :param src: source file to compress :param dst: destination of the decompression """ return self._compress(src, dst) def decompress(self, src, dst): """ Decompress using the specific command defined in the sublcass :param src: source file to decompress :param dst: destination of the decompression """ return self._decompress(src, dst) def _build_command(self, pipe_command): """ Build the command string and create the actual Command object :param pipe_command: the command used to compress/decompress :rtype: Command """ command = "barman_command(){ " command += pipe_command command += ' > "$2" < "$1"' command += ";}; barman_command" return Command(command, shell=True, check=True, path=self.path) class InternalCompressor(Compressor): """ Base class for compressors built on python libraries """ def compress(self, src, dst): """ Compress using the object defined in the sublcass :param src: source file to compress :param dst: destination of the decompression """ try: with open(src, "rb") as istream: with closing(self._compressor(dst)) as ostream: shutil.copyfileobj(istream, ostream) except Exception as e: # you won't get more information from the compressors anyway raise CommandFailedException(dict(ret=None, err=force_str(e), out=None)) return 0 def decompress(self, src, dst): """ Decompress using the object defined in the sublcass :param src: source file to decompress :param dst: destination of the decompression """ try: with closing(self._decompressor(src)) as istream: with open(dst, "wb") as ostream: shutil.copyfileobj(istream, ostream) except Exception as e: # you won't get more information from the compressors anyway raise CommandFailedException(dict(ret=None, err=force_str(e), out=None)) return 0 @abstractmethod def _decompressor(self, src): """ Abstract decompressor factory method :param src: source file path :return: a file-like readable decompressor object """ @abstractmethod 
def _compressor(self, dst): """ Abstract compressor factory method :param dst: destination file path :return: a file-like writable compressor object """ class GZipCompressor(CommandCompressor): """ Predefined compressor with GZip """ MAGIC = b"\x1f\x8b\x08" def __init__(self, config, compression, path=None): super(GZipCompressor, self).__init__(config, compression, path) self._compress = self._build_command("gzip -c") self._decompress = self._build_command("gzip -c -d") class PyGZipCompressor(InternalCompressor): """ Predefined compressor that uses GZip Python libraries """ MAGIC = b"\x1f\x8b\x08" def __init__(self, config, compression, path=None): super(PyGZipCompressor, self).__init__(config, compression, path) # Default compression level used in system gzip utility self._level = -1 # Z_DEFAULT_COMPRESSION constant of zlib def _compressor(self, name): return gzip.GzipFile(name, mode="wb", compresslevel=self._level) def _decompressor(self, name): return gzip.GzipFile(name, mode="rb") class PigzCompressor(CommandCompressor): """ Predefined compressor with Pigz Note that pigz on-disk is the same as gzip, so the MAGIC value of this class is the same """ MAGIC = b"\x1f\x8b\x08" def __init__(self, config, compression, path=None): super(PigzCompressor, self).__init__(config, compression, path) self._compress = self._build_command("pigz -c") self._decompress = self._build_command("pigz -c -d") class BZip2Compressor(CommandCompressor): """ Predefined compressor with BZip2 """ MAGIC = b"\x42\x5a\x68" def __init__(self, config, compression, path=None): super(BZip2Compressor, self).__init__(config, compression, path) self._compress = self._build_command("bzip2 -c") self._decompress = self._build_command("bzip2 -c -d") class PyBZip2Compressor(InternalCompressor): """ Predefined compressor with BZip2 Python libraries """ MAGIC = b"\x42\x5a\x68" def __init__(self, config, compression, path=None): super(PyBZip2Compressor, self).__init__(config, compression, path) # Default compression level used in system gzip utility self._level = 9 def _compressor(self, name): return bz2.BZ2File(name, mode="wb", compresslevel=self._level) def _decompressor(self, name): return bz2.BZ2File(name, mode="rb") class CustomCompressor(CommandCompressor): """ Custom compressor """ def __init__(self, config, compression, path=None): if ( config.custom_compression_filter is None or type(config.custom_compression_filter) != str ): raise CompressionIncompatibility("custom_compression_filter") if ( config.custom_decompression_filter is None or type(config.custom_decompression_filter) != str ): raise CompressionIncompatibility("custom_decompression_filter") if type(config.custom_compression_magic) == str: self.MAGIC = binascii.unhexlify(config.custom_compression_magic[2:]) # increase the MAGIC_MAX_LENGTH sys.modules[__name__].MAGIC_MAX_LENGTH = max( MAGIC_MAX_LENGTH, len(self.MAGIC) ) super(CustomCompressor, self).__init__(config, compression, path) self._compress = self._build_command(config.custom_compression_filter) self._decompress = self._build_command(config.custom_decompression_filter) def validate(self, file_start): """ An instance version of the validate class method. This is needed because it is not possible to determine what the first bytes should be for a custom compressor until the compressor object is created. 
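The instance-level MAGIC, if any, is built in ``__init__`` by decoding the ``custom_compression_magic`` configuration value (a ``0x``-prefixed hexadecimal string).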
""" return self.MAGIC and file_start.startswith(self.MAGIC) # a dictionary mapping all supported compression schema # to the class implementing it # WARNING: items in this dictionary are extracted using alphabetical order # It's important that gzip and bzip2 are positioned before their variants compression_registry = { "gzip": GZipCompressor, "pigz": PigzCompressor, "bzip2": BZip2Compressor, "pygzip": PyGZipCompressor, "pybzip2": PyBZip2Compressor, "custom": CustomCompressor, } #: The longest string needed to identify a compression schema MAGIC_MAX_LENGTH = max(len(x.MAGIC or "") for x in compression_registry.values()) barman-2.18/barman/backup_manifest.py0000644000621200062120000001271114172556763016047 0ustar 00000000000000# -*- coding: utf-8 -*- # © Copyright EnterpriseDB UK Limited 2013-2022 # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging import os import json from barman import output class BackupManifest: name = "backup_manifest" def __init__(self, path, file_manager, checksum_algorithm): """ :param path: backup directory :type path: str :param file_manager: File manager :type file_manager: barman. """ self.files = [] self.path = path self.file_manager = file_manager self.checksum_algorithm = checksum_algorithm def create_backup_manifest(self): """ Will create a manifest file if it doesn't exists. :return: """ if self.file_manager.file_exist(self._get_manifest_file_path()): msg = ( "File %s already exists. Skip file creation." % self._get_manifest_file_path() ) logging.info(msg) output.info(msg) return self._create_files_metadata() str_manifest = self._get_manifest_str() # Create checksum from string without last '}' and ',' instead manifest_checksum = self.checksum_algorithm.checksum_from_str(str_manifest) last_line = '"Manifest-Checksum": "%s"}\n' % manifest_checksum full_manifest = str_manifest + last_line self.file_manager.save_content_to_file( self._get_manifest_file_path(), full_manifest.encode(), file_mode="wb" ) def _get_manifest_from_dict(self): """ Old version used to create manifest first section Could be used :return: str """ manifest = { "PostgreSQL-Backup-Manifest-Version": 1, "Files": self.files, } # Convert to text # sort_keys and separators are used for python compatibility str_manifest = json.dumps( manifest, indent=2, sort_keys=True, separators=(",", ": ") ) str_manifest = str_manifest[:-2] + ",\n" return str_manifest def _get_manifest_str(self): """ :return: """ manifest = '{"PostgreSQL-Backup-Manifest-Version": 1,\n"Files": [\n' for i in self.files: # sort_keys needed for python 2/3 compatibility manifest += json.dumps(i, sort_keys=True) + ",\n" manifest = manifest[:-2] + "],\n" return manifest def _create_files_metadata(self): """ Parse all files in backup directory and get file identity values for each one of them. 
""" file_list = self.file_manager.get_file_list(self.path) for filepath in file_list: # Create FileEntity identity = FileIdentity( filepath, self.path, self.file_manager, self.checksum_algorithm ) self.files.append(identity.get_value()) def _get_manifest_file_path(self): """ Generates backup-manifest file path :return: backup-manifest file path :rtype: str """ return os.path.join(self.path, self.name) class FileIdentity: """ This class purpose is to aggregate file information for backup-manifest. """ def __init__(self, file_path, dir_path, file_manager, checksum_algorithm): """ :param file_path: File path to analyse :type file_path: str :param dir_path: Backup directory path :type dir_path: str :param file_manager: :type file_manager: barman.storage.FileManager :param checksum_algorithm: Object that will create checksum from bytes :type checksum_algorithm: """ self.file_path = file_path self.dir_path = dir_path self.file_manager = file_manager self.checksum_algorithm = checksum_algorithm def get_value(self): """ Returns a dictionary containing FileIdentity values """ stats = self.file_manager.get_file_stats(self.file_path) return { "Size": stats.get_size(), "Last-Modified": stats.get_last_modified(), "Checksum-Algorithm": self.checksum_algorithm.get_name(), "Path": self._get_relative_path(), "Checksum": self._get_checksum(), } def _get_relative_path(self): """ :return: file path from directory path :rtype: string """ if not self.file_path.startswith(self.dir_path): msg = "Expecting %s to start with %s" % (self.file_path, self.dir_path) raise AttributeError(msg) return self.file_path.split(self.dir_path)[1].strip("/") def _get_checksum(self): """ :return: file checksum :rtype: str """ content = self.file_manager.get_file_content(self.file_path) return self.checksum_algorithm.checksum(content) barman-2.18/barman.egg-info/0000755000621200062120000000000014172556766014035 5ustar 00000000000000barman-2.18/barman.egg-info/SOURCES.txt0000644000621200062120000001737314172556766015734 0ustar 00000000000000AUTHORS LICENSE MANIFEST.in NEWS README.rst setup.cfg setup.py barman/__init__.py barman/annotations.py barman/backup.py barman/backup_executor.py barman/backup_manifest.py barman/cli.py barman/cloud.py barman/command_wrappers.py barman/compression.py barman/config.py barman/copy_controller.py barman/diagnose.py barman/exceptions.py barman/fs.py barman/hooks.py barman/infofile.py barman/lockfile.py barman/output.py barman/postgres.py barman/postgres_plumbing.py barman/process.py barman/recovery_executor.py barman/remote_status.py barman/retention_policies.py barman/server.py barman/utils.py barman/version.py barman/wal_archiver.py barman/xlog.py barman.egg-info/PKG-INFO barman.egg-info/SOURCES.txt barman.egg-info/dependency_links.txt barman.egg-info/entry_points.txt barman.egg-info/requires.txt barman.egg-info/top_level.txt barman/clients/__init__.py barman/clients/cloud_backup.py barman/clients/cloud_backup_delete.py barman/clients/cloud_backup_keep.py barman/clients/cloud_backup_list.py barman/clients/cloud_check_wal_archive.py barman/clients/cloud_cli.py barman/clients/cloud_compression.py barman/clients/cloud_restore.py barman/clients/cloud_walarchive.py barman/clients/cloud_walrestore.py barman/clients/walarchive.py barman/clients/walrestore.py barman/cloud_providers/__init__.py barman/cloud_providers/aws_s3.py barman/cloud_providers/azure_blob_storage.py barman/storage/__init__.py barman/storage/file_manager.py barman/storage/file_stats.py barman/storage/local_file_manager.py doc/.gitignore 
doc/Makefile doc/barman-cloud-backup-delete.1 doc/barman-cloud-backup-delete.1.md doc/barman-cloud-backup-keep.1 doc/barman-cloud-backup-keep.1.md doc/barman-cloud-backup-list.1 doc/barman-cloud-backup-list.1.md doc/barman-cloud-backup.1 doc/barman-cloud-backup.1.md doc/barman-cloud-check-wal-archive.1 doc/barman-cloud-check-wal-archive.1.md doc/barman-cloud-restore.1 doc/barman-cloud-restore.1.md doc/barman-cloud-wal-archive.1 doc/barman-cloud-wal-archive.1.md doc/barman-cloud-wal-restore.1 doc/barman-cloud-wal-restore.1.md doc/barman-wal-archive.1 doc/barman-wal-archive.1.md doc/barman-wal-restore.1 doc/barman-wal-restore.1.md doc/barman.1 doc/barman.5 doc/barman.conf doc/barman.1.d/00-header.md doc/barman.1.d/05-name.md doc/barman.1.d/10-synopsis.md doc/barman.1.d/15-description.md doc/barman.1.d/20-options.md doc/barman.1.d/45-commands.md doc/barman.1.d/50-archive-wal.md doc/barman.1.d/50-backup.md doc/barman.1.d/50-check-backup.md doc/barman.1.d/50-check-wal-archive.md doc/barman.1.d/50-check.md doc/barman.1.d/50-cron.md doc/barman.1.d/50-delete.md doc/barman.1.d/50-diagnose.md doc/barman.1.d/50-get-wal.md doc/barman.1.d/50-keep.md doc/barman.1.d/50-list-backups.md doc/barman.1.d/50-list-files.md doc/barman.1.d/50-list-servers.md doc/barman.1.d/50-put-wal.md doc/barman.1.d/50-rebuild-xlogdb.md doc/barman.1.d/50-receive-wal.md doc/barman.1.d/50-recover.md doc/barman.1.d/50-replication-status.md doc/barman.1.d/50-show-backup.md doc/barman.1.d/50-show-servers.md doc/barman.1.d/50-status.md doc/barman.1.d/50-switch-wal.md doc/barman.1.d/50-switch-xlog.md doc/barman.1.d/50-sync-backup.md doc/barman.1.d/50-sync-info.md doc/barman.1.d/50-sync-wals.md doc/barman.1.d/70-backup-id-shortcuts.md doc/barman.1.d/75-exit-status.md doc/barman.1.d/80-see-also.md doc/barman.1.d/85-bugs.md doc/barman.1.d/90-authors.md doc/barman.1.d/95-resources.md doc/barman.1.d/99-copying.md doc/barman.5.d/00-header.md doc/barman.5.d/05-name.md doc/barman.5.d/15-description.md doc/barman.5.d/20-configuration-file-locations.md doc/barman.5.d/25-configuration-file-syntax.md doc/barman.5.d/30-configuration-file-directory.md doc/barman.5.d/45-options.md doc/barman.5.d/50-active.md doc/barman.5.d/50-archiver.md doc/barman.5.d/50-archiver_batch_size.md doc/barman.5.d/50-backup_directory.md doc/barman.5.d/50-backup_method.md doc/barman.5.d/50-backup_options.md doc/barman.5.d/50-bandwidth_limit.md doc/barman.5.d/50-barman_home.md doc/barman.5.d/50-barman_lock_directory.md doc/barman.5.d/50-basebackup_retry_sleep.md doc/barman.5.d/50-basebackup_retry_times.md doc/barman.5.d/50-basebackups_directory.md doc/barman.5.d/50-check_timeout.md doc/barman.5.d/50-compression.md doc/barman.5.d/50-conninfo.md doc/barman.5.d/50-create_slot.md doc/barman.5.d/50-custom_compression_filter.md doc/barman.5.d/50-custom_compression_magic.md doc/barman.5.d/50-custom_decompression_filter.md doc/barman.5.d/50-description.md doc/barman.5.d/50-errors_directory.md doc/barman.5.d/50-forward-config-path.md doc/barman.5.d/50-immediate_checkpoint.md doc/barman.5.d/50-incoming_wals_directory.md doc/barman.5.d/50-last_backup_maximum_age.md doc/barman.5.d/50-last_backup_minimum_size.md doc/barman.5.d/50-last_wal_maximum_age.md doc/barman.5.d/50-log_file.md doc/barman.5.d/50-log_level.md doc/barman.5.d/50-max_incoming_wals_queue.md doc/barman.5.d/50-minimum_redundancy.md doc/barman.5.d/50-network_compression.md doc/barman.5.d/50-parallel_jobs.md doc/barman.5.d/50-path_prefix.md doc/barman.5.d/50-post_archive_retry_script.md 
doc/barman.5.d/50-post_archive_script.md doc/barman.5.d/50-post_backup_retry_script.md doc/barman.5.d/50-post_backup_script.md doc/barman.5.d/50-post_delete_retry_script.md doc/barman.5.d/50-post_delete_script.md doc/barman.5.d/50-post_recovery_retry_script.md doc/barman.5.d/50-post_recovery_script.md doc/barman.5.d/50-post_wal_delete_retry_script.md doc/barman.5.d/50-post_wal_delete_script.md doc/barman.5.d/50-pre_archive_retry_script.md doc/barman.5.d/50-pre_archive_script.md doc/barman.5.d/50-pre_backup_retry_script.md doc/barman.5.d/50-pre_backup_script.md doc/barman.5.d/50-pre_delete_retry_script.md doc/barman.5.d/50-pre_delete_script.md doc/barman.5.d/50-pre_recovery_retry_script.md doc/barman.5.d/50-pre_recovery_script.md doc/barman.5.d/50-pre_wal_delete_retry_script.md doc/barman.5.d/50-pre_wal_delete_script.md doc/barman.5.d/50-primary_ssh_command.md doc/barman.5.d/50-recovery_options.md doc/barman.5.d/50-retention_policy.md doc/barman.5.d/50-retention_policy_mode.md doc/barman.5.d/50-reuse_backup.md doc/barman.5.d/50-slot_name.md doc/barman.5.d/50-ssh_command.md doc/barman.5.d/50-streaming_archiver.md doc/barman.5.d/50-streaming_archiver_batch_size.md doc/barman.5.d/50-streaming_archiver_name.md doc/barman.5.d/50-streaming_backup_name.md doc/barman.5.d/50-streaming_conninfo.md doc/barman.5.d/50-streaming_wals_directory.md doc/barman.5.d/50-tablespace_bandwidth_limit.md doc/barman.5.d/50-wal_retention_policy.md doc/barman.5.d/50-wals_directory.md doc/barman.5.d/70-hook-scripts.md doc/barman.5.d/75-example.md doc/barman.5.d/80-see-also.md doc/barman.5.d/90-authors.md doc/barman.5.d/95-resources.md doc/barman.5.d/99-copying.md doc/barman.d/passive-server.conf-template doc/barman.d/ssh-server.conf-template doc/barman.d/streaming-server.conf-template doc/images/barman-architecture-georedundancy.png doc/images/barman-architecture-scenario1.png doc/images/barman-architecture-scenario1b.png doc/images/barman-architecture-scenario2.png doc/images/barman-architecture-scenario2b.png doc/manual/.gitignore doc/manual/00-head.en.md doc/manual/01-intro.en.md doc/manual/02-before_you_start.en.md doc/manual/10-design.en.md doc/manual/15-system_requirements.en.md doc/manual/16-installation.en.md doc/manual/17-configuration.en.md doc/manual/20-server_setup.en.md doc/manual/21-preliminary_steps.en.md doc/manual/22-config_file.en.md doc/manual/23-wal_streaming.en.md doc/manual/24-wal_archiving.en.md doc/manual/25-streaming_backup.en.md doc/manual/26-rsync_backup.en.md doc/manual/27-windows-support.en.md doc/manual/41-global-commands.en.md doc/manual/42-server-commands.en.md doc/manual/43-backup-commands.en.md doc/manual/50-feature-details.en.md doc/manual/55-barman-cli.en.md doc/manual/65-troubleshooting.en.md doc/manual/66-about.en.md doc/manual/70-feature-matrix.en.md doc/manual/99-references.en.md doc/manual/Makefile scripts/barman.bash_completionbarman-2.18/barman.egg-info/dependency_links.txt0000644000621200062120000000000114172556766020103 0ustar 00000000000000 barman-2.18/barman.egg-info/requires.txt0000644000621200062120000000020514172556766016432 0ustar 00000000000000psycopg2>=2.4.2 python-dateutil argcomplete [azure] azure-identity azure-storage-blob [cloud] boto3 [snappy] python-snappy>=0.6.0 barman-2.18/barman.egg-info/entry_points.txt0000644000621200062120000000123014172556766017327 0ustar 00000000000000[console_scripts] barman = barman.cli:main barman-cloud-backup = barman.clients.cloud_backup:main barman-cloud-backup-delete = barman.clients.cloud_backup_delete:main 
barman-cloud-backup-keep = barman.clients.cloud_backup_keep:main barman-cloud-backup-list = barman.clients.cloud_backup_list:main barman-cloud-check-wal-archive = barman.clients.cloud_check_wal_archive:main barman-cloud-restore = barman.clients.cloud_restore:main barman-cloud-wal-archive = barman.clients.cloud_walarchive:main barman-cloud-wal-restore = barman.clients.cloud_walrestore:main barman-wal-archive = barman.clients.walarchive:main barman-wal-restore = barman.clients.walrestore:main barman-2.18/barman.egg-info/PKG-INFO0000644000621200062120000000275014172556766015136 0ustar 00000000000000Metadata-Version: 2.1 Name: barman Version: 2.18 Summary: Backup and Recovery Manager for PostgreSQL Home-page: https://www.pgbarman.org/ Author: EnterpriseDB Author-email: barman@enterprisedb.com License: GPL-3.0 Description: Barman (Backup and Recovery Manager) is an open-source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments to reduce risk and help DBAs during the recovery phase. Barman is distributed under GNU GPL 3 and maintained by EnterpriseDB. Platform: Linux Platform: Mac OS X Classifier: Environment :: Console Classifier: Development Status :: 5 - Production/Stable Classifier: Topic :: System :: Archiving :: Backup Classifier: Topic :: Database Classifier: Topic :: System :: Recovery Tools Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+) Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Provides-Extra: azure Provides-Extra: snappy Provides-Extra: cloud barman-2.18/barman.egg-info/top_level.txt0000644000621200062120000000000714172556766016564 0ustar 00000000000000barman barman-2.18/README.rst0000644000621200062120000000433014172556763012567 0ustar 00000000000000Barman, Backup and Recovery Manager for PostgreSQL ================================================== This is the new (starting with version 2.13) home of Barman. It replaces the legacy sourceforge repository. Barman (Backup and Recovery Manager) is an open-source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments to reduce risk and help DBAs during the recovery phase. Barman is distributed under GNU GPL 3 and maintained by EnterpriseDB. For further information, look at the "Web resources" section below. 
Source content -------------- Here you can find a description of files and directory distributed with Barman: - AUTHORS : development team of Barman - NEWS : release notes - ChangeLog : log of changes - LICENSE : GNU GPL3 details - TODO : our wishlist for Barman - barman : sources in Python - doc : tutorial and man pages - scripts : auxiliary scripts - tests : unit tests Web resources ------------- - Website : http://www.pgbarman.org/ - Download : http://github.com/EnterpriseDB/barman - Documentation : http://www.pgbarman.org/documentation/ - Man page, section 1 : http://docs.pgbarman.org/barman.1.html - Man page, section 5 : http://docs.pgbarman.org/barman.5.html - Community support : http://www.pgbarman.org/support/ - Professional support : https://www.enterprisedb.com/ - pgespresso extension : https://github.com/2ndquadrant-it/pgespresso - pre barman 2.13 versions : https://sourceforge.net/projects/pgbarman/files/ Licence ------- © Copyright 2011-2022 EnterpriseDB UK Limited Barman is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Barman is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Barman. If not, see http://www.gnu.org/licenses/. barman-2.18/NEWS0000644000621200062120000012365314172556763011611 0ustar 00000000000000Barman News - History of user-visible changes Version 2.18 - 21 January 2022 - Add snappy compression algorithm support in barman cloud (requires the optional python-snappy dependency). - Allow Azure client concurrency parameters to be set when uploading WALs with barman-cloud-wal-archive. - Add `--tags` option in barman cloud so that backup files and archived WALs can be tagged in cloud storage (aws and azure). - Update the barman cloud exit status codes so that there is a dedicated code (2) for connectivity errors. - Add the commands `barman verify-backup` and `barman generate-manifest` to check if a backup is valid. - Add support for Azure Managed Identity auth in barman cloud which can be enabled with the `--credential` option. - Bug fixes: - Change `barman-cloud-check-wal-archive` behavior when bucket does not exist. - Ensure `list-files` output is always sorted regardless of the underlying filesystem. - Man pages for barman-cloud-backup-keep, barman-cloud-backup-delete and barman-cloud-check-wal-archive added to Python packaging. - We thank richyen and stratakis for their contributions to this release. Version 2.17 - 1 December 2021 - Bug fixes: - Resolves a performance regression introduced in version 2.14 which increased copy times for `barman backup` or `barman recover` commands when using the `--jobs` flag. - Ignore rsync partial transfer errors for `sender` processes so that such errors do not cause the backup to fail (thanks to barthisrael). Version 2.16 - 17 November 2021 - Add the commands `barman-check-wal-archive` and `barman-cloud-check-wal-archive` to validate if a proposed archive location is safe to use for a new PostgreSQL server. - Allow Barman to identify WAL that's already compressed using a custom compression scheme to avoid compressing it again. 
- Add `last_backup_minimum_size` and `last_wal_maximum_age` options to `barman check`. - Bug fixes: - Use argparse for command line parsing instead of the unmaintained argh module. - Make timezones consistent for `begin_time` and `end_time`. - We thank chtitux, George Hansper, stratakis, Thoro, and vrms for their contributions to this release. Version 2.15 - 12 October 2021 - Add plural forms for the `list-backup`, `list-server` and `show-server` commands which are now `list-backups`, `list-servers` and `show-servers`. The singular forms are retained for backward compatibility. - Add the `last-failed` backup shortcut which references the newest failed backup in the catalog so that you can do: - `barman delete last-failed` - Bug fixes: - Tablespaces will no longer be omitted from backups of EPAS versions 9.6 and 10 due to an issue detecting the correct version string on older versions of EPAS. Version 2.14 - 22 September 2021 - Add the `barman-cloud-backup-delete` command which allows backups in cloud storage to be deleted by specifying either a backup ID or a retention policy. - Allow backups to be retained beyond any retention policies in force by introducing the ability to tag existing backups as archival backups using `barman keep` and `barman-cloud-backup-keep`. - Allow the use of SAS authentication tokens created at the restricted blob container level (instead of the wider storage account level) for Azure blob storage - Significantly speed up `barman restore` into an empty directory for backups that contain hundreds of thousands of files. - Bug fixes: - The backup privileges check will no longer fail if the user lacks "userepl" permissions and will return better error messages if any required permissions are missing (#318 and #319). Version 2.13 - 26 July 2021 - Add Azure blob storage support to barman-cloud - Support tablespace remapping in barman-cloud-restore via `--tablespace name:location` - Allow barman-cloud-backup and barman-cloud-wal-archive to run as Barman hook scripts, to allow data to be relayed to cloud storage from the Barman server - Bug fixes: - Stop backups failing due to idle_in_transaction_session_timeout (https://github.com/EnterpriseDB/barman/issues/333) - Fix a race condition between backup and archive-wal in updating xlog.db entries (#328) - Handle PGDATA being a symlink in barman-cloud-backup, which led to "seeking backwards is not allowed" errors on restore (#351) - Recreate pg_wal on restore if the original was a symlink (#327) - Recreate pg_tblspc symlinks for tablespaces on restore (#343) - Make barman-cloud-backup-list skip backups it cannot read, e.g., because they are in Glacier storage (#332) - Add `-d database` option to barman-cloud-backup to specify which database to connect to initially (#307) - Fix "Backup failed uploading data" errors from barman-cloud-backup on Python 3.8 and above, caused by attempting to pickle the boto3 client (#361) - Correctly enable server-side encryption in S3 for buckets that do not have encryption enabled by default. In Barman 2.12, barman-cloud-backup's `--encryption` option did not correctly enable encryption for the contents of the backup if the backup was stored in an S3 bucket that did not have encryption enabled. If this is the case for you, please consider deleting your old backups and taking new backups with Barman 2.13. If your S3 buckets already have encryption enabled by default (which we recommend), this does not affect you. 
Version 2.12.1 - 30 June 2021 - Bug fixes: - Allow specifying target-tli with other target-* recovery options - Fix incorrect NAME in barman-cloud-backup-list manpage - Don't raise an error if SIGALRM is ignored - Fetch wal_keep_size, not wal_keep_segments, from Postgres 13 Version 2.12 - 5 Nov 2020 - Introduce a new backup_method option called local-rsync which targets those cases where Barman is installed on the same server where PostgreSQL is and directly uses rsync to take base backups, bypassing the SSH layer. - Bug fixes: - Avoid corrupting boto connection in worker processes - Avoid connection attempts to PostgreSQL during tests Version 2.11 - 9 Jul 2020 - Introduction of the barman-cli-cloud package that contains all cloud related utilities. - Add barman-cloud-wal-restore to restore a WAL file previously archived with barman-cloud-wal-archive from an object store - Add barman-cloud-restore to restore a backup previously taken with barman-cloud-backup from an object store - Add barman-cloud-backup-list to list backups taken with barman-cloud-backup in an object store - Add support for arbitrary archive size for barman-cloud-backup - Add support for --endpoint-url option to cloud utilities - Remove strict superuser requirement for PG 10+ (by Kaarel Moppel) - Add --log-level runtime option for barman to override default log level for a specific command - Support for PostgreSQL 13 - Bug fixes: - Suppress messages and warning with SSH connections in barman-cli (GH-257) - Fix a race condition when retrieving uploaded parts in barman-cloud-backup (GH-259) - Close the PostgreSQL connection after a backup (GH-258) - Check for uninitialized replication slots in receive-wal --reset (GH-260) - Ensure that begin_wal is valorised before acting on it (GH-262) - Fix bug in XLOG/WAL arithmetic with custom segment size (GH-287) - Fix rsync compatibility error with recent rsync - Fix PostgreSQLClient version parsing - Fix PostgreSQL exception handling with non ASCII messages - Ensure each postgres connection has an empty search_path - Avoid connecting to PostgreSQL while reading a backup.info file If you are using already barman-cloud-wal-archive or barman-cloud-backup installed via RPM/Apt package and you are upgrading your system, you must install the barman-cli-cloud package. All cloud related tools are now part of the barman-cli-cloud package, including barman-cloud-wal-archive and barman-cloud-backup that were previosly shipped with barman-cli. The reason is complex dependency management of the boto3 library, which is a requirement for the cloud utilities. Version 2.10 - 5 Dec 2019 - Pull .partial WAL files with get-wal and barman-wal-restore, allowing restore_command in a recovery scenario to fetch a partial WAL file's content from the Barman server. This feature simplifies and enhances RPO=0 recovery operations. - Store the PostgreSQL system identifier in the server directory and inside the backup information file. Improve check command to verify the consistency of the system identifier with active connections (standard and replication) and data on disk. - A new script called barman-cloud-wal-archive has been added to the barman-cli package to directly ship WAL files from PostgreSQL (using archive_command) to cloud object storage services that are compatible with AWS S3. It supports encryption and compression. 
- A new script called barman-cloud-backup has been added to the barman-cli package to directly ship base backups from a local PostgreSQL server to cloud object storage services that are compatible with AWS S3. It supports encryption, parallel upload, compression. - Automated creation of replication slots through the server/global option create_slot. When set to auto, Barman creates the replication slot, in case streaming_archiver is enabled and slot_name is defined. The default value is manual for back-compatibility. - Add '-w/--wait' option to backup command, making Barman wait for all required WAL files to be archived before considering the backup completed. Add also the --wait-timeout option (default 0, no timeout). - Redact passwords from Barman output, in particular from barman diagnose (InfoSec) - Improve robustness of receive-wal --reset command, by verifying that the last partial file is aligned with the current location or, if present, with replication slot's. - Documentation improvements - Bug fixes: - Wrong string matching operation when excluding tablespaces inside PGDATA (GH-245) - Minor fixes in WAL delete hook scripts (GH-240) - Fix PostgreSQL connection aliveness check (GH-239) Version 2.9 - 1 Aug 2019 - Transparently support PostgreSQL 12, by supporting the new way of managing recovery and standby settings through GUC options and signal files (recovery.signal and standby.signal) - Add --bwlimit command line option to set bandwidth limitation for backup and recover commands - Ignore WAL archive failure for check command in case the latest backup is WAITING_FOR_WALS - Add --target-lsn option to set recovery target Log Sequence Number for recover command with PostgreSQL 10 or higher - Add --spool-dir option to barman-wal-restore so that users can change the spool directory location from the default, avoiding conflicts in case of multiple PostgreSQL instances on the same server (thanks to Drazen Kacar). - Rename barman_xlog directory to barman_wal - JSON output writer to export command output as JSON objects and facilitate integration with external tools and systems (thanks to Marcin Onufry Hlybin). Experimental in this release. Bug fixes: - replication-status doesn’t show streamers with no slot (GH-222) - When checking that a connection is alive (“SELECT 1” query), preserve the status of the PostgreSQL connection (GH-149). This fixes those cases of connections that were terminated due to idle-in-transaction timeout, causing concurrent backups to fail. Version 2.8 - 17 May 2019 - Add support for reuse_backup in geo-redundancy for incremental backup copy in passive nodes - Improve performance of rsync based copy by using strptime instead of the more generic dateutil.parser (#210) - Add ‘--test’ option to barman-wal-archive and barman-wal-restore to verify the connection with the Barman server - Complain if backup_options is not explicitly set, as the future default value will change from exclusive_backup to concurrent_backup when PostgreSQL 9.5 will be declared EOL by the PGDG - Display additional settings in the show-server and diagnose commands: archive_timeout, data_checksums, hot_standby, max_wal_senders, max_replication_slots and wal_compression. 
- Merge the barman-cli project in Barman - Bug fixes: - Fix encoding error in get-wal on Python 3 (Jeff Janes, #221) - Fix exclude_and_protect_filter (Jeff Janes, #217) - Remove spurious message when resetting WAL (Jeff Janes, #215) - Fix sync-wals error if primary has WALs older than the first backup - Support for double quotes in synchronous_standby_names setting - Minor changes: - Improve messaging of check --nagios for inactive servers - Log remote SSH command with recover command - Hide logical decoding connections in replication-status command This release officially supports Python 3 and deprecates Python 2 (which might be discontinued in future releases). PostgreSQL 9.3 and older is deprecated from this release of Barman. Support for backup from standby is now limited to PostgreSQL 9.4 or higher and to WAL shipping from the standby (please refer to the documentation for details). Version 2.7 - 21 Mar 2019 - Fix error handling during the parallel backup. Previously an unrecoverable error during the copy could have corrupted the barman internal state, requiring a manual kill of barman process with SIGTERM and a manual cleanup of the running backup in PostgreSQL. (GH#199) - Fix support of UTF-8 characters in input and output (GH#194 and GH#196) - Ignore history/backup/partial files for first sync of geo-redundancy (GH#198) - Fix network failure with geo-redundancy causing cron to break (GH#202) - Fix backup validation in PostgreSQL older than 9.2 - Various documentation fixes Version 2.6 - 4 Feb 2019 - Add support for Geographical redundancy, introducing 3 new commands: sync-info, sync-backup and sync-wals. Geo-redundancy allows a Barman server to use another Barman server as data source instead of a PostgreSQL server. - Add put-wal command that allows Barman to safely receive WAL files via PostgreSQL's archive_command using the barman-wal-archive script included in barman-cli - Add ANSI colour support to check command - Minor fixes: - Fix switch-wal on standby with an empty WAL directory - Honour archiver locking in wait_for_wal method - Fix WAL compression detection algorithm - Fix current_action in concurrent stop backup errors - Do not treat lock file busy as an error when validating a backup Version 2.5 - 23 Oct 2018 - Add support for PostgreSQL 11 - Add check-backup command to verify that WAL files required for consistency of a base backup are present in the archive. Barman now adds a new state (WAITING_FOR_WALS) after completing a base backup, and sets it to DONE once it has verified that all WAL files from start to the end of the backup exist. This command is included in the regular cron maintenance job. Barman now notifies users attempting to recover a backup that is in WAITING_FOR_WALS state. - Allow switch-xlog --archive to work on a standby (just for the archive part) - Bug fixes: - Fix decoding errors reading external commands output (issue #174) - Fix documentation regarding WAL streaming and backup from standby Version 2.4 - 25 May 2018 - Add standard and retry hook scripts for backup deletion (pre/post) - Add standard and retry hook scripts for recovery (pre/post) - Add standard and retry hook scripts for WAL deletion (pre/post) - Add --standby-mode option to barman recover to add standby_mode = on in pre-generated recovery.conf - Add --target-action option to barman recover, allowing users to add shutdown, pause or promote to the pre-generated recovery.conf file - Improve usability of point-in-time recovery with consistency checks (e.g. 
recovery time is after end time of backup) - Minor documentation improvements - Drop support for Python 3.3 Relevant bug fixes: - Fix remote get_file_content method (Github #151), preventing incremental recovery from happening - Unicode issues with command (Github #143 and #150) - Add --wal-method=none when pg_basebackup >= 10 (Github #133) Minor bug fixes: - Stop process manager module from ovewriting lock files content - Relax the rules for rsync output parsing - Ignore vanished files in streaming directory - Case insensitive slot names (Github #170) - Make DataTransferFailure.from_command_error() more resilient (Github #86) - Rename command() to barman_command() (Github #118) - Initialise synchronous standby names list if not set (Github #111) - Correct placeholders ordering (GitHub #138) - Force datestyle to iso for replication connections - Returns error if delete command does not remove the backup - Fix exception when calling is_power_of_two(None) - Downgraded sync standby names messages to debug (Github #89) Version 2.3 - 5 Sep 2017 - Add support to PostgreSQL 10 - Follow naming changes in PostgreSQL 10: - The switch-xlog command has been renamed to switch-wal. - In commands output, the xlog word has been changed to WAL and location has been changed to LSN when appropriate. - Add the --network-compression/--no-network-compression options to barman recover to enable or disable network compression at run-time - Add --target-immediate option to recover command, in order to exit recovery when a consistent state is reached (end of the backup, available from PostgreSQL 9.4) - Show cluster state (master or standby) with barman status command - Documentation improvements - Bug fixes: - Fix high memory usage with parallel_jobs > 1 (#116) - Better handling of errors using parallel copy (#114) - Make barman diagnose more robust with system exceptions - Let archive-wal ignore files with .tmp extension Version 2.2 - 17 Jul 2017 - Implement parallel copy for backup/recovery through the parallel_jobs global/server option to be overridden by the --jobs or -j runtime option for the backup and recover command. Parallel backup is available only for the rsync copy method. By default, it is set to 1 (for behaviour compatibility with previous versions). - Support custom WAL size for PostgreSQL 8.4 and newer. At backup time, Barman retrieves from PostgreSQL wal_segment_size and wal_block_size values and computes the necessary calculations. - Improve check command to ensure that incoming directory is empty when archiver=off, and streaming directory is empty when streaming_archiver=off (#80). - Add external_configuration to backup_options so that users can instruct Barman to ignore backup of configuration files when they are not inside PGDATA (default for Debian/Ubuntu installations). In this case, Barman does not display a warning anymore. - Add --get-wal and --no-get-wal options to barman recover - Add max_incoming_wals_queue global/server option for the check command so that a non blocking error is returned in case incoming WAL directories for both archiver and the streaming_archiver contain more files than the specified value. - Documentation improvements - File format changes: - The format of backup.info file has changed. For this reason a backup taken with Barman 2.2 cannot be read by a previous version of Barman. But, backups taken by previous versions can be read by Barman 2.2. 
- Minor bug fixes: - Allow replication-status to work against a standby - Close any PostgreSQL connection before starting pg_basebackup (#104, #108) - Safely handle paths containing special characters - Archive .partial files after promotion of streaming source - Recursively create directories during recovery (SF#44) - Improve xlog.db locking (#99) - Remove tablespace_map file during recover (#95) - Reconnect to PostgreSQL if connection drops (SF#82) Version 2.1 - 5 Jan 2017 - Add --archive and --archive-timeout options to switch-xlog command - Preliminary support for PostgreSQL 10 (#73) - Minor additions: - Add last archived WAL info to diagnose output - Add start time and execution time to the output of delete command - Minor bug fixes: - Return failure for get-wal command on inactive server - Make streaming_archiver_names and streaming_backup_name options global (#57) - Fix rsync failures due to files truncated during transfer (#64) - Correctly handle compressed history files (#66) - Avoid de-referencing symlinks in pg_tblspc when preparing recovery (#55) - Fix comparison of last archiving failure (#40, #58) - Avoid failing recovery if postgresql.conf is not writable (#68) - Fix output of replication-status command (#56) - Exclude files from backups like pg_basebackup (#65, #72) - Exclude directories from other Postgres versions while copying tablespaces (#74) - Make retry hook script options global Version 2.0 - 27 Sep 2016 - Support for pg_basebackup and base backups over the PostgreSQL streaming replication protocol with backup_method=postgres (PostgreSQL 9.1 or higher required) - Support for physical replication slots through the slot_name configuration option as well as the --create-slot and --drop-slot options for the receive-wal command (PostgreSQL 9.4 or higher required). When slot_name is specified and streaming_archiver is enabled, receive-wal transparently integrates with pg_receivexlog, and check makes sure that slots exist and are actively used - Support for the new backup API introduced in PostgreSQL 9.6, which transparently enables concurrent backups and backups from standby servers using the standard rsync method of backup. Concurrent backup was only possible for PostgreSQL 9.2 to 9.5 versions through the pgespresso extension. The new backup API will make pgespresso redundant in the future - If properly configured, Barman can function as a synchronous standby in terms of WAL streaming. By properly setting the streaming_archiver_name in the synchronous_standby_names priority list on the master, and enabling replication slot support, the receive-wal command can now be part of a PostgreSQL synchronous replication cluster, bringing RPO=0 (PostgreSQL 9.5.5 or higher required) - Introduce barman-wal-restore, a standard and robust script written in Python that can be used as restore_command in recovery.conf files of any standby server of a cluster. It supports remote parallel fetching of WAL files by efficiently invoking get-wal through SSH. Currently available as a separate project called barman-cli. 
The barman-cli package is required for remote recovery when get-wal is listed in recovery_options - Control the maximum execution time of the check command through the check_timeout global/server configuration option (30 seconds by default) - Limit the number of WAL segments that are processed by an archive-wal run, through the archiver_batch_size and streaming_archiver_batch_size global/server options which control archiving of WAL segments coming from, respectively, the standard archiver and receive-wal - Removed locking of the XLOG database during check operations - The show-backup command is now aware of timelines and properly displays which timelines can be used as recovery targets for a given base backup. Internally, Barman is now capable of parsing .history files - Improved the logic behind the retry mechanism when copy operations experience problems. This involves backup (rsync and postgres) as well as remote recovery (rsync) - Code refactoring involving remote command and physical copy interfaces - Bug fixes: - Correctly handle .history files from streaming - Fix replication-status on PostgreSQL 9.1 - Fix replication-status when sent and write locations are not available - Fix misleading message on pg_receivexlog termination Version 1.6.1 - 23 May 2016 - Add --peek option to get-wal command to discover existing WAL files from the Barman's archive - Add replication-status command for monitoring the status of any streaming replication clients connected to the PostgreSQL server. The --target option allows users to limit the request to only hot standby servers or WAL streaming clients - Add the switch-xlog command to request a switch of a WAL file to the PostgreSQL server. Through the '--force' it issues a CHECKPOINT beforehand - Add streaming_archiver_name option, which sets a proper application_name to pg_receivexlog when streaming_archiver is enabled (only for PostgreSQL 9.3 and above) - Check for _superuser_ privileges with PostgreSQL's standard connections (#30) - Check the WAL archive is never empty - Check for 'backup_label' on the master when server is down - Improve barman-wal-restore contrib script - Bug fixes: - Treat the "failed backups" check as non-fatal - Rename '-x' option for get-wal as '-z' - Add archive_mode=always support for PostgreSQL 9.5 (#32) - Properly close PostgreSQL connections when necessary - Fix receive-wal for pg_receive_xlog version 9.2 Version 1.6.0 - 29 Feb 2016 - Support for streaming replication connection through the streaming_conninfo server option - Support for the streaming_archiver option that allows Barman to receive WAL files through PostgreSQL's native streaming protocol. When set to 'on', it relies on pg_receivexlog to receive WAL data, reducing Recovery Point Objective. Currently, WAL streaming is an additional feature (standard log archiving is still required) - Implement the receive-wal command that, when streaming_archiver is on, wraps pg_receivexlog for WAL streaming. Add --stop option to stop receiving WAL files via streaming protocol. Add --reset option to reset the streaming status and restart from the current xlog in Postgres. 
- Automatic management (startup and stop) of receive-wal command via cron command - Support for the path_prefix configuration option - Introduction of the archiver option (currently fixed to on) which enables continuous WAL archiving for a specific server, through log shipping via PostgreSQL's archive_command - Support for streaming_wals_directory and errors_directory options - Management of WAL duplicates in archive-wal command and integration with check command - Verify if pg_receivexlog is running in check command when streaming_archiver is enabled - Verify if failed backups are present in check command - Accept compressed WAL files in incoming directory - Add support for the pigz compressor (thanks to Stefano Zacchiroli zack@upsilon.cc) - Implement pygzip and pybzip2 compressors (based on an initial idea of Christoph Moench-Tegeder christoph@2ndquadrant.de) - Creation of an implicit restore point at the end of a backup - Current size of the PostgreSQL data files in barman status - Permit archive_mode=always for PostgreSQL 9.5 servers (thanks to Christoph Moench-Tegeder christoph@2ndquadrant.de) - Complete refactoring of the code responsible for connecting to PostgreSQL - Improve messaging of cron command regarding sub-processes - Native support for Python >= 3.3 - Changes of behaviour: - Stop trashing WAL files during archive-wal (commit:e3a1d16) - Bug fixes: - Atomic WAL file archiving (#9 and #12) - Propagate "-c" option to any Barman subprocess (#19) - Fix management of backup ID during backup deletion (#22) - Improve archive-wal robustness and log messages (#24) - Improve error handling in case of missing parameters Version 1.5.1 - 16 Nov 2015 - Add support for the 'archive-wal' command which performs WAL maintenance operations on a given server - Add support for "per-server" concurrency of the 'cron' command - Improved management of xlog.db errors - Add support for mixed compression types in WAL files (SF.net#61) - Bug fixes: - Avoid retention policy checks during the recovery - Avoid 'wal_level' check on PostgreSQL version < 9.0 (#3) - Fix backup size calculation (#5) Version 1.5.0 - 28 Sep 2015 - Add support for the get-wal command which allows users to fetch any WAL file from the archive of a specific server - Add support for retry hook scripts, a special kind of hook scripts that Barman tries to run until they succeed - Add active configuration option for a server to temporarily disable the server by setting it to False - Add barman_lock_directory global option to change the location of lock files (by default: 'barman_home') - Execute the full suite of checks before starting a backup, and skip it in case one or more checks fail - Forbid to delete a running backup - Analyse include directives of a PostgreSQL server during backup and recover operations - Add check for conflicting paths in the configuration of Barman, both intra (by temporarily disabling a server) and inter-server (by refusing any command, to any server). 
- Add check for wal_level - Add barman-wal-restore script to be used as restore_command on a standby server, in conjunction with barman get-wal - Implement a standard and consistent policy for error management - Improved cache management of backups - Improved management of configuration in unit tests - Tutorial and man page sources have been converted to Markdown format - Add code documentation through Sphinx - Complete refactor of the code responsible for managing the backup and the recover commands - Changed internal directory structure of a backup - Introduce copy_method option (currently fixed to rsync) - Bug fixes: - Manage options without '=' in PostgreSQL configuration files - Preserve Timeline history files (Fixes: #70) - Workaround for rsync on SUSE Linux (Closes: #13 and #26) - Disables dangerous settings in postgresql.auto.conf (Closes: #68) Version 1.4.1 - 05 May 2015 * Fix for WAL archival stop working if first backup is EMPTY (Closes: #64) * Fix exception during error handling in Barman recovery (Closes: #65) * After a backup, limit cron activity to WAL archiving only (Closes: #62) * Improved robustness and error reporting of the backup delete command (Closes: #63) * Fix computation of WAL production ratio as reported in the show-backup command * Improved management of xlogdb file, which is now correctly fsynced when updated. Also, the rebuild-xlogdb command now operates on a temporary new file, which overwrites the main one when finished. * Add unit tests for dateutil module compatibility * Modified Barman version following PEP 440 rules and added support of tests in Python 3.4 Version 1.4.0 - 26 Jan 2015 * Incremental base backup implementation through the reuse_backup global/server option. Possible values are off (disabled, default), copy (preventing unmodified files from being transferred) and link (allowing for deduplication through hard links). * Store and show deduplication effects when using reuse_backup= link. * Added transparent support of pg_stat_archiver (PostgreSQL 9.4) in check, show-server and status commands. * Improved administration by invoking WAL maintenance at the end of a successful backup. * Changed the way unused WAL files are trashed, by differentiating between concurrent and exclusive backup cases. * Improved performance of WAL statistics calculation. * Treat a missing pg_ident.conf as a WARNING rather than an error. * Refactored output layer by removing remaining yield calls. * Check that rsync is in the system path. * Include history files in WAL management. * Improved robustness through more unit tests. * Fixed bug #55: Ignore fsync EINVAL errors on directories. * Fixed bug #58: retention policies delete. Version 1.3.3 - 21 Aug 2014 * Added "last_backup_max_age", a new global/server option that allows administrators to set the max age of the last backup in a catalogue, making it easier to detect any issues with periodical backup execution * Improved robustness of "barman backup" by introducing two global/ server options: "basebackup_retry_times" and "basebackup_retry_sleep". 
These options allow an administrator to specify, respectively, the number of attempts for a copy operation after a failure, and the number of seconds of wait before retrying * Improved the recovery process via rsync on an existing directory (incremental recovery), by splitting the previous rsync call into several ones - invoking checksum control only when necessary * Added support for PostgreSQL 8.3 * Minor changes: + Support for comma separated list values configuration options + Improved backup durability by calling fsync() on backup and WAL files during "barman backup" and "barman cron" + Improved Nagios output for "barman check --nagios" + Display compression ratio for WALs in "barman show-backup" + Correctly handled keyboard interruption (CTRL-C) while performing barman backup + Improved error messages of failures regarding the stop of a backup + Wider coverage of unit tests * Bug fixes: + Copies "recovery.conf" on the remote server during "barman recover" (#45) + Correctly detect pre/post archive hook scripts (#41) Version 1.3.2 - 15 Apr 2014 * Fixed incompatibility with PostgreSQL 8.4 (Closes #40, bug introduced in version 1.3.1) Version 1.3.1 - 14 Apr 2014 * Added support for concurrent backup of PostgreSQL 9.2 and 9.3 servers that use the "pgespresso" extension. This feature is controlled by the "backup_options" configuration option (global/ server) and activated when set to "concurrent_backup". Concurrent backup allows DBAs to perform full backup operations from a streaming replicated standby. * Added the "barman diagnose" command which prints important information about the Barman system (extremely useful for support and problem solving) * Improved error messages and exception handling interface * Fixed bug in recovery of tablespaces that are created inside the PGDATA directory (bug introduced in version 1.3.0) * Fixed minor bug of unhandled -q option, for quiet mode of commands to be used in cron jobs (bug introduced in version 1.3.0) * Minor bug fixes and code refactoring Version 1.3.0 - 3 Feb 2014 * Refactored BackupInfo class for backup metadata to use the new FieldListFile class (infofile module) * Refactored output layer to use a dedicated module, in order to facilitate integration with Nagios (NagiosOutputWriter class) * Refactored subprocess handling in order to isolate stdin/stderr/ stdout channels (command_wrappers module) * Refactored hook scripts management * Extracted logging configuration and userid enforcement from the configuration class. * Support for hook scripts to be executed before and after a WAL file is archived, through the 'pre_archive_script' and 'post_archive_script' configuration options. 
* Implemented immediate checkpoint capability with --immediate-checkpoint command option and 'immediate_checkpoint' configuration option * Implemented network compression for remote backup and recovery through the 'network_compression' configuration option (#19) * Implemented the 'rebuild-xlogdb' command (Closes #27 and #28) * Added deduplication of tablespaces located inside the PGDATA directory * Refactored remote recovery code to work the same way local recovery does, by performing remote directory preparation (assuming the remote user has the right permissions on the remote server) * 'barman backup' now tries and create server directories before attempting to execute a full backup (#14) * Fixed bug #22: improved documentation for tablespaces relocation * Fixed bug #31: 'barman cron' checks directory permissions for lock file * Fixed bug #32: xlog.db read access during cron activities Version 1.2.3 - 5 September 2013 * Added support for PostgreSQL 9.3 * Added support for the "--target-name" recovery option, which allows to restore to a named point previously specified with pg_create_restore_point (only for PostgreSQL 9.1 and above users) * Fixed bug #27 about flock() usage with barman.lockfile (many thanks to Damon Snyder ) * Introduced Python 3 compatibility Version 1.2.2 - 24 June 2013 * Fix python 2.6 compatibility Version 1.2.1 - 17 June 2013 * Added the "bandwidth_limit" global/server option which allows to limit the I/O bandwidth (in KBPS) for backup and recovery operations * Added the "tablespace_bandwidth_limit" global/server option which allows to limit the I/O bandwidth (in KBPS) for backup and recovery operations on a per tablespace basis * Added /etc/barman/barman.conf as default location * Bug fix: avoid triggering the minimum_redundancy check on FAILED backups (thanks to Jérôme Vanandruel) Version 1.2.0 - 31 Jan 2013 * Added the "retention_policy_mode" global/server option which defines the method for enforcing retention policies (currently only "auto") * Added the "minimum_redundancy" global/server option which defines the minimum number of backups to be kept for a server * Added the "retention_policy" global/server option which defines retention policies management based on redundancy (e.g. REDUNDANCY 4) or recovery window (e.g. RECOVERY WINDOW OF 3 MONTHS) * Added retention policy support to the logging infrastructure, the "check" and the "status" commands * The "check" command now integrates minimum redundancy control * Added retention policy states (valid, obsolete and potentially obsolete) to "show-backup" and "list-backup" commands * The 'all' keyword is now forbidden as server name * Added basic support for Nagios plugin output to the 'check' command through the --nagios option * Barman now requires argh => 0.21.2 and argcomplete- * Minor bug fixes Version 1.1.2 - 29 Nov 2012 * Added "configuration_files_directory" option that allows to include multiple server configuration files from a directory * Support for special backup IDs: latest, last, oldest, first * Management of multiple servers to the 'list-backup' command. 'barman list-backup all' now list backups for all the configured servers. * Added "application_name" management for PostgreSQL >= 9.0 * Fixed bug #18: ignore missing WAL files if not found during delete Version 1.1.1 - 16 Oct 2012 * Fix regressions in recover command. Version 1.1.0 - 12 Oct 2012 * Support for hook scripts to be executed before and after a 'backup' command through the 'pre_backup_script' and 'post_backup_script' configuration options. 
* Management of multiple servers to the 'backup' command. 'barman backup all' now iteratively backs up all the configured servers. * Fixed bug #9: "9.2 issue with pg_tablespace_location()" * Add warning in recovery when file location options have been defined in the postgresql.conf file (issue #10) * Fail fast on recover command if the destination directory contains the ':' character (Closes: #4) or if an invalid tablespace relocation rule is passed * Report an informative message when pg_start_backup() invocation fails because an exclusive backup is already running (Closes: #8) Version 1.0.0 - 6 July 2012 * Backup of multiple PostgreSQL servers, with different versions. Versions from PostgreSQL 8.4+ are supported. * Support for secure remote backup (through SSH) * Management of a catalog of backups for every server, allowing users to easily create new backups, delete old ones or restore them * Compression of WAL files that can be configured on a per server basis using compression/decompression filters, both predefined (gzip and bzip2) or custom * Support for INI configuration file with global and per-server directives. Default location for configuration files are /etc/barman.conf or ~/.barman.conf. The '-c' option allows users to specify a different one * Simple indexing of base backups and WAL segments that does not require a local database * Maintenance mode (invoked through the 'cron' command) which performs ordinary operations such as WAL archival and compression, catalog updates, etc. * Added the 'backup' command which takes a full physical base backup of the given PostgreSQL server configured in Barman * Added the 'recover' command which performs local recovery of a given backup, allowing DBAs to specify a point in time. The 'recover' command supports relocation of both the PGDATA directory and, where applicable, the tablespaces * Added the '--remote-ssh-command' option to the 'recover' command for remote recovery of a backup. Remote recovery does not currently support relocation of tablespaces * Added the 'list-server' command that lists all the active servers that have been configured in barman * Added the 'show-server' command that shows the relevant information for a given server, including all configuration options * Added the 'status' command which shows information about the current state of a server, including Postgres version, current transaction ID, archive command, etc. * Added the 'check' command which returns 0 if everything Barman needs is functioning correctly * Added the 'list-backup' command that lists all the available backups for a given server, including size of the base backup and total size of the related WAL segments * Added the 'show-backup' command that shows the relevant information for a given backup, including time of start, size, number of related WAL segments and their size, etc. 
* Added the 'delete' command which removes a backup from the catalog * Added the 'list-files' command which lists all the files for a single backup * RPM Package for RHEL 5/6 barman-2.18/AUTHORS0000644000621200062120000000235314172556763012153 0ustar 00000000000000Barman maintainers (in alphabetical order): * Abhijit Menon-Sen * Didier Michel * Jane Threefoot * Michael Wallace Past contributors (in alphabetical order): * Anna Bellandi (QA/testing) * Britt Cole (documentation reviewer) * Carlo Ascani (developer) * Francesco Canovai (QA/testing) * Gabriele Bartolini (architect) * Gianni Ciolli (QA/testing) * Giulio Calacoci (developer) * Giuseppe Broccolo (developer) * Jonathan Battiato (QA/testing) * Leonardo Cecchi (developer) * Marco Nenciarini (project leader) * Niccolò Fei (QA/testing) * Rubens Souza (QA/testing) * Stefano Bianucci (developer) Many thanks go to our sponsors (in alphabetical order): * 4Caast - http://4caast.morfeo-project.org/ (Founding sponsor) * Adyen - http://www.adyen.com/ * Agile Business Group - http://www.agilebg.com/ * BIJ12 - http://www.bij12.nl/ * CSI Piemonte - http://www.csipiemonte.it/ (Founding sponsor) * Ecometer - http://www.ecometer.it/ * GestionaleAuto - http://www.gestionaleauto.com/ (Founding sponsor) * Jobrapido - http://www.jobrapido.com/ * Navionics - http://www.navionics.com/ (Founding sponsor) * Sovon Vogelonderzoek Nederland - https://www.sovon.nl/ * Subito.it - http://www.subito.it/ * XCon Internet Services - http://www.xcon.it/ (Founding sponsor) barman-2.18/doc/0000755000621200062120000000000014172556766011650 5ustar 00000000000000barman-2.18/doc/.gitignore0000644000621200062120000000005714172556763013637 0ustar 00000000000000barman-tutorial.en.pdf barman-tutorial.en.html barman-2.18/doc/barman-cloud-wal-restore.1.md0000644000621200062120000000676114172556763017146 0ustar 00000000000000% BARMAN-CLOUD-WAL-RESTORE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-wal-restore - Restore PostgreSQL WAL files from the Cloud using `restore_command` # SYNOPSIS barman-cloud-wal-restore [*OPTIONS*] *SOURCE_URL* *SERVER_NAME* *WAL_NAME* *WAL_PATH* # DESCRIPTION This script can be used as a `restore_command` to download WAL files previously archived with `barman-cloud-wal-archive` command. Currently AWS S3 and Azure Blob Storage are supported. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS SOURCE_URL : URL of the cloud source, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. WAL_NAME : the name of the WAL file, equivalent of '%f' keyword (according to 'restore_command'). WAL_PATH : the value of the '%p' keyword (according to 'restore_command'). # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. 
--credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The requested WAL could not be found 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/Makefile0000644000621200062120000000420714172556763013310 0ustar 00000000000000MANPAGES=barman.1 barman.5 \ barman-wal-archive.1 barman-wal-restore.1 \ barman-cloud-backup.1 \ barman-cloud-backup-delete.1 \ barman-cloud-backup-keep.1 \ barman-cloud-backup-list.1 \ barman-cloud-check-wal-archive.1 \ barman-cloud-wal-archive.1 \ barman-cloud-restore.1 \ barman-cloud-wal-restore.1 SUBDIRS=manual # Detect the pandoc major version (1 or 2) PANDOC_VERSION = $(shell pandoc --version | awk -F '[ .]+' '/^pandoc/{print $$2; exit}') ifeq ($(PANDOC_VERSION),1) SMART = --smart NOSMART_SUFFIX = else SMART = NOSMART_SUFFIX = -smart endif all: $(MANPAGES) $(SUBDIRS) barman.1: $(sort $(wildcard barman.1.d/??-*.md)) pandoc -s -f markdown$(NOSMART_SUFFIX) -t man -o $@ $^ barman.5: $(sort $(wildcard barman.5.d/??-*.md)) pandoc -s -f markdown$(NOSMART_SUFFIX) -t man -o $@ $^ barman-wal-archive.1: barman-wal-archive.1.md pandoc -s -f markdown$(NOSMART_SUFFIX) -t man -o $@ $< barman-wal-restore.1: barman-wal-restore.1.md pandoc -s -f markdown$(NOSMART_SUFFIX) -t man -o $@ $< barman-cloud-backup.1: barman-cloud-backup.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< barman-cloud-backup-delete.1: barman-cloud-backup-delete.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< barman-cloud-backup-keep.1: barman-cloud-backup-keep.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< barman-cloud-backup-list.1: barman-cloud-backup-list.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< barman-cloud-check-wal-archive.1: barman-cloud-check-wal-archive.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< barman-cloud-restore.1: barman-cloud-restore.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< barman-cloud-wal-archive.1: barman-cloud-wal-archive.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< 
barman-cloud-wal-restore.1: barman-cloud-wal-restore.1.md pandoc -s -f markdown$(nosmart_suffix) -t man -o $@ $< clean: rm -f $(MANPAGES) for dir in $(SUBDIRS); do \ $(MAKE) -C $$dir clean; \ done help: @echo "Usage:" @echo " $$ make" subdirs: $(SUBDIRS) $(SUBDIRS): $(MAKE) -C $@ .PHONY: all clean help subdirs $(SUBDIRS) barman-2.18/doc/barman-wal-archive.10000644000621200062120000000474314172556763015377 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-WAL\-ARCHIVE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-wal\-archive \- \f[C]archive_command\f[] based on Barman\[aq]s put\-wal .SH SYNOPSIS .PP barman\-wal\-archive [\f[I]OPTIONS\f[]] \f[I]BARMAN_HOST\f[] \f[I]SERVER_NAME\f[] \f[I]WAL_PATH\f[] .SH DESCRIPTION .PP This script can be used in the \f[C]archive_command\f[] of a PostgreSQL server to ship WAL files to a Barman host using the \[aq]put\-wal\[aq] command (introduced in Barman 2.6). An SSH connection will be opened to the Barman host. \f[C]barman\-wal\-archive\f[] allows the integration of Barman in PostgreSQL clusters for better business continuity results. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B BARMAN_HOST the host of the Barman server. .RS .RE .TP .B SERVER_NAME the server name configured in Barman from which WALs are taken. .RS .RE .TP .B WAL_PATH the value of the \[aq]%p\[aq] keyword (according to \[aq]archive_command\[aq]). .RS .RE .SH OPTIONS .TP .B \-h, \-\-help show a help message and exit .RS .RE .TP .B \-V, \-\-version show program\[aq]s version number and exit .RS .RE .TP .B \-U \f[I]USER\f[], \-\-user \f[I]USER\f[] the user used for the ssh connection to the Barman server. Defaults to \[aq]barman\[aq]. .RS .RE .TP .B \-c \f[I]CONFIG\f[], \-\-config \f[I]CONFIG\f[] configuration file on the Barman server .RS .RE .TP .B \-t, \-\-test test both the connection and the configuration of the requested PostgreSQL server in Barman for WAL retrieval. With this option, the \[aq]WAL_PATH\[aq] mandatory argument is ignored. .RS .RE .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B Not zero Failure .RS .RE .SH SEE ALSO .PP \f[C]barman\f[] (1), \f[C]barman\f[] (5). .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman-cloud-wal-archive.1.md0000644000621200062120000001371414172556763017100 0ustar 00000000000000% BARMAN-CLOUD-WAL-ARCHIVE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-wal-archive - Archive PostgreSQL WAL files in the Cloud using `archive_command` # SYNOPSIS barman-cloud-wal-archive [*OPTIONS*] *DESTINATION_URL* *SERVER_NAME* *WAL_PATH* # DESCRIPTION This script can be used in the `archive_command` of a PostgreSQL server to ship WAL files to the Cloud. Currently AWS S3 and Azure Blob Storage are supported. 
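For example — as a sketch only, where `BUCKET_NAME`, the path and the server name `pg` are placeholders to be replaced with your own values — a minimal `archive_command` in `postgresql.conf` could invoke this script as follows:

```
archive_command = 'barman-cloud-wal-archive s3://BUCKET_NAME/path/to/folder pg %p'
```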
Note: If you are running python 2 or older unsupported versions of python 3 then avoid the compression options `--gzip` or `--bzip2` as barman-cloud-wal-restore is unable to restore gzip-compressed WALs on python < 3.2 or bzip2-compressed WALs on python < 3.3. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS DESTINATION_URL : URL of the cloud destination, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. WAL_PATH : the value of the '%p' keyword (according to 'archive_command'). # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit -z, --gzip : gzip-compress the WAL while uploading to the cloud (should not be used with python < 3.2) -j, --bzip2 : bzip2-compress the WAL while uploading to the cloud (should not be used with python < 3.3) --snappy : snappy-compress the WAL while uploading to the cloud (requires optional python-snappy library and should not be used with python < 3.3) --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded --tags KEY1,VALUE1 KEY2,VALUE2 ... : A space-separated list of comma-separated key-value pairs representing tags to be added to each WAL file archived to cloud storage. --history-tags KEY1,VALUE1 KEY2,VALUE2 ... : A space-separated list of comma-separated key-value pairs representing tags to be added to each history file archived to cloud storage. If this is provided alongside the `--tags` option then the value of `--history-tags` will be used in place of `--tags` for history files. All other WAL files will continue to be tagged with the value of `--tags`. -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. -e, --encryption : the encryption algorithm used when storing the uploaded data in S3 Allowed values: 'AES256'|'aws:kms' --encryption-scope : the name of an encryption scope defined in the Azure Blob Storage service which is to be used to encrypt the data in Azure --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. --max-block-size SIZE : the chunk size to be used when uploading an object to Azure Blob Storage via the concurrent chunk method (default: 4MB). --max-concurrency CONCURRENCY : the maximum number of chunks to be uploaded concurrently to Azure Blob Storage (default: 1). Whether the maximum concurrency is achieved depends on the values of --max-block-size (should be less than or equal to `WAL segment size after compression / max_concurrency`) and --max-single-put-size (must be less than WAL segment size after compression). --max-single-put-size SIZE : maximum size for which the Azure client will upload an object to Azure Blob Storage in a single request (default: 64MB). 
If this is set lower than the WAL segment size after any applied compression then the concurrent chunk upload method for WAL archiving will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The WAL archive operation was not successful 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # SEE ALSO This script can be used in conjunction with `pre_archive_retry_script` to relay WAL files to S3, as follows: ``` pre_archive_retry_script = 'barman-cloud-wal-archive [*OPTIONS*] *DESTINATION_URL* ${BARMAN_SERVER}' ``` # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman.conf0000644000621200062120000000665514172556763013770 0ustar 00000000000000; Barman, Backup and Recovery Manager for PostgreSQL ; http://www.pgbarman.org/ - http://www.enterprisedb.com/ ; ; Main configuration file [barman] ; System user barman_user = barman ; Directory of configuration files. 
Place your sections in separate files with .conf extension ; For example place the 'main' server section in /etc/barman.d/main.conf configuration_files_directory = /etc/barman.d ; Main directory barman_home = /var/lib/barman ; Locks directory - default: %(barman_home)s ;barman_lock_directory = /var/run/barman ; Log location log_file = /var/log/barman/barman.log ; Log level (see https://docs.python.org/3/library/logging.html#levels) log_level = INFO ; Default compression level: possible values are None (default), bzip2, gzip, pigz, pygzip or pybzip2 ;compression = gzip ; Pre/post backup hook scripts ;pre_backup_script = env | grep ^BARMAN ;pre_backup_retry_script = env | grep ^BARMAN ;post_backup_retry_script = env | grep ^BARMAN ;post_backup_script = env | grep ^BARMAN ; Pre/post archive hook scripts ;pre_archive_script = env | grep ^BARMAN ;pre_archive_retry_script = env | grep ^BARMAN ;post_archive_retry_script = env | grep ^BARMAN ;post_archive_script = env | grep ^BARMAN ; Pre/post delete scripts ;pre_delete_script = env | grep ^BARMAN ;pre_delete_retry_script = env | grep ^BARMAN ;post_delete_retry_script = env | grep ^BARMAN ;post_delete_script = env | grep ^BARMAN ; Pre/post wal delete scripts ;pre_wal_delete_script = env | grep ^BARMAN ;pre_wal_delete_retry_script = env | grep ^BARMAN ;post_wal_delete_retry_script = env | grep ^BARMAN ;post_wal_delete_script = env | grep ^BARMAN ; Global bandwidth limit in kilobytes per second - default 0 (meaning no limit) ;bandwidth_limit = 4000 ; Number of parallel jobs for backup and recovery via rsync (default 1) ;parallel_jobs = 1 ; Immediate checkpoint for backup command - default false ;immediate_checkpoint = false ; Enable network compression for data transfers - default false ;network_compression = false ; Number of retries of data copy during base backup after an error - default 0 ;basebackup_retry_times = 0 ; Number of seconds of wait after a failed copy, before retrying - default 30 ;basebackup_retry_sleep = 30 ; Maximum execution time, in seconds, per server ; for a barman check command - default 30 ;check_timeout = 30 ; Time frame that must contain the latest backup date. ; If the latest backup is older than the time frame, barman check ; command will report an error to the user. ; If empty, the latest backup is always considered valid. ; Syntax for this option is: "i (DAYS | WEEKS | MONTHS | HOURS)" where i is an ; integer > 0 which identifies the number of days | weeks | months of ; validity of the latest backup for this check. Also known as 'smelly backup'. ;last_backup_maximum_age = ; Time frame that must contain the latest WAL file ; If the latest WAL file is older than the time frame, barman check ; command will report an error to the user. 
; Syntax for this option is: "i (DAYS | WEEKS | MONTHS | HOURS)" where i is an ; integer > 0 ;last_wal_maximum_age = ; Minimum number of required backups (redundancy) ;minimum_redundancy = 1 ; Global retention policy (REDUNDANCY or RECOVERY WINDOW) ; Examples of retention policies ; Retention policy (disabled, default) ;retention_policy = ; Retention policy (based on redundancy) ;retention_policy = REDUNDANCY 2 ; Retention policy (based on recovery window) ;retention_policy = RECOVERY WINDOW OF 4 WEEKS barman-2.18/doc/manual/0000755000621200062120000000000014172556766013125 5ustar 00000000000000barman-2.18/doc/manual/.gitignore0000644000621200062120000000005314172556763015110 0ustar 00000000000000barman-manual.en.html barman-manual.en.pdf barman-2.18/doc/manual/23-wal_streaming.en.md0000644000621200062120000001234314172556763017126 0ustar 00000000000000## WAL streaming Barman can reduce the Recovery Point Objective (RPO) by allowing users to add continuous WAL streaming from a PostgreSQL server, on top of the standard `archive_command` strategy. Barman relies on [`pg_receivewal`][25], a utility that has been available from PostgreSQL 9.2 which exploits the native streaming replication protocol and continuously receives transaction logs from a PostgreSQL server (master or standby). Prior to PostgreSQL 10, `pg_receivewal` was named `pg_receivexlog`. > **IMPORTANT:** > Barman requires that `pg_receivewal` is installed on the same > server. For PostgreSQL 9.2 servers, you need `pg_receivexlog` of > version 9.2 installed alongside Barman. For PostgreSQL 9.3 and > above, it is recommended to install the latest available version of > `pg_receivewal`, as it is back compatible. Otherwise, users can > install multiple versions of `pg_receivewal`/`pg_receivexlog` on the Barman server > and properly point to the specific version for a server, using the > `path_prefix` option in the configuration file. In order to enable streaming of transaction logs, you need to: 1. setup a streaming connection as previously described 2. set the `streaming_archiver` option to `on` The `cron` command, if the aforementioned requirements are met, transparently manages log streaming through the execution of the `receive-wal` command. This is the recommended scenario. However, users can manually execute the `receive-wal` command: ``` bash barman receive-wal ``` > **NOTE:** > The `receive-wal` command is a foreground process. Transaction logs are streamed directly in the directory specified by the `streaming_wals_directory` configuration option and are then archived by the `archive-wal` command. Unless otherwise specified in the `streaming_archiver_name` parameter, and only for PostgreSQL 9.3 or above, Barman will set `application_name` of the WAL streamer process to `barman_receive_wal`, allowing you to monitor its status in the `pg_stat_replication` system view of the PostgreSQL server. ### Replication slots > **IMPORTANT:** replication slots are available since PostgreSQL 9.4 Replication slots are an automated way to ensure that the PostgreSQL server will not remove WAL files until they were received by all archivers. Barman uses this mechanism to receive the transaction logs from PostgreSQL. You can find more information about replication slots in the [PostgreSQL manual][replication-slots]. You can even base your backup architecture on streaming connection only. This scenario is useful to configure Docker-based PostgreSQL servers and even to work with PostgreSQL servers running on Windows. 
> **IMPORTANT:** > In this moment, the Windows support is still experimental, as it is > not yet part of our continuous integration system. ### How to configure the WAL streaming First, the PostgreSQL server must be configured to stream the transaction log files to the Barman server. To configure the streaming connection from Barman to the PostgreSQL server you need to enable the `streaming_archiver`, as already said, including this line in the server configuration file: ``` ini streaming_archiver = on ``` If you plan to use replication slots (recommended), another essential option for the setup of the streaming-based transaction log archiving is the `slot_name` option: ``` ini slot_name = barman ``` This option defines the name of the replication slot that will be used by Barman. It is mandatory if you want to use replication slots. When you configure the replication slot name, you can manually create a replication slot for Barman with this command: ``` bash barman@backup$ barman receive-wal --create-slot pg Creating physical replication slot 'barman' on server 'pg' Replication slot 'barman' created ``` Starting with Barman 2.10, you can configure Barman to automatically create the replication slot by setting: ``` ini create_slot = auto ``` ### Limitations of partial WAL files with recovery The standard behaviour of `pg_receivewal` is to write transactional information in a file with `.partial` suffix after the WAL segment name. Barman expects a partial file to be in the `streaming_wals_directory` of a server. When completed, `pg_receivewal` removes the `.partial` suffix and opens the following one, delivering the file to the `archive-wal` command of Barman for permanent storage and compression. In case of a sudden and unrecoverable failure of the master PostgreSQL server, the `.partial` file that has been streamed to Barman contains very important information that the standard archiver (through PostgreSQL's `archive_command`) has not been able to deliver to Barman. As of Barman 2.10, the `get-wal` command is able to return the content of the current `.partial` WAL file through the `--partial/-P` option. This is particularly useful in the case of recovery, both full or to a point in time. Therefore, in case you run a `recover` command with `get-wal` enabled, and without `--standby-mode`, Barman will automatically add the `-P` option to `barman-wal-restore` (which will then relay that to the remote `get-wal` command) in the `restore_command` recovery option. `get-wal` will also search in the `incoming` directory, in case a WAL file has already been shipped to Barman, but not yet archived. 
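As a sketch only, assuming a Barman host named `backup` and a server configured in Barman as `pg` (the same placeholder names used elsewhere in this manual), a `restore_command` that also relays partial WAL files through `get-wal` might look like this:

``` ini
restore_command = 'barman-wal-restore -P backup pg %f %p'
```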
barman-2.18/doc/manual/Makefile0000644000621200062120000000123314172556763014561 0ustar 00000000000000DOCS = barman-manual.en.pdf barman-manual.en.html MDS = $(sort $(wildcard ??-*.en.md)) # Detect the pandoc major version (1 or 2) PANDOC_VERSION = $(shell pandoc --version | awk -F '[ .]+' '/^pandoc/{print $$2; exit}') ifeq ($(PANDOC_VERSION),1) SMART = --smart NOSMART_SUFFIX = else SMART = NOSMART_SUFFIX = -smart endif all: $(DOCS) barman-manual.en.pdf: $(MDS) ../images/*.png pandoc -o $@ -s -f markdown$(NOSMART_SUFFIX) --toc $(MDS) barman-manual.en.html: $(MDS) ../images/*.png pandoc -o $@ -s -f markdown$(NOSMART_SUFFIX) --toc -t html5 $(MDS) clean: rm -f $(DOCS) help: @echo "Usage:" @echo " $$ make" .PHONY: all clean help barman-2.18/doc/manual/50-feature-details.en.md0000644000621200062120000010275314172556763017355 0ustar 00000000000000\newpage # Features in detail In this section we present several Barman features and discuss their applicability and the configuration required to use them. This list is not exhaustive, as many scenarios can be created working on the Barman configuration. Nevertheless, it is useful to discuss common patterns. ## Backup features ### Incremental backup Barman implements **file-level incremental backup**. Incremental backup is a type of full periodic backup which only saves data changes from the latest full backup available in the catalog for a specific PostgreSQL server. It must not be confused with differential backup, which is implemented by _WAL continuous archiving_. > **NOTE:** Block level incremental backup will be available in > future versions. > **IMPORTANT:** The `reuse_backup` option can't be used with the > `postgres` backup method at this time. The main goals of incremental backups in Barman are: - Reduce the time taken for the full backup process - Reduce the disk space occupied by several periodic backups (**data deduplication**) This feature heavily relies on `rsync` and [hard links][8], which must therefore be supported by both the underlying operating system and the file system where the backup data resides. The main concept is that a subsequent base backup will share those files that have not changed since the previous backup, leading to relevant savings in disk usage. This is particularly true of VLDB contexts and of those databases containing a high percentage of _read-only historical tables_. Barman implements incremental backup through a global/server option called `reuse_backup`, that transparently manages the `barman backup` command. It accepts three values: - `off`: standard full backup (default) - `link`: incremental backup, by reusing the last backup for a server and creating a hard link of the unchanged files (for backup space and time reduction) - `copy`: incremental backup, by reusing the last backup for a server and creating a copy of the unchanged files (just for backup time reduction) The most common scenario is to set `reuse_backup` to `link`, as follows: ``` ini reuse_backup = link ``` Setting this at global level will automatically enable incremental backup for all your servers. As a final note, users can override the setting of the `reuse_backup` option through the `--reuse-backup` runtime option for the `barman backup` command. Similarly, the runtime option accepts three values: `off`, `link` and `copy`. 
For example, you can run a one-off incremental backup as follows: ``` bash barman backup --reuse-backup=link ``` ### Limiting bandwidth usage It is possible to limit the usage of I/O bandwidth through the `bandwidth_limit` option (global/per server), by specifying the maximum number of kilobytes per second. By default it is set to 0, meaning no limit. > **IMPORTANT:** the `bandwidth_limit` option is supported with the > `postgres` backup method for Postgres 9.4 and above, but the > `tablespace_bandwidth_limit` option is available only if you use > `rsync` In case you have several tablespaces and you prefer to limit the I/O workload of your backup procedures on one or more tablespaces, you can use the `tablespace_bandwidth_limit` option (global/per server): ``` ini tablespace_bandwidth_limit = tbname:bwlimit[, tbname:bwlimit, ...] ``` The option accepts a comma separated list of pairs made up of the tablespace name and the bandwidth limit (in kilobytes per second). When backing up a server, Barman will try and locate any existing tablespace in the above option. If found, the specified bandwidth limit will be enforced. If not, the default bandwidth limit for that server will be applied. ### Network Compression It is possible to reduce the size of transferred data using compression. It can be enabled using the `network_compression` option (global/per server): > **IMPORTANT:** the `network_compression` option is not available > with the `postgres` backup method. ``` ini network_compression = true|false ``` Setting this option to `true` will enable data compression during network transfers (for both backup and recovery). By default it is set to `false`. ### Concurrent Backup and backup from a standby Normally, during backup operations, Barman uses PostgreSQL native functions `pg_start_backup` and `pg_stop_backup` for _exclusive backup_. These operations are not allowed on a read-only standby server. Barman is also capable of performing backups of PostgreSQL from 9.2 or greater database servers in a **concurrent way**, primarily through the `backup_options` configuration parameter.[^ABOUT_CONCURRENT_BACKUP] [^ABOUT_CONCURRENT_BACKUP]: Concurrent backup is a technology that has been available in PostgreSQL since version 9.2, through the _streaming replication protocol_ (for example, using a tool like `pg_basebackup`). This introduces a new architecture scenario with Barman: **backup from a standby server**, using `rsync`. > **IMPORTANT:** **Concurrent backup** requires users of PostgreSQL > 9.2, 9.3, 9.4, and 9.5 to install the `pgespresso` open source > extension on every PostgreSQL server of the cluster. For more > detailed information and the source code, please visit the > [pgespresso extension website][9]. Barman supports the new API > introduced in PostgreSQL 9.6. This removes the requirement of the > `pgespresso` extension to perform concurrent backups from this > version of PostgreSQL. By default, `backup_options` is transparently set to `exclusive_backup` for backwards compatibility reasons. Users of PostgreSQL 9.6 and later versions should set `backup_options` to `concurrent_backup`. > **IMPORTANT:** When PostgreSQL 9.5 is declared EOL by the Community, > Barman will by default set `backup_options` to `concurrent_backup`. > Support for `pgespresso` will be ceased then. 
When `backup_options` is set to `concurrent_backup`, Barman activates the _concurrent backup mode_ for a server and follows these two simple rules: - `ssh_command` must point to the destination Postgres server - `conninfo` must point to a database on the destination Postgres database. Using PostgreSQL 9.2, 9.3, 9.4, and 9.5, `pgespresso` must be correctly installed through `CREATE EXTENSION`. Using 9.6 or greater, concurrent backups are executed through the Postgres native API (which requires an active connection from the start to the stop of the backup). > **IMPORTANT:** In case of a concurrent backup, currently Barman > cannot determine whether the closing WAL file of a full backup has > actually been shipped - opposite of an exclusive backup > where PostgreSQL itself makes sure that the WAL file is correctly > archived. Be aware that the full backup cannot be considered > consistent until that WAL file has been received and archived by > Barman. Barman 2.5 introduces a new state, called `WAITING_FOR_WALS`, > which is managed by the `check-backup` command (part of the > ordinary maintenance job performed by the `cron` command). > From Barman 2.10, you can use the `--wait` option with `barman backup` > command. #### Current limitations on backup from standby Barman currently requires that backup data (base backups and WAL files) come from one server only. Therefore, in case of backup from a standby, you should point to the standby server: - `conninfo` - `streaming_conninfo`, if you use `postgres` as `backup_method` and/or rely on WAL streaming - `ssh_command`, if you use `rsync` as `backup_method` > **IMPORTANT:** From Barman 2.8, backup from a standby is supported > only for PostgreSQL 9.4 or higher (versions 9.4 and 9.5 require > `pgespresso`). Support for 9.2 and 9.3 is deprecated. The recommended and simplest way is to setup WAL streaming with replication slots directly from the standby, which requires PostgreSQL 9.4. This means: * configure `streaming_archiver = on`, as described in the "WAL streaming" section, including "Replication slots" * disable `archiver = on` Alternatively, from PostgreSQL 9.5 you can decide to archive from the standby only using `archive_command` with `archive_mode = always` and by disabling WAL streaming. > **NOTE:** Unfortunately, it is not currently possible to enable both WAL archiving > and streaming from the standby due to the way Barman performs WAL duplication > checks and [an undocumented behaviours in all versions of PostgreSQL](https://www.postgresql.org/message-id/20170316170513.1429.77904@wrigleys.postgresql.org). ### Immediate checkpoint Before starting a backup, Barman requests a checkpoint, which generates additional workload. Normally that checkpoint is throttled according to the settings for workload control on the PostgreSQL server, which means that the backup could be delayed. This default behaviour can be changed through the `immediate_checkpoint` configuration global/server option (set to `false` by default). If `immediate_checkpoint` is set to `true`, PostgreSQL will not try to limit the workload, and the checkpoint will happen at maximum speed, starting the backup as soon as possible. At any time, you can override the configuration option behaviour, by issuing `barman backup` with any of these two options: - `--immediate-checkpoint`, which forces an immediate checkpoint; - `--no-immediate-checkpoint`, which forces to wait for the checkpoint to happen. 
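For example, to force an immediate checkpoint for a single backup of the hypothetical `pg` server, without changing the configuration:

``` bash
barman backup --immediate-checkpoint pg
```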
### Local backup > **DISCLAIMER:** This feature is not recommended for production usage, > as Barman and PostgreSQL reside on the same server and are part of > the same single point of failure. > Some EnterpriseDB customers have requested to add support for > local backup to Barman to be used under specific circumstances > and, most importantly, under the 24/7 production service delivered > by the company. Using this feature currently requires installation > from sources, or to customise the environment for the `postgres` > user in terms of permissions as well as logging and cron configurations. Under special circumstances, Barman can be installed on the same server where the PostgreSQL instance resides, with backup data stored on a separate volume from PGDATA and, where applicable, tablespaces. Usually, these volumes reside on network storage appliances, with filesystems like NFS. This architecture is not endorsed by EnterpriseDB. For an enhanced business continuity experience of PostgreSQL, with better results in terms of RPO and RTO, EnterpriseDB still recommends the shared nothing architecture with a remote installation of Barman, capable of acting like a witness server for replication and monitoring purposes. The only requirement for local backup is that Barman runs with the same user as the PostgreSQL server, which is normally `postgres`. Given that the Community packages by default install Barman under the `barman` user, this use case requires manual installation procedures that include: - cron configurations - log configurations, including logrotate In order to use local backup for a given server in Barman, you need to set `backup_method` to `local-rsync`. The feature is essentially identical to its `rsync` equivalent, which relies on SSH instead and operates remotely. With `local-rsync` file system copy is performed issuing `rsync` commands locally (for this reason it is required that Barman runs with the same user as PostgreSQL). An excerpt of configuration for local backup for a server named `local-pg13` is: ```ini [local-pg13] description = "Local PostgreSQL 13" backup_method = local-rsync ... ``` ## Archiving features ### WAL compression The `barman cron` command will compress WAL files if the `compression` option is set in the configuration file. This option allows five values: - `bzip2`: for Bzip2 compression (requires the `bzip2` utility) - `gzip`: for Gzip compression (requires the `gzip` utility) - `pybzip2`: for Bzip2 compression (uses Python's internal compression module) - `pygzip`: for Gzip compression (uses Python's internal compression module) - `pigz`: for Pigz compression (requires the `pigz` utility) - `custom`: for custom compression, which requires you to set the following options as well: - `custom_compression_filter`: a compression filter - `custom_decompression_filter`: a decompression filter - `custom_compression_magic`: a hex string to identify a custom compressed wal file > *NOTE:* All methods but `pybzip2` and `pygzip` require `barman > archive-wal` to fork a new process. ### Synchronous WAL streaming > **IMPORTANT:** This feature is available only from PostgreSQL 9.5 > and above. Barman can also reduce the Recovery Point Objective to zero, by collecting the transaction WAL files like a synchronous standby server would. 
To configure such a scenario, the Barman server must be configured to archive WALs via the [streaming connection](#postgresql-streaming-connection), and the `receive-wal` process should figure as a synchronous standby of the PostgreSQL server. First of all, you need to retrieve the application name of the Barman `receive-wal` process with the `show-servers` command: ``` bash barman@backup$ barman show-servers pg|grep streaming_archiver_name streaming_archiver_name: barman_receive_wal ``` Then the application name should be added to the `postgresql.conf` file as a synchronous standby: ``` ini synchronous_standby_names = 'barman_receive_wal' ``` > **IMPORTANT:** this is only an example of configuration, to show you that > Barman is eligible to be a synchronous standby node. > We are not suggesting to use ONLY Barman. > You can read _["Synchronous Replication"][synch]_ from the PostgreSQL > documentation for further information on this topic. The PostgreSQL server needs to be restarted for the configuration to be reloaded. If the server has been configured correctly, the `replication-status` command should show the `receive_wal` process as a synchronous streaming client: ``` bash [root@backup ~]# barman replication-status pg Status of streaming clients for server 'pg': Current xlog location on master: 0/9000098 Number of streaming clients: 1 1. #1 Sync WAL streamer Application name: barman_receive_wal Sync stage : 3/3 Remote write Communication : TCP/IP IP Address : 139.59.135.32 / Port: 58262 / Host: - User name : streaming_barman Current state : streaming (sync) Replication slot: barman WAL sender PID : 2501 Started at : 2016-09-16 10:33:01.725883+00:00 Sent location : 0/9000098 (diff: 0 B) Write location : 0/9000098 (diff: 0 B) Flush location : 0/9000098 (diff: 0 B) ``` ## Catalog management features ### Minimum redundancy safety You can define the minimum number of periodic backups for a PostgreSQL server, using the global/per server configuration option called `minimum_redundancy`, by default set to 0. By setting this value to any number greater than 0, Barman makes sure that at any time you will have at least that number of backups in a server catalog. This will protect you from accidental `barman delete` operations. > **IMPORTANT:** > Make sure that your retention policy settings do not collide with > minimum redundancy requirements. Regularly check Barman's log for > messages on this topic. ### Retention policies Barman supports **retention policies** for backups. A backup retention policy is a user-defined policy that determines how long backups and related archive logs (Write Ahead Log segments) need to be retained for recovery procedures. Based on the user's request, Barman retains the periodic backups required to satisfy the current retention policy and any archived WAL files required for the complete recovery of those backups. Barman users can define a retention policy in terms of **backup redundancy** (how many periodic backups) or a **recovery window** (how long). Retention policy based on redundancy : In a redundancy based retention policy, the user determines how many periodic backups to keep. A redundancy-based retention policy is contrasted with retention policies that use a recovery window. 
Retention policy based on recovery window
:   A recovery window is one type of Barman backup retention policy, in which
    the DBA specifies a period of time and Barman ensures retention of backups
    and/or archived WAL files required for point-in-time recovery to any time
    during the recovery window. The interval always ends with the current time
    and extends back in time for the number of days specified by the user. For
    example, if the retention policy is set for a recovery window of seven
    days, and the current time is 9:30 AM on Friday, Barman retains the
    backups required to allow point-in-time recovery back to 9:30 AM on the
    previous Friday.

#### Scope

Retention policies can be defined for:

- **PostgreSQL periodic base backups**: through the `retention_policy`
  configuration option
- **Archive logs**, for Point-In-Time-Recovery: through the
  `wal_retention_policy` configuration option

> **IMPORTANT:**
> In a temporal dimension, archive logs must be included in the time
> window of periodic backups.

There are two typical use cases here: full or partial point-in-time
recovery.

Full point-in-time recovery scenario
:   Base backups and archive logs share the same retention policy, allowing
    you to recover at any point in time from the first available backup.

Partial point-in-time recovery scenario
:   Base backup retention policy is wider than that of archive logs, for
    example allowing users to keep full, weekly backups of the last 6 months,
    but archive logs for the last 4 weeks (allowing you to recover at any
    point in time starting from the last 4 periodic weekly backups).

> **IMPORTANT:**
> Currently, Barman implements only the **full point in time
> recovery** scenario, by constraining the `wal_retention_policy`
> option to `main`.

#### How they work

Retention policies in Barman can be:

- **automated**: enforced by `barman cron`
- **manual**: Barman simply reports obsolete backups and allows you
  to delete them

> **IMPORTANT:**
> Currently Barman does not implement manual enforcement. This feature
> will be available in future versions.

#### Configuration and syntax

Retention policies can be defined through the following configuration
options:

- `retention_policy`: for base backup retention
- `wal_retention_policy`: for archive logs retention
- `retention_policy_mode`: can only be set to `auto` (retention policies are
  automatically enforced by the `barman cron` command)

These configuration options can be defined both at a global level and a
server level, allowing users maximum flexibility in a multi-server
environment.

##### Syntax for `retention_policy`

The general syntax for a base backup retention policy through
`retention_policy` is the following:

``` ini
retention_policy = {REDUNDANCY value | RECOVERY WINDOW OF value {DAYS | WEEKS | MONTHS}}
```

Where:

- syntax is case insensitive
- `value` is an integer and is > 0
- in case of **redundancy retention policy**:
    - `value` must be greater than or equal to the server minimum redundancy
      level (if it is not, a warning is generated)
    - the first valid backup is the `value`-th backup in a reverse ordered
      time series
- in case of **recovery window policy**:
    - the point of recoverability is: current time - window
    - the first valid backup is the first available backup before the point
      of recoverability; its value in a reverse ordered time series must be
      greater than or equal to the server minimum redundancy level (if it is
      not, a warning is generated)

By default, `retention_policy` is empty (no retention enforced).
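For example, a per-server excerpt (shown here purely as an illustrative
sketch) that keeps enough data to recover to any point in the last four
weeks, while never dropping below two backups, could look like this:

``` ini
[pg]
; keep backups and WALs needed for PITR over the last 4 weeks
retention_policy = RECOVERY WINDOW OF 4 WEEKS
; never fall below two backups, regardless of the policy above
minimum_redundancy = 2
```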
##### Syntax for `wal_retention_policy`

Currently, the only allowed value for `wal_retention_policy` is the special
value `main`, which maps the retention policy of archive logs to that of
base backups.

## Hook scripts

Barman allows a database administrator to run hook scripts on the following
events:

- before and after a backup
- before and after the deletion of a backup
- before and after a WAL file is archived
- before and after a WAL file is deleted

There are two types of hook scripts that Barman can manage:

- standard hook scripts
- retry hook scripts

The only difference between these two types of hook scripts is that Barman
executes a standard hook script only once, without checking its return code,
whereas a retry hook script may be executed more than once, depending on its
return code.

Specifically, when executing a retry hook script, Barman checks the return
code and retries indefinitely until the script returns either `SUCCESS`
(with standard return code `0`), or `ABORT_CONTINUE` (return code `62`), or
`ABORT_STOP` (return code `63`). Barman treats any other return code as a
transient failure to be retried. This gives users more control: a hook
script can drive its own workflow by signalling whether a failure is
transient. Also, in case of a 'pre' hook script, by returning `ABORT_STOP`,
users can request Barman to interrupt the main operation with a failure.

Hook scripts are executed in the following order:

1. The standard 'pre' hook script (if present)
2. The retry 'pre' hook script (if present)
3. The actual event (i.e. backup operation, or WAL archiving), if the retry
   'pre' hook script was not aborted with `ABORT_STOP`
4. The retry 'post' hook script (if present)
5. The standard 'post' hook script (if present)

The output generated by any hook script is written to Barman's log file.

> **NOTE:**
> Currently, `ABORT_STOP` is ignored by retry 'post' hook scripts. In
> these cases, apart from logging an additional warning, `ABORT_STOP`
> will behave like `ABORT_CONTINUE`.

### Backup scripts

These scripts can be configured with the following global configuration
options (which can be overridden on a per server basis):

- `pre_backup_script`: _hook script_ executed _before_ a base backup, only
  once, with no check on the exit code
- `pre_backup_retry_script`: _retry hook script_ executed _before_ a base
  backup, repeatedly until success or abort
- `post_backup_retry_script`: _retry hook script_ executed _after_ a base
  backup, repeatedly until success or abort
- `post_backup_script`: _hook script_ executed _after_ a base backup, only
  once, with no check on the exit code

The script definition is passed to a shell and can return any exit code.
Only in the case of a _retry_ script does Barman check the return code (see
the [hook script section](#hook_scripts)).

The shell environment will contain the following variables:

- `BARMAN_BACKUP_DIR`: backup destination directory
- `BARMAN_BACKUP_ID`: ID of the backup
- `BARMAN_CONFIGURATION`: configuration file used by Barman
- `BARMAN_ERROR`: error message, if any (only for the `post` phase)
- `BARMAN_PHASE`: phase of the script, either `pre` or `post`
- `BARMAN_PREVIOUS_ID`: ID of the previous backup (if present)
- `BARMAN_RETRY`: `1` if it is a retry script, `0` if not
- `BARMAN_SERVER`: name of the server
- `BARMAN_STATUS`: status of the backup
- `BARMAN_VERSION`: version of Barman
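As an illustration, the following is a minimal sketch of a retry hook script
that mirrors a finished backup to a second location. The script path, the
destination host and the mirroring approach are purely hypothetical and
would need to be adapted; it could, for instance, be configured as
`post_backup_retry_script = /etc/barman.d/hooks/mirror_backup.sh`:

``` bash
#!/bin/bash
# Hypothetical post-backup retry hook: mirror the backup directory elsewhere.
# Exit codes follow the retry hook script convention described above:
#   0 = SUCCESS, 62 = ABORT_CONTINUE, 63 = ABORT_STOP,
#   anything else = transient failure, which Barman will retry.
set -u

# Act only in the 'post' phase and only on successfully completed backups
if [ "${BARMAN_PHASE:-}" != "post" ] || [ "${BARMAN_STATUS:-}" != "DONE" ]; then
    exit 0
fi

# mirror-host and the target path are placeholders
if rsync -a "${BARMAN_BACKUP_DIR}/" \
    "mirror-host:/srv/barman-mirror/${BARMAN_SERVER}/${BARMAN_BACKUP_ID}/"; then
    exit 0      # SUCCESS
else
    exit 62     # ABORT_CONTINUE: give up on mirroring, let Barman carry on
fi
```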
### Backup delete scripts

Version **2.4** introduces pre and post backup delete scripts.

As with the previous scripts, backup delete scripts can be configured with
global configuration options, and it is possible to override them on a per
server basis:

- `pre_delete_script`: _hook script_ launched _before_ the deletion of a
  backup, only once, with no check on the exit code
- `pre_delete_retry_script`: _retry hook script_ executed _before_ the
  deletion of a backup, repeatedly until success or abort
- `post_delete_retry_script`: _retry hook script_ executed _after_ the
  deletion of a backup, repeatedly until success or abort
- `post_delete_script`: _hook script_ launched _after_ the deletion of a
  backup, only once, with no check on the exit code

The script is executed through a shell and can return any exit code. Only in
the case of a _retry_ script does Barman check the return code (see the
section above).

Delete scripts use the same environment variables as backup scripts, plus:

- `BARMAN_NEXT_ID`: ID of the next backup (if present)

### WAL archive scripts

Similar to backup scripts, archive scripts can be configured with global
configuration options (which can be overridden on a per server basis):

- `pre_archive_script`: _hook script_ executed _before_ a WAL file is
  archived by maintenance (usually `barman cron`), only once, with no check
  on the exit code
- `pre_archive_retry_script`: _retry hook script_ executed _before_ a WAL
  file is archived by maintenance (usually `barman cron`), repeatedly until
  it is successful or aborted
- `post_archive_retry_script`: _retry hook script_ executed _after_ a WAL
  file is archived by maintenance, repeatedly until it is successful or
  aborted
- `post_archive_script`: _hook script_ executed _after_ a WAL file is
  archived by maintenance, only once, with no check on the exit code

The script is executed through a shell and can return any exit code. Only in
the case of a _retry_ script does Barman check the return code (see the
section above).

Archive scripts share some environment variables with backup scripts:

- `BARMAN_CONFIGURATION`: configuration file used by Barman
- `BARMAN_ERROR`: error message, if any (only for the `post` phase)
- `BARMAN_PHASE`: phase of the script, either `pre` or `post`
- `BARMAN_SERVER`: name of the server

The following variables are specific to archive scripts:

- `BARMAN_SEGMENT`: name of the WAL file
- `BARMAN_FILE`: full path of the WAL file
- `BARMAN_SIZE`: size of the WAL file
- `BARMAN_TIMESTAMP`: WAL file timestamp
- `BARMAN_COMPRESSION`: type of compression used for the WAL file

### WAL delete scripts

Version **2.4** introduces pre and post WAL delete scripts.

Similarly to the other hook scripts, WAL delete scripts can be configured
with global configuration options, and it is possible to override them on a
per server basis:

- `pre_wal_delete_script`: _hook script_ executed _before_ the deletion of a
  WAL file
- `pre_wal_delete_retry_script`: _retry hook script_ executed _before_ the
  deletion of a WAL file, repeatedly until it is successful or aborted
- `post_wal_delete_retry_script`: _retry hook script_ executed _after_ the
  deletion of a WAL file, repeatedly until it is successful or aborted
- `post_wal_delete_script`: _hook script_ executed _after_ the deletion of a
  WAL file

The script is executed through a shell and can return any exit code. Only in
the case of a _retry_ script does Barman check the return code (see the
section above).

WAL delete scripts use the same environment variables as WAL archive
scripts.

### Recovery scripts

Version **2.4** introduces pre and post recovery scripts.
As with the previous scripts, recovery scripts can be configured with global
configuration options, and it is possible to override them on a per server
basis:

- `pre_recovery_script`: _hook script_ launched _before_ the recovery of a
  backup, only once, with no check on the exit code
- `pre_recovery_retry_script`: _retry hook script_ executed _before_ the
  recovery of a backup, repeatedly until success or abort
- `post_recovery_retry_script`: _retry hook script_ executed _after_ the
  recovery of a backup, repeatedly until success or abort
- `post_recovery_script`: _hook script_ launched _after_ the recovery of a
  backup, only once, with no check on the exit code

The script is executed through a shell and can return any exit code. Only in
the case of a _retry_ script does Barman check the return code (see the
section above).

Recovery scripts use the same environment variables as backup scripts, plus:

- `BARMAN_DESTINATION_DIRECTORY`: the directory where the new instance is
  recovered
- `BARMAN_TABLESPACES`: tablespace relocation map (JSON, if present)
- `BARMAN_REMOTE_COMMAND`: secure shell command used by the recovery (if
  present)
- `BARMAN_RECOVER_OPTIONS`: additional recovery options (JSON, if present)

## Customization

### Lock file directory

Barman allows you to specify a directory for lock files through the
`barman_lock_directory` global option.

Lock files are used to coordinate concurrent work at global and server level
(for example, cron operations, backup operations, access to the WAL archive,
and so on).

By default (for backward compatibility reasons), `barman_lock_directory` is
set to `barman_home`.

> **TIP:**
> Users are encouraged to use a directory in a volatile partition,
> such as the one dedicated to run-time variable data (e.g.
> `/var/run/barman`).

### Binary paths

As of version 1.6.0, Barman allows users to specify one or more directories
where Barman looks for executable files, using the global/server option
`path_prefix`.

If a `path_prefix` is provided, it must contain a list of one or more
directories separated by a colon. Barman will search inside these
directories first, then in those specified by the `PATH` environment
variable. By default, the `path_prefix` option is empty.

## Integration with cluster management systems

Barman has been designed for integration with standby servers (with
streaming replication or traditional file-based log shipping) and high
availability tools like [repmgr][repmgr].

From an architectural point of view, PostgreSQL must be configured to
archive WAL files directly to the Barman server. Barman, thanks to the
`get-wal` framework, can also be used as a WAL hub. For this purpose, you
can use the `barman-wal-restore` script, part of the `barman-cli` package,
with all your standby servers.

The `replication-status` command allows you to get information about any
streaming client attached to the managed server, in particular hot standby
servers and WAL streamers.

## Parallel jobs

By default, Barman uses only one worker for file copy during both backup and
recover operations. Starting from version 2.2, it is possible to customize
the number of workers that will perform file copy. In this case, the files
to be copied will be equally distributed among all parallel workers.

It can be configured at global and server scope by adding the following to
the corresponding configuration file:

``` ini
parallel_jobs = n
```

where `n` is the desired number of parallel workers to be used in file copy
operations. The default value is 1.
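For instance, a hypothetical per-server excerpt that dedicates four workers
to the server `pg` could be:

``` ini
[pg]
; use four parallel rsync workers when copying files for this server
parallel_jobs = 4
```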
In any case, users can override this value at run-time when executing
`backup` or `recover` commands. For example, you can use 4 parallel workers
as follows:

``` bash
barman backup --jobs 4 server1
```

Or, alternatively:

``` bash
barman backup -j 4 server1
```

Please note that this parallel jobs feature is only available for servers
configured through `rsync`/SSH. For servers configured through the streaming
protocol, Barman will rely on `pg_basebackup`, which is currently limited to
only one worker.

## Geographical redundancy

It is possible to set up **cascading backup architectures** with Barman,
where the source of a backup server is a Barman installation rather than a
PostgreSQL server.

This feature allows users to transparently keep _geographically distributed_
copies of PostgreSQL backups.

In Barman jargon, a backup server that is connected to a Barman
installation, rather than a PostgreSQL server, is defined as a **passive
node**.

A passive node is configured through the `primary_ssh_command` option,
available both at global level (for a full replica of a primary Barman
installation) and at server level (for mixed scenarios, having both _direct_
and _passive_ servers).

### Sync information

The `barman sync-info` command is used to collect information regarding the
current status of a Barman server that is useful for synchronisation
purposes. The available syntax is the following:

``` bash
barman sync-info [--primary] <server_name> [<last_wal> [<last_position>]]
```

The command returns a JSON object containing:

- A map with all the backups having status `DONE` for that server
- A list with all the archived WAL files
- The configuration for the server
- The last read position (in the _xlog database file_)
- The name of the last read WAL file

The JSON response contains all the required information for the
synchronisation between the `master` and a `passive` node.

If `--primary` is specified, the command is executed on the defined primary
node, rather than locally.

### Configuration

Configuring a server as a `passive node` is a quick operation. Simply add
the following option to the server configuration:

``` ini
primary_ssh_command = ssh barman@primary_barman
```

This option specifies the SSH connection parameters to the primary server,
identifying the source of the backup data for the passive server.

If you are invoking barman with the `-c/--config` option and you want to use
the same option when the passive node invokes barman on the primary node,
then add the following option:

``` ini
forward_config_path = true
```

### Node synchronisation

When a node is marked as `passive` it is treated in a special way by Barman:

- it is excluded from standard maintenance operations
- direct operations to PostgreSQL are forbidden, including `barman backup`

Synchronisation between a passive server and its primary is automatically
managed by `barman cron`, which will transparently invoke:

1. `barman sync-info --primary`, in order to collect synchronisation
   information
2. `barman sync-backup`, in order to create a local copy of every backup
   that is available on the primary node
3. `barman sync-wals`, in order to copy locally all the WAL files available
   on the primary node
### Manual synchronisation

Although `barman cron` automatically manages passive/primary node
synchronisation, it is possible to manually trigger synchronisation of a
backup through:

``` bash
barman sync-backup <server_name> <backup_id>
```

When `sync-backup` is launched, Barman will use `primary_ssh_command` to
connect to the primary server; then, if the backup is present on the remote
machine, it will begin to copy all the files using rsync. Only one
synchronisation process per backup is allowed.

WAL files can also be synchronised, through:

``` bash
barman sync-wals <server_name>
```

barman-2.18/doc/manual/00-head.en.md0000644000621200062120000000142214172556763015162 0ustar 00000000000000% Barman Manual
% EnterpriseDB UK Limited
% January 21, 2022 (2.18)

**Barman** (Backup and Recovery Manager) is an open-source administration
tool for disaster recovery of PostgreSQL servers written in Python. It
allows your organisation to perform remote backups of multiple servers in
business critical environments to reduce risk and help DBAs during the
recovery phase.

[Barman][11] is distributed under GNU GPL 3 and maintained by
[EnterpriseDB][13], a platinum sponsor of the [PostgreSQL project][31].

> **IMPORTANT:** \newline
> This manual assumes that you are familiar with theoretical disaster
> recovery concepts, and that you have a grasp of PostgreSQL fundamentals in
> terms of physical backup and disaster recovery. See section
> _"Before you start"_ below for details.

barman-2.18/doc/manual/22-config_file.en.md0000644000621200062120000000153614172556763016537 0ustar 00000000000000## The server configuration file

Create a new file, called `pg.conf`, in the `/etc/barman.d` directory, with
the following content:

``` ini
[pg]
description = "Our main PostgreSQL server"
conninfo = host=pg user=barman dbname=postgres
backup_method = postgres
# backup_method = rsync
```

The `conninfo` option is set according to the section _"Preliminary steps:
PostgreSQL connection"_.

The meaning of the `backup_method` option will be covered in the backup
section of this guide.

If you plan to use the streaming connection for WAL archiving or to create a
backup of your server, you also need a `streaming_conninfo` parameter in
your server configuration file:

``` ini
streaming_conninfo = host=pg user=streaming_barman dbname=postgres
```

This value must be chosen as described in the section _"Preliminary steps:
PostgreSQL connection"_.

barman-2.18/doc/manual/17-configuration.en.md0000644000621200062120000000675414172556763017145 0ustar 00000000000000\newpage

# Configuration

There are two types of configuration files in Barman:

- **global/general configuration**
- **server configuration**

The main configuration file (set to `/etc/barman.conf` by default) contains
general options such as main directory, system user, log file, and so on.

Server configuration files, one for each server to be backed up by Barman,
are located in the `/etc/barman.d` directory and must have a `.conf` suffix.

> **IMPORTANT**: For historical reasons, you can still have one single
> configuration file containing both global and server options. However,
> for maintenance reasons, this approach is deprecated.

Configuration files in Barman follow the _INI_ format.

Configuration files accept distinct types of parameters:

- string
- enum
- integer
- boolean: `on/true/1` are accepted, as well as `off/false/0`.

None of them needs to be quoted.

> **NOTE**: some `enum` parameters allow `off` but not `false`.
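As a purely illustrative fragment (not a complete configuration), here is
one option of each type:

``` ini
; string
description = "Main PostgreSQL server"
; enum
compression = gzip
; integer
minimum_redundancy = 2
; boolean
archiver = on
```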
## Options scope Every configuration option has a _scope_: - global - server - global/server: server options that can be generally set at global level Global options are allowed in the _general section_, which is identified in the INI file by the `[barman]` label: ``` ini [barman] ; ... global and global/server options go here ``` Server options can only be specified in a _server section_, which is identified by a line in the configuration file, in square brackets (`[` and `]`). The server section represents the ID of that server in Barman. The following example specifies a section for the server named `pg`: ``` ini [pg] ; Configuration options for the ; server named 'pg' go here ``` There are two reserved words that cannot be used as server names in Barman: - `barman`: identifier of the global section - `all`: a handy shortcut that allows you to execute some commands on every server managed by Barman in sequence Barman implements the **convention over configuration** design paradigm, which attempts to reduce the number of options that you are required to configure without losing flexibility. Therefore, some server options can be defined at global level and overridden at server level, allowing users to specify a generic behavior and refine it for one or more servers. These options have a global/server scope. For a list of all the available configurations and their scope, please refer to [section 5 of the 'man' page][man5]. ``` bash man 5 barman ``` ## Examples of configuration The following is a basic example of main configuration file: ``` ini [barman] barman_user = barman configuration_files_directory = /etc/barman.d barman_home = /var/lib/barman log_file = /var/log/barman/barman.log log_level = INFO compression = gzip ``` The example below, on the other hand, is a server configuration file that uses streaming backup: ``` ini [streaming-pg] description = "Example of PostgreSQL Database (Streaming-Only)" conninfo = host=pg user=barman dbname=postgres streaming_conninfo = host=pg user=streaming_barman backup_method = postgres streaming_archiver = on slot_name = barman ``` The following code shows a basic example of traditional backup using `rsync`/SSH: ``` ini [ssh-pg] description = "Example of PostgreSQL Database (via Ssh)" ssh_command = ssh postgres@pg conninfo = host=pg user=barman dbname=postgres backup_method = rsync parallel_jobs = 1 reuse_backup = link archiver = on ``` For more detailed information, please refer to the distributed `barman.conf` file, as well as the `ssh-server.conf-template` and `streaming-server.conf-template` template files. barman-2.18/doc/manual/16-installation.en.md0000644000621200062120000001367614172556763017007 0ustar 00000000000000\newpage # Installation > **IMPORTANT:** > The recommended way to install Barman is by using the available > packages for your GNU/Linux distribution. ## Installation on RedHat/CentOS using RPM packages Barman can be installed on RHEL7 and RHEL6 Linux systems using RPM packages. It is required to install the Extra Packages Enterprise Linux (EPEL) repository and the [PostgreSQL Global Development Group RPM repository][yumpgdg] beforehand. Official RPM packages for Barman are distributed by EnterpriseDB via Yum through the [public RPM repository][2ndqrpmrepo], by following the instructions you find on that website. Then, as `root` simply type: ``` bash yum install barman ``` > **NOTE: ** > We suggest that you exclude any Barman related packages from getting updated > via the PGDG repository. 
> This can be done by adding the following line to any PGDG repository
> definition that is included in the Barman server, inside any
> `/etc/yum.repos.d/pgdg-*.repo` file:

```ini
exclude=barman* python*-barman
```

> By doing this, you solely rely on EnterpriseDB repositories for package
> management of Barman software.

For historical reasons, EnterpriseDB keeps maintaining package distribution
of Barman through [Sourceforge.net][3].

## Installation on Debian/Ubuntu using packages

Barman can be installed on Debian and Ubuntu Linux systems using packages.

It is directly available in the official repositories for Debian and Ubuntu;
however, these repositories might not contain the latest available version.
If you want to have the latest version of Barman, the recommended method is
to install both of these repositories:

* [Public APT repository][2ndqdebrepo], directly maintained by Barman
  developers
* the [PostgreSQL Community APT repository][aptpgdg], by following
  instructions in the [APT section of the PostgreSQL Wiki][aptpgdgwiki]

> **NOTE:**
> Thanks to the direct involvement of Barman developers in the
> PostgreSQL Community APT repository project, you will always have access
> to the most updated versions of Barman.

Installing Barman is just as easy: as the `root` user, simply type:

``` bash
apt-get install barman
```

## Installation from sources

> **WARNING:**
> Manual installation of Barman from sources should only be performed
> by expert GNU/Linux users. Installing Barman this way requires
> system administration activities such as dependency management,
> `barman` user creation, configuration of the `barman.conf` file,
> cron setup for the `barman cron` command, log management, and so on.

Create a system user called `barman` on the `backup` server. As the `barman`
user, download the sources and uncompress them.

For a system-wide installation, type:

``` bash
barman@backup$ ./setup.py build
# run this command with root privileges or through sudo
barman@backup# ./setup.py install
```

For a local installation, type:

``` bash
barman@backup$ ./setup.py install --user
```

The `barman` application will be installed in your user directory ([make
sure that your `PATH` environment variable is set properly][setup_user]).

[Barman is also available on the Python Package Index (PyPI)][pypi] and can
be installed through `pip`.

## PostgreSQL client binaries

The following Barman features depend on PostgreSQL client binaries:

* [Streaming backup](#streaming-backup) with `backup_method = postgres`
  (requires `pg_basebackup`)
* [Streaming WAL archiving](#wal-streaming) with `streaming_archiver = on`
  (requires `pg_receivewal` or `pg_receivexlog`)
* [Verifying backups](#verify) with `barman verify-backup`
  (requires `pg_verifybackup`)

These binaries are installed with the PostgreSQL client packages and can be
found in the following locations:

* On RedHat/CentOS: `/usr/pgsql-${PG_MAJOR_VERSION}/bin`
* On Debian/Ubuntu: `/usr/lib/postgresql/${PG_MAJOR_VERSION}/bin`

You must ensure that either:

1. The Barman user has the `bin` directory for the appropriate
   `PG_MAJOR_VERSION` on its path, or:
2. The [path_prefix](#binary-paths) option is set in the Barman
   configuration for each server and points to the `bin` directory for the
   appropriate `PG_MAJOR_VERSION`.

# Upgrading Barman

Barman follows the trunk-based development paradigm, and as such there is
only one stable version, the latest. After every commit, Barman goes through
thousands of automated tests for each supported PostgreSQL version and on
each supported Linux distribution.
Also, **every version is backward compatible** with previous ones.
Therefore, upgrading Barman normally requires a simple update of packages
using `yum update` or `apt update`.

There have been, however, the following exceptions in our development
history, which required some small changes to the configuration.

## Upgrading from Barman 2.10

If you are using `barman-cloud-wal-archive` or `barman-cloud-backup` you
need to be aware that from version 2.11 all cloud utilities have been moved
into the new `barman-cli-cloud` package. Therefore, you need to ensure that
the `barman-cli-cloud` package is properly installed as part of the upgrade
to the latest version. If you are not using the above tools, you can upgrade
to the latest version as usual.

## Upgrading from Barman 2.X (prior to 2.8)

Before upgrading from a version of Barman 2.7 or older, users of the `rsync`
backup method on a primary server should explicitly set `backup_options` to
either `concurrent_backup` (recommended for PostgreSQL 9.6 or higher) or
`exclusive_backup` (current default); otherwise Barman emits a warning every
time it runs.

## Upgrading from Barman 1.X

If your Barman installation is 1.X, you need to explicitly configure the
archiving strategy. Previously, the file-based archiver, controlled by
`archiver`, was enabled by default. Before you upgrade your Barman
installation to the latest version, make sure you add the following line
either globally or for any server that requires it:

``` ini
archiver = on
```

Additionally, for a few releases, Barman will transparently set
`archiver = on` for any server that has not explicitly set an archiving
strategy, and emit a warning.

barman-2.18/doc/manual/55-barman-cli.en.md0000644000621200062120000001320714172556763016304 0ustar 00000000000000\newpage

# Barman client utilities (`barman-cli`)

Formerly a separate open-source project, `barman-cli` has been merged into
Barman's core since version 2.8, and is distributed as an RPM/Debian
package.

`barman-cli` contains a set of recommended client utilities to be installed
alongside the PostgreSQL server:

- `barman-wal-archive`: archiving script to be used as `archive_command` as
  described in the "WAL archiving via `barman-wal-archive`" section;
- `barman-wal-restore`: WAL restore script to be used as part of the
  `restore_command` recovery option on standby and recovery servers, as
  described in the "`get-wal`" section above;

For more detailed information, please refer to the specific man pages or the
`--help` option.

## Installation

Barman client utilities are normally installed where PostgreSQL is
installed. Our recommendation is to install the `barman-cli` package on
every PostgreSQL server, whether primary or standby.

Please refer to the main "Installation" section to install the repositories.

To install the package on a RedHat/CentOS system, as `root` type:

``` bash
yum install barman-cli
```

On Debian/Ubuntu, as `root` user type:

``` bash
apt-get install barman-cli
```

# Barman client utilities for the Cloud (`barman-cli-cloud`)

Barman client utilities have been extended to support object storage
integration and enhance disaster recovery capabilities of your PostgreSQL
databases by relaying WAL files and backups to a supported cloud provider.
Supported cloud providers are:

* AWS S3 (or any S3 compatible object store)
* Azure Blob Storage

These utilities are distributed in the `barman-cli-cloud` RPM/Debian
package, and can be installed alongside the PostgreSQL server:

- `barman-cloud-wal-archive`: archiving script to be used as
  `archive_command` to directly ship WAL files to cloud storage, bypassing
  the Barman server; alternatively, as a hook script for WAL archiving
  (`pre_archive_retry_script`);
- `barman-cloud-wal-restore`: script to be used as `restore_command` to
  fetch WAL files from cloud storage, bypassing the Barman server, and store
  them directly in the PostgreSQL standby;
- `barman-cloud-backup`: backup script to be used to take a local backup
  directly on the PostgreSQL server and to ship it to a supported cloud
  provider, bypassing the Barman server; alternatively, as a hook script for
  copying barman backups to the cloud (`post_backup_retry_script`);
- `barman-cloud-backup-delete`: script to be used to delete one or more
  backups taken with `barman-cloud-backup` from cloud storage and remove
  associated WALs;
- `barman-cloud-backup-keep`: script to be used to flag backups in cloud
  storage as archival backups - such backups will be kept forever regardless
  of any retention policies applied;
- `barman-cloud-backup-list`: script to be used to list the content of
  Barman backups taken with `barman-cloud-backup` from cloud storage;
- `barman-cloud-restore`: script to be used to restore a backup directly
  taken with `barman-cloud-backup` from cloud storage;

For information on how to set up credentials for the aws-s3 cloud provider
please refer to the ["Credentials" section in the Boto 3
documentation][boto3creds].

For credentials for the azure-blob-storage cloud provider see the
["Environment variables for authorization parameters" section in the Azure
documentation][azure-storage-auth]. The following environment variables are
supported: `AZURE_STORAGE_CONNECTION_STRING`, `AZURE_STORAGE_KEY` and
`AZURE_STORAGE_SAS_TOKEN`. You can also use the `--credential` option to
specify either `azure-cli` or `managed-identity` credentials in order to
authenticate via Azure Active Directory.

> **WARNING:** Cloud utilities require the appropriate library for the cloud
> provider you wish to use - either [boto3][boto3], or
> [azure-storage-blob][azure-storage-blob] and (optionally)
> [azure-identity][azure-identity].

## Installation

Barman client utilities for the Cloud need to be installed on those
PostgreSQL servers that you want to directly back up to a cloud provider,
bypassing Barman.

In case you want to use `barman-cloud-backup` and/or
`barman-cloud-wal-archive` as hook scripts, you can also install the
`barman-cli-cloud` package on the Barman server.

Please refer to the main "Installation" section to install the repositories.

To install the package on a RedHat/CentOS system, as `root` type:

``` bash
yum install barman-cli-cloud
```

On Debian/Ubuntu, as `root` user type:

``` bash
apt-get install barman-cli-cloud
```

## barman-cloud hook scripts

Install the `barman-cli-cloud` package on the Barman server as described
above.

Configure `barman-cloud-backup` as a post backup script by adding the
following to the Barman configuration for a PostgreSQL server:

```
post_backup_retry_script = 'barman-cloud-backup [*OPTIONS*] *DESTINATION_URL* ${BARMAN_SERVER}'
```

> **WARNING:** When running as a hook script, `barman-cloud-backup` requires
> that the status of the backup is DONE and it will fail if the backup has
> any other status.
> For this reason, it is recommended that backups are run with the
> `-w / --wait` option, so that the hook script is not executed while a
> backup has status `WAITING_FOR_WALS`.

Configure `barman-cloud-wal-archive` as a pre WAL archive script by adding
the following to the Barman configuration for a PostgreSQL server:

```
pre_archive_retry_script = 'barman-cloud-wal-archive [*OPTIONS*] *DESTINATION_URL* ${BARMAN_SERVER}'
```

## Selecting a cloud provider

Use the `--cloud-provider` option to choose the cloud provider for your
backups and WALs. This can be set to one of the following:

* `aws-s3` [DEFAULT]: AWS S3 or S3-compatible object store.
* `azure-blob-storage`: Azure Blob Storage service.

barman-2.18/doc/manual/20-server_setup.en.md0000644000621200062120000000146714172556763017012 0ustar 00000000000000\newpage

# Setup of a new server in Barman

As mentioned in the _"Design and architecture"_ section, we will use the
following conventions:

- `pg` as server ID and host name where PostgreSQL is installed
- `backup` as host name where Barman is located
- `barman` as the user running Barman on the `backup` server (identified by
  the parameter `barman_user` in the configuration)
- `postgres` as the user running PostgreSQL on the `pg` server

> **IMPORTANT:** a server in Barman must refer to the same PostgreSQL
> instance for the whole backup and recoverability history (i.e. the
> same system identifier). **This means that if you perform an upgrade
> of the instance (using for example `pg_upgrade`), you must not reuse
> the same server definition in Barman, rather use another one as they
> have nothing in common.**

barman-2.18/doc/manual/24-wal_archiving.en.md0000644000621200062120000001150214172556763017104 0ustar 00000000000000## WAL archiving via `archive_command`

The `archive_command` is the traditional method to archive WAL files. The
value of this PostgreSQL configuration parameter must be a shell command to
be executed by the PostgreSQL server to copy the WAL files to the Barman
incoming directory.

This can be done in two ways, both requiring an SSH connection:

- via the `barman-wal-archive` utility (from Barman 2.6)
- via rsync/SSH (common approach before Barman 2.6)

See the sections below for more details.

> **IMPORTANT:** PostgreSQL 9.5 introduced support for WAL file
> archiving using `archive_command` from a standby. Read the
> "Concurrent Backup and backup from a standby" section for more
> detailed information on how Barman supports this feature.

### WAL archiving via `barman-wal-archive`

From Barman 2.6, the **recommended way** to safely and reliably archive WAL
files to Barman via `archive_command` is to use the `barman-wal-archive`
command contained in the `barman-cli` package, distributed via EnterpriseDB
public repositories and available under the GNU GPL 3 licence. `barman-cli`
must be installed on each PostgreSQL server that is part of the Barman
cluster.

Using `barman-wal-archive` instead of rsync/SSH reduces the risk of data
corruption of the shipped WAL file on the Barman server. When using
rsync/SSH as `archive_command`, there is no mechanism that guarantees that
the content of the shipped WAL file is flushed and fsync-ed to disk on the
destination.

For this reason, we have developed the `barman-wal-archive` utility that
natively communicates with Barman's `put-wal` command (introduced in 2.6),
which is responsible for receiving the file, fsyncing its content and
placing it in the proper `incoming` directory for that server.
Therefore, `barman-wal-archive` reduces the risk of copying a WAL file in the wrong location/directory in Barman, as the only parameter to be used in the `archive_command` is the server's ID. For more information on the `barman-wal-archive` command, type `man barman-wal-archive` on the PostgreSQL server. You can check that `barman-wal-archive` can connect to the Barman server, and that the required PostgreSQL server is configured in Barman to accept incoming WAL files with the following command: ``` bash barman-wal-archive --test backup pg DUMMY ``` Where `backup` is the host where Barman is installed, `pg` is the name of the PostgreSQL server as configured in Barman and DUMMY is a placeholder (`barman-wal-archive` requires an argument for the WAL file name, which is ignored). Edit the `postgresql.conf` file of the PostgreSQL instance on the `pg` database, activate the archive mode and set `archive_command` to use `barman-wal-archive`: ``` ini archive_mode = on wal_level = 'replica' archive_command = 'barman-wal-archive backup pg %p' ``` Then restart the PostgreSQL server. ### WAL archiving via rsync/SSH You can retrieve the incoming WALs directory using the `show-servers` Barman command and looking for the `incoming_wals_directory` value: ``` bash barman@backup$ barman show-servers pg |grep incoming_wals_directory incoming_wals_directory: /var/lib/barman/pg/incoming ``` Edit the `postgresql.conf` file of the PostgreSQL instance on the `pg` database and activate the archive mode: ``` ini archive_mode = on wal_level = 'replica' archive_command = 'rsync -a %p barman@backup:INCOMING_WALS_DIRECTORY/%f' ``` Make sure you change the `INCOMING_WALS_DIRECTORY` placeholder with the value returned by the `barman show-servers pg` command above. Restart the PostgreSQL server. In some cases, you might want to add stricter checks to the `archive_command` process. For example, some users have suggested the following one: ``` ini archive_command = 'test $(/bin/hostname --fqdn) = HOSTNAME \ && rsync -a %p barman@backup:INCOMING_WALS_DIRECTORY/%f' ``` Where the `HOSTNAME` placeholder should be replaced with the value returned by `hostname --fqdn`. This _trick_ is a safeguard in case the server is cloned and avoids receiving WAL files from recovered PostgreSQL instances. ## Verification of WAL archiving configuration In order to test that continuous archiving is on and properly working, you need to check both the PostgreSQL server and the backup server. In particular, you need to check that WAL files are correctly collected in the destination directory. For this purpose and to facilitate the verification of the WAL archiving process, the `switch-wal` command has been developed: ``` bash barman@backup$ barman switch-wal --force --archive pg ``` The above command will force PostgreSQL to switch WAL file and trigger the archiving process in Barman. Barman will wait for one file to arrive within 30 seconds (you can change the timeout through the `--archive-timeout` option). If no WAL file is received, an error is returned. You can verify if the WAL archiving has been correctly configured using the `barman check` command. barman-2.18/doc/manual/27-windows-support.en.md0000644000621200062120000000246714172556763017510 0ustar 00000000000000## How to setup a Windows based server You can backup a PostgreSQL server running on Windows using the streaming connection for both WAL archiving and for backups. 
> **IMPORTANT:** This feature is still experimental because it is not
> yet part of our continuous integration system.

Follow every step discussed previously for a streaming connection setup.

> **WARNING:** At this moment, `pg_basebackup` interoperability from
> Windows to Linux is still experimental. If you are having issues
> taking a backup from a Windows server and your PostgreSQL locale is
> not in English, a possible workaround for the issue is instructing
> your PostgreSQL to emit messages in English. You can do this by
> putting the following parameter in your `postgresql.conf` file:
>
> ``` ini
> lc_messages = 'English'
> ```
>
> This has been reported to fix the issue.

You can back up your server as usual.

Remote recovery is not supported for Windows servers, so you must recover
your cluster locally in the Barman server and then copy all the files to a
Windows server, or use a folder shared between the PostgreSQL server and the
Barman server.

Additionally, make sure that the system user chosen to run PostgreSQL has
the permissions needed to access the restored data. Basically, it must have
full control over the PostgreSQL data directory.

barman-2.18/doc/manual/42-server-commands.en.md0000644000621200062120000002344014172556763017400 0ustar 00000000000000\newpage

# Server commands

As we said in the previous section, server commands work directly on a
PostgreSQL server or on its area in Barman, and are useful to check its
status, perform maintenance operations, take backups, and manage the WAL
archive.

## `archive-wal`

The `archive-wal` command executes maintenance operations on WAL files for a
given server. These operations include processing of the WAL files received
from the streaming connection or from the `archive_command`, or both.

> **IMPORTANT:**
> The `archive-wal` command, even if it can be directly invoked, is
> designed to be started from the `cron` general command.

## `backup`

The `backup` command takes a full backup (_base backup_) of a given server.
It has several options that let you override the corresponding configuration
parameter for the new backup. For more information, consult the manual page.
You can perform a full backup for a given server with:

``` bash
barman backup <server_name>
```

> **TIP:**
> You can use `barman backup all` to sequentially back up all your
> configured servers.

## `check`

You can check the connection to a given server and the configuration
coherence with the `check` command:

``` bash
barman check <server_name>
```

> **TIP:**
> You can use `barman check all` to check all your configured servers.

> **IMPORTANT:**
> The `check` command is probably the most critical feature that
> Barman implements. We recommend integrating it with your alerting
> and monitoring infrastructure. The `--nagios` option allows you
> to easily create a plugin for Nagios/Icinga.

## `generate-manifest`

This command is useful when a backup has been created remotely without
involving `pg_basebackup`, so that no `backup_manifest` file exists in the
backup. It generates the `backup_manifest` file for the given backup ID,
using the backup stored on the Barman server. If the file already exists,
the command will abort. Command example:

```bash
barman generate-manifest <server_name> <backup_id>
```

Either a backup ID or one of the [backup ID shortcuts](#backup-id-shortcuts)
can be used.
This command can also be used as a post backup hook script, as follows:

```bash
post_backup_script=barman generate-manifest ${BARMAN_SERVER} ${BARMAN_BACKUP_ID}
```

## `get-wal`

Barman allows users to request any _xlog_ file from its WAL archive through
the `get-wal` command:

``` bash
barman get-wal [-o OUTPUT_DIRECTORY][-j|-x] <server_name> <wal_id>
```

If the requested WAL file is found in the server archive, the uncompressed
content will be returned to `STDOUT`, unless otherwise specified.

The following options are available for the `get-wal` command:

- `-o` allows users to specify a destination directory where Barman will
  deposit the requested WAL file
- `-j` will compress the output using the `bzip2` algorithm
- `-x` will compress the output using the `gzip` algorithm
- `-p SIZE` peeks from the archive up to `SIZE` WAL files, starting from the
  requested file

It is possible to use `get-wal` during a recovery operation, transforming
the Barman server into a _WAL hub_ for your servers. This can be
automatically achieved by adding the `get-wal` value to the
`recovery_options` global/server configuration option:

``` ini
recovery_options = 'get-wal'
```

`recovery_options` is a global/server option that accepts a list of comma
separated values. If the keyword `get-wal` is present during a recovery
operation, Barman will prepare the recovery configuration by setting the
`restore_command` so that `barman get-wal` is used to fetch the required WAL
files. Similarly, one can use the `--get-wal` option for the `recover`
command at run-time.

This is an example of a `restore_command` for a local recovery:

``` ini
restore_command = 'sudo -u barman barman get-wal SERVER %f > %p'
```

Please note that the `get-wal` command should always be invoked as the
`barman` user, and that it requires the correct permissions to read the WAL
files from the catalog. This is the reason why we are using `sudo -u barman`
in the example.

Setting `recovery_options` to `get-wal` for a remote recovery will instead
generate a `restore_command` using the `barman-wal-restore` script.
`barman-wal-restore` is a more resilient shell script which manages SSH
connection errors.

This script has many useful options such as the automatic compression and
decompression of the WAL files and the *peek* feature, which allows you to
retrieve the next WAL files while PostgreSQL is applying one of them. It is
an excellent way to optimise the bandwidth usage between PostgreSQL and
Barman. `barman-wal-restore` is available in the `barman-cli` package.

This is an example of a `restore_command` for a remote recovery:

``` ini
restore_command = 'barman-wal-restore -U barman backup SERVER %f %p'
```

Since it uses SSH to communicate with the Barman server, SSH key
authentication is required for the `postgres` user to log in as `barman` on
the backup server.

You can check that `barman-wal-restore` can connect to the Barman server,
and that the required PostgreSQL server is configured in Barman to send WAL
files, with the following command:

``` bash
barman-wal-restore --test backup pg DUMMY DUMMY
```

Where `backup` is the host where Barman is installed, `pg` is the name of
the PostgreSQL server as configured in Barman and DUMMY is a placeholder
(`barman-wal-restore` requires two arguments for the WAL file name and
destination directory, which are ignored).

For more information on the `barman-wal-restore` command, type
`man barman-wal-restore` on the PostgreSQL server.
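To illustrate the peek option of `get-wal` described earlier in this
section, the following invocation (shown with a hypothetical server name and
WAL segment) lists up to eight WAL files available in the archive, starting
from the requested one:

``` bash
# Peek at the archive: report up to 8 consecutive WAL files,
# beginning with the requested segment, without extracting them
barman get-wal -p 8 pg 000000010000000000000005
```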
## `list-backups` You can list the catalog of available backups for a given server with: ``` bash barman list-backups ``` > **TIP:** You can request a full list of the backups of all servers > using `all` as the server name. To have a machine-readable output you can use the `--minimal` option. ## `rebuild-xlogdb` At any time, you can regenerate the content of the WAL archive for a specific server (or every server, using the `all` shortcut). The WAL archive is contained in the `xlog.db` file and every server managed by Barman has its own copy. The `xlog.db` file can be rebuilt with the `rebuild-xlogdb` command. This will scan all the archived WAL files and regenerate the metadata for the archive. For example: ``` bash barman rebuild-xlogdb ``` ## `receive-wal` This command manages the `receive-wal` process, which uses the streaming protocol to receive WAL files from the PostgreSQL streaming connection. ### receive-wal process management If the command is run without options, a `receive-wal` process will be started. This command is based on the `pg_receivewal` PostgreSQL command. ``` bash barman receive-wal ``` > **NOTE:** > The `receive-wal` command is a foreground process. If the command is run with the `--stop` option, the currently running `receive-wal` process will be stopped. The `receive-wal` process uses a status file to track last written record of the transaction log. When the status file needs to be cleaned, the `--reset` option can be used. > **IMPORTANT:** If you are not using replication slots, you rely > on the value of `wal_keep_segments` (or `wal_keep_size` from > PostgreSQL version 13.0 onwards). Be aware that under high peaks > of workload on the database, the `receive-wal` process > might fall behind and go out of sync. As a precautionary measure, > Barman currently requires that users manually execute the command with the > `--reset` option, to avoid making wrong assumptions. ### Replication slot management The `receive-wal` process is also useful to create or drop the replication slot needed by Barman for its WAL archiving procedure. With the `--create-slot` option, the replication slot named after the `slot_name` configuration option will be created on the PostgreSQL server. With the `--drop-slot`, the previous replication slot will be deleted. ## `replication-status` The `replication-status` command reports the status of any streaming client currently attached to the PostgreSQL server, including the `receive-wal` process of your Barman server (if configured). You can execute the command as follows: ``` bash barman replication-status ``` > **TIP:** You can request a full status report of the replica > for all your servers using `all` as the server name. To have a machine-readable output you can use the `--minimal` option. ## `show-servers` You can show the configuration parameters for a given server with: ``` bash barman show-servers ``` > **TIP:** you can request a full configuration report using `all` as > the server name. ## `status` The `status` command shows live information and status of a PostgreSQL server or of all servers if you use `all` as server name. ``` bash barman status ``` ## `switch-wal` This command makes the PostgreSQL server switch to another transaction log file (WAL), allowing the current log file to be closed, received and then archived. ``` bash barman switch-wal ``` If there has been no transaction activity since the last transaction log file switch, the switch needs to be forced using the `--force` option. 
The `--archive` option requests Barman to trigger WAL archiving after the
xlog switch. By default, a 30-second timeout is enforced (this can be
changed with `--archive-timeout`). If no WAL file is received, an error is
returned.

> **NOTE:** In Barman 2.1 and 2.2 this command was called `switch-xlog`.
> It has been renamed for naming consistency with PostgreSQL 10 and higher.

## `verify`

The `verify` command uses the `backup_manifest` file from a backup and runs
`pg_verifybackup` against it.

```bash
barman verify <server_name> <backup_id>
```

This command will call `pg_verifybackup -n` (available on PostgreSQL 13 and
above). `pg_verifybackup` must be installed on the backup server. For rsync
backups, it can be used together with the `generate-manifest` command.

Either a backup ID or one of the [backup ID shortcuts](#backup-id-shortcuts)
can be used.

barman-2.18/doc/manual/15-system_requirements.en.md0000644000621200062120000000451014172556763020417 0ustar 00000000000000\newpage

# System requirements

- Linux/Unix
- Python >= 3.4
- Python modules:
    - argcomplete
    - psycopg2 >= 2.4.2
    - python-dateutil
    - setuptools
- PostgreSQL >= 8.3
- rsync >= 3.0.4 (optional for PostgreSQL >= 9.2)

> **IMPORTANT:**
> Users of RedHat Enterprise Linux, CentOS and Scientific Linux are
> required to install the
> [Extra Packages for Enterprise Linux (EPEL) repository][epel].

> **NOTE:**
> Support for Python 2.6 and 2.7 is deprecated and will be discontinued in future releases.
> Support for PostgreSQL < 9.4 is deprecated and will be discontinued in future releases.

## Requirements for backup

The most critical requirement for a Barman server is the amount of disk
space available. You are advised to plan the required disk space based on
the size of the cluster, the number of WAL files generated per day, the
frequency of backups, and the retention policies.

Although the only file systems that we officially support are XFS and Ext4,
we are aware of users that deploy Barman on different file systems,
including ZFS and NFS.

## Requirements for recovery

Barman allows you to recover a PostgreSQL instance either locally (where
Barman resides) or remotely (on a separate server). Remote recovery is
definitely the most common way to restore a PostgreSQL server with Barman.

Either way, the same [requirements for PostgreSQL's Log shipping and
Point-In-Time-Recovery apply][requirements_recovery]:

- identical hardware architecture
- identical major version of PostgreSQL

In general, it is **highly recommended** to create recovery environments
that are as similar as possible, if not identical, to the original server,
because they are easier to maintain. For example, we suggest that you use
the same operating system, the same PostgreSQL version, the same disk
layouts, and so on.

Additionally, dedicated recovery environments for each PostgreSQL server,
even on demand, allow you to nurture the disaster recovery culture in your
team. You can be prepared for when something unexpected happens by
practising recovery operations and becoming familiar with them.

Based on our experience, designated recovery environments reduce the impact
of stress in real failure situations, and therefore increase the
effectiveness of recovery operations.

Finally, it is important that time is synchronised between the servers,
using NTP for example.

barman-2.18/doc/manual/43-backup-commands.en.md0000644000621200062120000002715114172556763017343 0ustar 00000000000000\newpage

# Backup commands

Backup commands are those that work directly on backups already existing in
Barman's backup catalog.
> **NOTE:** > Remember a backup ID can be retrieved with `barman list-backups > ` ## Backup ID shortcuts Barman allows you to use special keywords to identify a specific backup: * `last/latest`: identifies the newest backup in the catalog * `first/oldest`: identifies the oldest backup in the catalog * `last-failed`: identifies the newest failed backup in the catalog Using those keywords with Barman commands allows you to execute actions without knowing the exact ID of a backup for a server. For example we can issue: ``` bash barman delete oldest ``` to remove the oldest backup available in the catalog and reclaim disk space. ## `check-backup` Starting with version 2.5, you can check that all required WAL files for the consistency of a full backup have been correctly archived by `barman` with the `check-backup` command: ``` bash barman check-backup ``` > **IMPORTANT:** > This command is automatically invoked by `cron` and at the end of a > `backup` operation. This means that, under normal circumstances, > you should never need to execute it. In case one or more WAL files from the start to the end of the backup have not been archived yet, `barman` will label the backup as `WAITING_FOR_WALS`. The `cron` command will continue to check that missing WAL files are archived, then label the backup as `DONE`. In case the first required WAL file is missing at the end of the backup, such backup will be marked as `FAILED`. It is therefore important that you verify that WAL archiving (whether via streaming or `archive_command`) is properly working before executing a backup operation - especially when backing up from a standby server. Barman 2.10 introduces the `-w`/`--wait` option for the `backup` command. When set, Barman temporarily saves the state of the backup to `WAITING_FOR_WALS`, then waits for all the required WAL files to be archived before setting the state to `DONE` and proceeding with post-backup hook scripts. ## `delete` You can delete a given backup with: ``` bash barman delete ``` The `delete` command accepts any [shortcut](#backup-id-shortcuts) to identify backups. ## `keep` If you have a backup which you wish to keep beyond the retention policy of the server then you can make it an archival backup with: ```bash barman keep [--target TARGET, --status, --release] ``` Possible values for `TARGET` are: - `full`: The backup can always be used to recover to the latest point in time. To achieve this, Barman will retain all WALs needed to ensure consistency of the backup and all subsequent WALs. - `standalone`: The backup can only be used to recover the server to its state at the time the backup was taken. Barman will only retain the WALs needed to ensure consistency of the backup. If the `--status` option is provided then Barman will report the archival status of the backup. This will either be the recovery target of `full` or `standalone` for archival backups or `nokeep` for backups which have not been flagged as archival. If the `--release` option is provided then Barman will release the keep flag from this backup. This will remove its archival status and make it available for deletion, either directly or by retention policy. Once a backup has been flagged as an archival backup, the behaviour of Barman will change as follows: - Attempts to delete that backup by ID using `barman delete` will fail. - Retention policies will never consider that backup as `OBSOLETE` and therefore `barman cron` will never delete that backup. - The WALs required by that backup will be retained forever. 
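As a sketch of a typical workflow (using the example server name `pg` and the `latest` shortcut), you could flag the most recent backup as archival, inspect its keep status, and later release it:

``` bash
barman keep pg latest --target full
barman keep pg latest --status
barman keep pg latest --release
```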
If the specified recovery target is `full` then *all* subsequent WALs will also be retained. This can be reverted by removing the keep flag with `barman keep --release`. > **WARNING:** Once a `standalone` archival backup is not required by the > retention policy of a server `barman cron` will remove the WALs between > that backup and the begin_wal value of the next most recent backup. This > means that while it is safe to change the target from `full` to `standalone`, > it is *not* safe to change the target from `standalone` to `full` because > there is no guarantee the necessary WALs for a recovery to the latest point > in time will still be available. ## `list-files` You can list the files (base backup and required WAL files) for a given backup with: ``` bash barman list-files [--target TARGET_TYPE] ``` With the `--target TARGET_TYPE` option, it is possible to choose the content of the list for a given backup. Possible values for `TARGET_TYPE` are: - `data`: lists the data files - `standalone`: lists the base backup files, including required WAL files - `wal`: lists all WAL files from the beginning of the base backup to the start of the following one (or until the end of the log) - `full`: same as `data` + `wal` The default value for `TARGET_TYPE` is `standalone`. > **IMPORTANT:** > The `list-files` command facilitates interaction with external > tools, and can therefore be extremely useful to integrate > Barman into your archiving procedures. ## `recover` The `recover` command is used to recover a whole server after a backup is executed using the `backup` command. This is achieved issuing a command like the following: ```bash barman@backup$ barman recover /path/to/recover/dir ``` > **IMPORTANT:** > Do not issue a `recover` command using a target data directory where > a PostgreSQL instance is running. In that case, remember to stop it > before issuing the recovery. This applies also to tablespace directories. At the end of the execution of the recovery, the selected backup is recovered locally and the destination path contains a data directory ready to be used to start a PostgreSQL instance. > **IMPORTANT:** > Running this command as user `barman`, it will become the database superuser. The specific ID of a backup can be retrieved using the [list-backups](#list-backups) command. > **IMPORTANT:** > Barman does not currently keep track of symbolic links inside PGDATA > (except for tablespaces inside pg_tblspc). We encourage > system administrators to keep track of symbolic links and to add them > to the disaster recovery plans/procedures in case they need to be restored > in their original location. The recovery command has several options that modify the command behavior. ### Remote recovery Add the `--remote-ssh-command ` option to the invocation of the recovery command. Doing this will allow Barman to execute the copy on a remote server, using the provided command to connect to the remote host. > **NOTE:** > It is advisable to use the `postgres` user to perform > the recovery on the remote host. > **IMPORTANT:** > Do not issue a `recover` command using a target data directory where > a PostgreSQL instance is running. In that case, remember to stop it > before issuing the recovery. This applies also to tablespace directories. Known limitations of the remote recovery are: * Barman requires at least 4GB of free space in the system temporary directory unless the [`get-wal`](#get-wal) command is specified in the `recovery_option` parameter in the Barman configuration. 
* The SSH connection between Barman and the remote host **must** use the public key exchange authentication method * The remote user **must** be able to create the directory structure of the backup in the destination directory. * There must be enough free space on the remote server to contain the base backup and the WAL files needed for recovery. ### Tablespace remapping Barman is able to automatically remap one or more tablespaces using the recover command with the --tablespace option. The option accepts a pair of values as arguments using the `NAME:DIRECTORY` format: * `NAME` is the identifier of the tablespace * `DIRECTORY` is the new destination path for the tablespace If the destination directory does not exists, Barman will try to create it (assuming you have the required permissions). ### Point in time recovery Barman wraps PostgreSQL's Point-in-Time Recovery (PITR), allowing you to specify a recovery target, either as a timestamp, as a restore label, or as a transaction ID. > **IMPORTANT:** > The earliest PITR for a given backup is the end of the base > backup itself. If you want to recover at any point in time > between the start and the end of a backup, you must use > the previous backup. From Barman 2.3 you can exit recovery > when consistency is reached by using `--target-immediate` option > (available only for PostgreSQL 9.4 and newer). The recovery target can be specified using one of the following mutually exclusive options: * `--target-time TARGET_TIME`: to specify a timestamp * `--target-xid TARGET_XID`: to specify a transaction ID * `--target-lsn TARGET_LSN`: to specify a Log Sequence Number (LSN) - requires PostgreSQL 10 or higher * `--target-name TARGET_NAME`: to specify a named restore point previously created with the pg_create_restore_point(name) function[^TARGET_NAME] * `--target-immediate`: recovery ends when a consistent state is reached (that is the end of the base backup process) [^RECOVERY_TARGET_IMMEDIATE] > **IMPORTANT:** > Recovery target via _time_, _XID_ and LSN **must be** subsequent to the > end of the backup. If you want to recover to a point in time between > the start and the end of a backup, you must recover from the > previous backup in the catalogue. [^TARGET_NAME]: Only available on PostgreSQL 9.1 and above [^RECOVERY_TARGET_IMMEDIATE]: Only available on PostgreSQL 9.4 and above You can use the `--exclusive` option to specify whether to stop immediately before or immediately after the recovery target. Barman allows you to specify a target timeline for recovery, using the `target-tli` option. The notion of timeline goes beyond the scope of this document; you can find more details in the PostgreSQL documentation, as mentioned in the _"Before you start"_ section. Barman 2.4 introduces support for `--target-action` option, accepting the following values: * `shutdown`: once recovery target is reached, PostgreSQL is shut down [^TARGET_SHUTDOWN] * `pause`: once recovery target is reached, PostgreSQL is started in pause state, allowing users to inspect the instance [^TARGET_PAUSE] * `promote`: once recovery target is reached, PostgreSQL will exit recovery and is promoted as a master [^TARGET_PROMOTE] > **IMPORTANT:** > By default, no target action is defined (for back compatibility). > The `--target-action` option requires a Point In Time Recovery target > to be specified. 
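Putting these options together, a point-in-time recovery invocation might look like the following sketch, where the timestamp, backup ID and destination path are placeholders:

``` bash
barman recover --target-time "2022-01-21 09:30:00" \
    --target-action pause \
    pg 20220121T093000 /path/to/recover/dir
```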
[^TARGET_SHUTDOWN]: Only available on PostgreSQL 9.5 and above [^TARGET_PAUSE]: Only available on PostgreSQL 9.1 and above [^TARGET_PROMOTE]: Only available on PostgreSQL 9.5 and above For more detailed information on the above settings, please consult the [PostgreSQL documentation on recovery target settings][target]. Barman 2.4 also adds the `--standby-mode` option for the `recover` command which, if specified, properly configures the recovered instance as a standby by creating a `standby.signal` file (from PostgreSQL 12) or by adding `standby_mode = on` to the generated recovery configuration. Further information on _standby mode_ is available in the PostgreSQL documentation. ## `show-backup` You can retrieve all the available information for a particular backup of a given server with: ``` bash barman show-backup ``` The `show-backup` command accepts any [shortcut](#backup-id-shortcuts) to identify backups. barman-2.18/doc/manual/66-about.en.md0000644000621200062120000000706514172556763015420 0ustar 00000000000000\newpage # The Barman project ## Support and sponsor opportunities Barman is free software, written and maintained by EnterpriseDB. If you require support on using Barman, or if you need new features, please get in touch with EnterpriseDB. You can sponsor the development of new features of Barman and PostgreSQL which will be made publicly available as open source. For further information, please visit: - [Barman website][11] - [Support section][12] - [EnterpriseDB website][13] - [Barman FAQs][14] - [2ndQuadrant blog: Barman][15] ## Contributing to Barman EnterpriseDB has a team of software engineers, architects, database administrators, system administrators, QA engineers, developers and managers that dedicate their time and expertise to improve Barman's code. We adopt lean and agile methodologies for software development, and we believe in the _devops_ culture that allowed us to implement rigorous testing procedures through cross-functional collaboration. Every Barman commit is the contribution of multiple individuals, at different stages of the production pipeline. Even though this is our preferred way of developing Barman, we gladly accept patches from external developers, as long as: - user documentation (tutorial and man pages) is provided. - source code is properly documented and contains relevant comments. - code supplied is covered by unit tests. - no unrelated feature is compromised or broken. - source code is rebased on the current master branch. - commits and pull requests are limited to a single feature (multi-feature patches are hard to test and review). - changes to the user interface are discussed beforehand with EnterpriseDB. We also require that any contributions provide a copyright assignment and a disclaimer of any work-for-hire ownership claims from the employer of the developer. You can use Github's pull requests system for this purpose. 
## Authors In alphabetical order: * Abhijit Menon-Sen * Jane Threefoot * Michael Wallace Past contributors (in alphabetical order): * Anna Bellandi (QA/testing) * Britt Cole (documentation reviewer) * Carlo Ascani (developer) * Francesco Canovai (QA/testing) * Gabriele Bartolini (architect) * Gianni Ciolli (QA/testing) * Giulio Calacoci (developer) * Giuseppe Broccolo (developer) * Jonathan Battiato (QA/testing) * Leonardo Cecchi (developer) * Marco Nenciarini (project leader) * Niccolò Fei (QA/testing) * Rubens Souza (QA/testing) * Stefano Bianucci (developer) ## Links - [check-barman][16]: a Nagios plugin for Barman, written by Holger Hamann (MIT license) - [puppet-barman][17]: Barman module for Puppet (GPL) - [Tutorial on "How To Back Up, Restore, and Migrate PostgreSQL Databases with Barman on CentOS 7"][26], by Sadequl Hussain (available on DigitalOcean Community) - [BarmanAPI][27]: RESTFul API for Barman, written by Mehmet Emin Karakaş (GPL) ## License and Contributions Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License 3. © Copyright EnterpriseDB UK Limited 2011-2022 Barman has been partially funded through [4CaaSt][18], a research project funded by the European Commission's Seventh Framework programme. Contributions to Barman are welcome, and will be listed in the `AUTHORS` file. EnterpriseDB UK Limited requires that any contributions provide a copyright assignment and a disclaimer of any work-for-hire ownership claims from the employer of the developer. This lets us make sure that all of the Barman distribution remains free code. Please contact barman@enterprisedb.com for a copy of the relevant Copyright Assignment Form. barman-2.18/doc/manual/10-design.en.md0000644000621200062120000002564514172556763015550 0ustar 00000000000000\newpage # Design and architecture ## Where to install Barman One of the foundations of Barman is the ability to operate remotely from the database server, via the network. Theoretically, you could have your Barman server located in a data centre in another part of the world, thousands of miles away from your PostgreSQL server. Realistically, you do not want your Barman server to be too far from your PostgreSQL server, so that both backup and recovery times are kept under control. Even though there is no _"one size fits all"_ way to setup Barman, there are a couple of recommendations that we suggest you abide by, in particular: - Install Barman on a dedicated server - Do not share the same storage with your PostgreSQL server - Integrate Barman with your monitoring infrastructure [^nagios] - Test everything before you deploy it to production [^nagios]: Integration with Nagios/Icinga is straightforward thanks to the `barman check --nagios` command, one of the most important features of Barman and a true lifesaver. A reasonable way to start modelling your disaster recovery architecture is to: - design a couple of possible architectures in respect to PostgreSQL and Barman, such as: 1. same data centre 2. different data centre in the same metropolitan area 3. different data centre - elaborate the pros and the cons of each hypothesis - evaluate the single points of failure (SPOF) of your system, with cost-benefit analysis - make your decision and implement the initial solution Having said this, a very common setup for Barman is to be installed in the same data centre where your PostgreSQL servers are. In this case, the single point of failure is the data centre. 
Fortunately, the impact of such a SPOF can be alleviated thanks to two features that Barman provides to increase the number of backup tiers: 1. **geographical redundancy** (introduced in Barman 2.6) 2. **hook scripts** With _geographical redundancy_, you can rely on a Barman instance that is located in a different data centre/availability zone to synchronise the entire content of the source Barman server. There's more: given that geo-redundancy can be configured in Barman not only at global level, but also at server level, you can create _hybrid installations_ of Barman where some servers are directly connected to the local PostgreSQL servers, and others are backing up subsets of different Barman installations (_cross-site backup_). Figure \ref{georedundancy-design} below shows two availability zones (one in Europe and one in the US), each with a primary PostgreSQL server that is backed up in a local Barman installation, and relayed on the other Barman server (defined as _passive_) for multi-tier backup via rsync/SSH. Further information on geo-redundancy is available in the specific section. ![An example of architecture with geo-redundancy\label{georedundancy-design}](../images/barman-architecture-georedundancy.png){ width=80% } Thanks to _hook scripts_ instead, backups of Barman can be exported on different media, such as _tape_ via `tar`, or locations, like an _S3 bucket_ in the Amazon cloud. Remember that no decision is forever. You can start this way and adapt over time to the solution that suits you best. However, try and keep it simple to start with. ## One Barman, many PostgreSQL servers Another relevant feature that was first introduced by Barman is support for multiple servers. Barman can store backup data coming from multiple PostgreSQL instances, even with different versions, in a centralised way. [^recver] [^recver]: The same [requirements for PostgreSQL's PITR][requirements_recovery] apply for recovery, as detailed in the section _"Requirements for recovery"_. As a result, you can model complex disaster recovery architectures, forming a "star schema", where PostgreSQL servers rotate around a central Barman server. Every architecture makes sense in its own way. Choose the one that resonates with you, and most importantly, the one you trust, based on real experimentation and testing. From this point forward, for the sake of simplicity, this guide will assume a basic architecture: - one PostgreSQL instance (with host name `pg`) - one backup server with Barman (with host name `backup`) ## Streaming backup vs rsync/SSH Traditionally, Barman has always operated remotely via SSH, taking advantage of `rsync` for physical backup operations. Version 2.0 introduces native support for PostgreSQL's streaming replication protocol for backup operations, via `pg_basebackup`. [^fmatrix] [^fmatrix]: Check in the "Feature matrix" which PostgreSQL versions support streaming replication backups with Barman. Choosing one of these two methods is a decision you will need to make. On a general basis, starting from Barman 2.0, backup over streaming replication is the recommended setup for PostgreSQL 9.4 or higher. Moreover, if you do not make use of tablespaces, backup over streaming can be used starting from PostgreSQL 9.2. > **IMPORTANT:** \newline > Because Barman transparently makes use of `pg_basebackup`, features such as incremental backup, parallel backup, deduplication, and network compression are currently not available. 
In this case, bandwidth limitation has some restrictions - compared to the traditional method via `rsync`. Traditional backup via `rsync`/SSH is available for all versions of PostgreSQL starting from 8.3, and it is recommended in all cases where `pg_basebackup` limitations occur (for example, a very large database that can benefit from incremental backup and deduplication). The reason why we recommend streaming backup is that, based on our experience, it is easier to setup than the traditional one. Also, streaming backup allows you to backup a PostgreSQL server on Windows[^windows], and makes life easier when working with Docker. [^windows]: Backup of a PostgreSQL server on Windows is possible, but it is still experimental because it is not yet part of our continuous integration system. See section _"How to setup a Windows based server"_ for details. ## Standard archiving, WAL streaming ... or both PostgreSQL's Point-In-Time-Recovery requires that transactional logs, also known as _xlog_ or WAL files, are stored alongside of base backups. Traditionally, Barman has supported standard WAL file shipping through PostgreSQL's `archive_command` (usually via `rsync`/SSH, now via `barman-wal-archive` from the `barman-cli` package). With this method, WAL files are archived only when PostgreSQL _switches_ to a new WAL file. To keep it simple, this normally happens every 16MB worth of data changes. Barman 1.6.0 introduces streaming of WAL files for PostgreSQL servers 9.2 or higher, as an additional method for transactional log archiving, through `pg_receivewal` (also known as `pg_receivexlog` before PostgreSQL 10). WAL streaming is able to reduce the risk of data loss, bringing RPO down to _near zero_ values. Barman 2.0 introduces support for replication slots with PostgreSQL servers 9.4 or above, therefore allowing WAL streaming-only configurations. Moreover, you can now add Barman as a synchronous WAL receiver in your PostgreSQL 9.5 (or higher) cluster, and achieve **zero data loss** (RPO=0). In some cases you have no choice and you are forced to use traditional archiving. In others, you can choose whether to use both or just WAL streaming. Unless you have strong reasons not to do it, we recommend to use both channels, for maximum reliability and robustness. ## Two typical scenarios for backups In order to make life easier for you, below we summarise the two most typical scenarios for a given PostgreSQL server in Barman. Bear in mind that this is a decision that you must make for every single server that you decide to back up with Barman. This means that you can have heterogeneous setups within the same installation. As mentioned before, we will only worry about the PostgreSQL server (`pg`) and the Barman server (`backup`). However, in real life, your architecture will most likely contain other technologies such as repmgr, pgBouncer, Nagios/Icinga, and so on. ### Scenario 1: Backup via streaming protocol If you are using PostgreSQL 9.4 or higher, and your database falls under a general use case scenario, you will likely end up deciding on a streaming backup installation - see figure \ref{scenario1-design} below. ![Streaming-only backup (Scenario 1)\label{scenario1-design}](../images/barman-architecture-scenario1.png){ width=80% } In this scenario, you will need to configure: 1. a standard connection to PostgreSQL, for management, coordination, and monitoring purposes 2. 
a streaming replication connection that will be used by both `pg_basebackup` (for base backup operations) and `pg_receivewal` (for WAL streaming) This setup, in Barman's terminology, is known as **streaming-only** setup, as it does not require any SSH connection for backup and archiving operations. This is particularly suitable and extremely practical for Docker environments. However, as mentioned before, you can configure standard archiving as well and implement a more robust architecture - see figure \ref{scenario1b-design} below. ![Streaming backup with WAL archiving (Scenario 1b)\label{scenario1b-design}](../images/barman-architecture-scenario1b.png){ width=80% } This alternate approach requires: - an additional SSH connection that allows the `postgres` user on the PostgreSQL server to connect as `barman` user on the Barman server - the `archive_command` in PostgreSQL be configured to ship WAL files to Barman This architecture is available also to PostgreSQL 9.2/9.3 users that do not use tablespaces. ### Scenario 2: Backup via `rsync`/SSH The _traditional_ setup of `rsync` over SSH is the only available option for: - PostgreSQL servers version 8.3, 8.4, 9.0 or 9.1 - PostgreSQL servers version 9.2 or 9.3 that are using tablespaces - incremental backup, parallel backup and deduplication - network compression during backups - finer control of bandwidth usage, including on a tablespace basis ![Scenario 2 - Backup via rsync/SSH](../images/barman-architecture-scenario2.png){ width=80% } In this scenario, you will need to configure: 1. a standard connection to PostgreSQL for management, coordination, and monitoring purposes 2. an SSH connection for base backup operations to be used by `rsync` that allows the `barman` user on the Barman server to connect as `postgres` user on the PostgreSQL server 3. an SSH connection for WAL archiving to be used by the `archive_command` in PostgreSQL and that allows the `postgres` user on the PostgreSQL server to connect as `barman` user on the Barman server Starting from PostgreSQL 9.2, you can add a streaming replication connection that is used for WAL streaming and significantly reduce RPO. This more robust implementation is depicted in figure \ref{scenario2b-design}. ![Backup via rsync/SSH with WAL streaming (Scenario 2b)\label{scenario2b-design}](../images/barman-architecture-scenario2b.png){ width=80% } barman-2.18/doc/manual/02-before_you_start.en.md0000644000621200062120000000175114172556763017643 0ustar 00000000000000\newpage # Before you start Before you start using Barman, it is fundamental that you get familiar with PostgreSQL and the concepts around physical backups, Point-In-Time-Recovery and replication, such as base backups, WAL archiving, etc. Below you can find a non exhaustive list of resources that we recommend for you to read: - _PostgreSQL documentation_: - [SQL Dump][sqldump][^pgdump] - [File System Level Backup][physicalbackup] - [Continuous Archiving and Point-in-Time Recovery (PITR)][pitr] - [Reliability and the Write-Ahead Log][wal] - _Book_: [PostgreSQL 10 Administration Cookbook][adminbook] [^pgdump]: It is important that you know the difference between logical and physical backup, therefore between `pg_dump` and a tool like Barman. Professional training on these topics is another effective way of learning these concepts. At any time of the year you can find many courses available all over the world, delivered by PostgreSQL companies such as EnterpriseDB. 
barman-2.18/doc/manual/25-streaming_backup.en.md0000644000621200062120000000256514172556763017617 0ustar 00000000000000## Streaming backup Barman can backup a PostgreSQL server using the streaming connection, relying on `pg_basebackup`, a utility that has been available from PostgreSQL 9.1. > **IMPORTANT:** Barman requires that `pg_basebackup` is installed in > the same server. For PostgreSQL 9.2 servers, you need the > `pg_basebackup` of version 9.2 installed alongside with Barman. For > PostgreSQL 9.3 and above, it is recommended to install the last > available version of `pg_basebackup`, as it is back compatible. You > can even install multiple versions of `pg_basebackup` on the Barman > server and properly point to the specific version for a server, > using the `path_prefix` option in the configuration file. To successfully backup your server with the streaming connection, you need to use `postgres` as your backup method: ``` ini backup_method = postgres ``` > **IMPORTANT:** You will not be able to start a backup if WAL is not > being correctly archived to Barman, either through the `archiver` or > the `streaming_archiver` To check if the server configuration is valid you can use the `barman check` command: ``` bash barman@backup$ barman check pg ``` To start a backup you can use the `barman backup` command: ``` bash barman@backup$ barman backup pg ``` > **IMPORTANT:** `pg_basebackup` 9.4 or higher is required for > tablespace support if you use the `postgres` backup method. barman-2.18/doc/manual/21-preliminary_steps.en.md0000644000621200062120000002106214172556763020037 0ustar 00000000000000## Preliminary steps This section contains some preliminary steps that you need to undertake before setting up your PostgreSQL server in Barman. > **IMPORTANT:** > Before you proceed, it is important that you have made your decision > in terms of WAL archiving and backup strategies, as outlined in the > _"Design and architecture"_ section. In particular, you should > decide which WAL archiving methods to use, as well as the backup > method. ### PostgreSQL connection You need to make sure that the `backup` server can connect to the PostgreSQL server on `pg` as superuser or, from PostgreSQL 10 or higher, that the correct set of privileges are granted to the user that connects to the database. You can create a specific superuser in PostgreSQL, named `barman`, as follows: ``` bash postgres@pg$ createuser -s -P barman ``` Or create a normal user with the required set of privileges as follows: ``` bash postgres@pg$ createuser -P barman ``` ``` sql GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) to barman; GRANT EXECUTE ON FUNCTION pg_stop_backup() to barman; GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) to barman; GRANT EXECUTE ON FUNCTION pg_switch_wal() to barman; GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) to barman; GRANT pg_read_all_settings TO barman; GRANT pg_read_all_stats TO barman; ``` It is worth noting that without a real superuser, the `--force` option of the `barman switch-wal` command will not work. > **IMPORTANT:** The above `createuser` command will prompt for a password, > which you are then advised to add to the `~barman/.pgpass` file > on the `backup` server. For further information, please refer to > ["The Password File" section in the PostgreSQL Documentation][pgpass]. This connection is required by Barman in order to coordinate its activities with the server, as well as for monitoring purposes. 
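If you created the `barman` user with a password, that password is typically stored in the `~barman/.pgpass` file on the `backup` server, as mentioned in the note above. A minimal sketch, with placeholder values (the format is `hostname:port:database:username:password`):

``` bash
# run as the barman user on the backup server
echo 'pg:5432:*:barman:CHANGE_ME' >> ~/.pgpass
chmod 600 ~/.pgpass
```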
You can choose your favourite client authentication method among those offered by PostgreSQL. More information can be found in the ["Client Authentication" section of the PostgreSQL Documentation][pghba]. Make sure you test the following command before proceeding: ``` bash barman@backup$ psql -c 'SELECT version()' -U barman -h pg postgres ``` Write down the above information (user name, host name and database name) and keep it for later. You will need it with in the `conninfo` option for your server configuration, like in this example: ``` ini [pg] ; ... conninfo = host=pg user=barman dbname=postgres ``` > **NOTE:** Barman honours the `application_name` connection option > for PostgreSQL servers 9.0 or higher. ### PostgreSQL WAL archiving and replication Before you proceed, you need to properly configure PostgreSQL on `pg` to accept streaming replication connections from the Barman server. Please read the following sections in the PostgreSQL documentation: - [Role attributes][roles] - [The pg_hba.conf file][authpghba] - [Setting up standby servers using streaming replication][streamprot] One configuration parameter that is crucially important is the `wal_level` parameter. This parameter must be configured to ensure that all the useful information necessary for a backup to be coherent are included in the transaction log file. ``` ini wal_level = 'replica' ``` For PostgreSQL 9.4 or higher, `wal_level` can also be set to `logical`, in case logical decoding is needed. For PostgreSQL versions older than 9.6, `wal_level` must be set to `hot_standby`. Restart the PostgreSQL server for the configuration to be refreshed. ### PostgreSQL streaming connection If you plan to use WAL streaming or streaming backup, you need to setup a streaming connection. We recommend creating a specific user in PostgreSQL, named `streaming_barman`, as follows: ``` bash postgres@pg$ createuser -P --replication streaming_barman ``` > **IMPORTANT:** The above command will prompt for a password, > which you are then advised to add to the `~barman/.pgpass` file > on the `backup` server. For further information, please refer to > ["The Password File" section in the PostgreSQL Documentation][pgpass]. You can manually verify that the streaming connection works through the following command: ``` bash barman@backup$ psql -U streaming_barman -h pg \ -c "IDENTIFY_SYSTEM" \ replication=1 ``` > **IMPORTANT:** > Please make sure you are able to connect via streaming replication > before going any further. You also need to configure the `max_wal_senders` parameter in the PostgreSQL configuration file. The number of WAL senders depends on the PostgreSQL architecture you have implemented. In this example, we are setting it to `2`: ``` ini max_wal_senders = 2 ``` This option represents the maximum number of concurrent streaming connections that the server will be allowed to manage. Another important parameter is `max_replication_slots`, which represents the maximum number of replication slots [^replslot94] that the server will be allowed to manage. This parameter is needed if you are planning to use the streaming connection to receive WAL files over the streaming connection: ``` ini max_replication_slots = 2 ``` [^replslot94]: Replication slots have been introduced in PostgreSQL 9.4. See section _"WAL Streaming / Replication slots"_ for details. 
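If in doubt, you can inspect the values currently in effect on `pg` before changing them, for example from the `backup` server using the `barman` connection user created earlier:

``` bash
psql -U barman -h pg -c 'SHOW max_wal_senders' postgres
psql -U barman -h pg -c 'SHOW max_replication_slots' postgres
```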
The values proposed for `max_replication_slots` and `max_wal_senders` must be considered as examples, and the values you will use in your actual setup must be chosen after a careful evaluation of the architecture. Please consult the PostgreSQL documentation for guidelines and clarifications. ### SSH connections SSH is a protocol and a set of tools that allows you to open a remote shell to a remote server and copy files between the server and the local system. You can find more documentation about SSH usage in the article ["SSH Essentials"][ssh_essentials] by Digital Ocean. SSH key exchange is a very common practice that is used to implement secure passwordless connections between users on different machines, and it's needed to use `rsync` for WAL archiving and for backups. > **NOTE:** > This procedure is not needed if you plan to use the streaming > connection only to archive transaction logs and backup your PostgreSQL > server. [ssh_essentials]: https://www.digitalocean.com/community/tutorials/ssh-essentials-working-with-ssh-servers-clients-and-keys #### SSH configuration of postgres user Unless you have done it before, you need to create an SSH key for the PostgreSQL user. Log in as `postgres`, in the `pg` host and type: ``` bash postgres@pg$ ssh-keygen -t rsa ``` As this key must be used to connect from hosts without providing a password, no passphrase should be entered during the key pair creation. #### SSH configuration of barman user As in the previous paragraph, you need to create an SSH key for the Barman user. Log in as `barman` in the `backup` host and type: ``` bash barman@backup$ ssh-keygen -t rsa ``` For the same reason, no passphrase should be entered. #### From PostgreSQL to Barman The SSH connection from the PostgreSQL server to the backup server is needed to correctly archive WAL files using the `archive_command` setting. To successfully connect from the PostgreSQL server to the backup server, the PostgreSQL public key has to be configured into the authorized keys of the backup server for the `barman` user. The public key to be authorized is stored inside the `postgres` user home directory in a file named `.ssh/id_rsa.pub`, and its content should be included in a file named `.ssh/authorized_keys` inside the home directory of the `barman` user in the backup server. If the `authorized_keys` file doesn't exist, create it using `600` as permissions. The following command should succeed without any output if the SSH key pair exchange has been completed successfully: ``` bash postgres@pg$ ssh barman@backup -C true ``` The value of the `archive_command` configuration parameter will be discussed in the _"WAL archiving via archive_command section"_. #### From Barman to PostgreSQL The SSH connection between the backup server and the PostgreSQL server is used for the traditional backup over rsync. Just as with the connection from the PostgreSQL server to the backup server, we should authorize the public key of the backup server in the PostgreSQL server for the `postgres` user. The content of the file `.ssh/id_rsa.pub` in the `barman` server should be put in the file named `.ssh/authorized_keys` in the PostgreSQL server. The permissions of that file should be `600`. The following command should succeed without any output if the key pair exchange has been completed successfully. 
``` bash barman@backup$ ssh postgres@pg -C true ``` barman-2.18/doc/manual/26-rsync_backup.en.md0000644000621200062120000000177114172556763016763 0ustar 00000000000000## Backup with `rsync`/SSH The backup over `rsync` was the only available method before 2.0, and is currently the only backup method that supports the incremental backup feature. Please consult the _"Features in detail"_ section for more information. To take a backup using `rsync` you need to put these parameters inside the Barman server configuration file: ``` ini backup_method = rsync ssh_command = ssh postgres@pg ``` The `backup_method` option activates the `rsync` backup method, and the `ssh_command` option is needed to correctly create an SSH connection from the Barman server to the PostgreSQL server. > **IMPORTANT:** You will not be able to start a backup if WAL is not > being correctly archived to Barman, either through the `archiver` or > the `streaming_archiver` To check if the server configuration is valid you can use the `barman check` command: ``` bash barman@backup$ barman check pg ``` To take a backup use the `barman backup` command: ``` bash barman@backup$ barman backup pg ``` barman-2.18/doc/manual/70-feature-matrix.en.md0000644000621200062120000000415114172556763017227 0ustar 00000000000000\newpage \appendix # Feature matrix Below you will find a matrix of PostgreSQL versions and Barman features for backup and archiving: | **Version** | **Backup with rsync/SSH** | **Backup with pg_basebackup** | **Standard WAL archiving** | **WAL Streaming** | **RPO=0** | |:---------:|:---------------------:|:-------------------------:|:----------------------:|:----------------------:|:-------:| | **14** | Yes | Yes | Yes | Yes | Yes | | **13** | Yes | Yes | Yes | Yes | Yes | | **12** | Yes | Yes | Yes | Yes | Yes | | **11** | Yes | Yes | Yes | Yes | Yes | | **10** | Yes | Yes | Yes | Yes | Yes | | **9.6** | Yes | Yes | Yes | Yes | Yes | | **9.5** | Yes | Yes | Yes | Yes | Yes ~(d)~ | | **9.4** | Yes | Yes | Yes | Yes | Yes ~(d)~ | | **9.3** | Yes | Yes ~(c)~ | Yes | Yes ~(b)~ | No | | **9.2** | Yes | Yes ~(a)~~(c)~ | Yes | Yes ~(a)~~(b)~ | No | | _9.1_ | Yes | No | Yes | No | No | | _9.0_ | Yes | No | Yes | No | No | | _8.4_ | Yes | No | Yes | No | No | | _8.3_ | Yes | No | Yes | No | No | **NOTE:** a) `pg_basebackup` and `pg_receivexlog` 9.2 required b) WAL streaming-only not supported (standard archiving required) c) Backup of tablespaces not supported d) When using `pg_receivexlog` 9.5, minor version 9.5.5 or higher required [^commitsync] [^commitsync]: The commit ["Fix pg_receivexlog --synchronous"][49340627f9821e447f135455d942f7d5e96cae6d] is required (included in version 9.5.5) It is required by Barman that `pg_basebackup` and `pg_receivewal`/`pg_receivexlog` of the same version of the PostgreSQL server (or higher) are installed on the same server where Barman resides. The only exception is that PostgreSQL 9.2 users are required to install version 9.2 of `pg_basebackup` and `pg_receivexlog` alongside with Barman. >> **TIP:** We recommend that the last major, stable version of the PostgreSQL clients (e.g. 11) is installed on the Barman server if you plan to use backup and WAL archiving over streaming replication through `pg_basebackup` and `pg_receivewal`, for PostgreSQL 9.3 or higher servers. >> **TIP:** For "RPO=0" architectures, it is recommended to have at least one synchronous standby server. 
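As noted above, the PostgreSQL client binaries must be installed on the Barman server itself; a quick way to check which versions are found on the `PATH` is:

``` bash
pg_basebackup --version
pg_receivewal --version
```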
barman-2.18/doc/manual/01-intro.en.md0000644000621200062120000000765714172556763015435 0ustar 00000000000000\newpage # Introduction In a perfect world, there would be no need for a backup. However, it is important, especially in business environments, to be prepared for when the _"unexpected"_ happens. In a database scenario, the unexpected could take any of the following forms: - data corruption - system failure (including hardware failure) - human error - natural disaster In such cases, any ICT manager or DBA should be able to fix the incident and recover the database in the shortest time possible. We normally refer to this discipline as **disaster recovery**, and more broadly *business continuity*. Within business continuity, it is important to familiarise with two fundamental metrics, as defined by Wikipedia: - [**Recovery Point Objective (RPO)**][rpo]: _"maximum targeted period in which data might be lost from an IT service due to a major incident"_ - [**Recovery Time Objective (RTO)**][rto]: _"the targeted duration of time and a service level within which a business process must be restored after a disaster (or disruption) in order to avoid unacceptable consequences associated with a break in business continuity"_ In a few words, RPO represents the maximum amount of data you can afford to lose, while RTO represents the maximum down-time you can afford for your service. Understandably, we all want **RPO=0** (*"zero data loss"*) and **RTO=0** (*zero down-time*, utopia) - even if it is our grandmothers's recipe website. In reality, a careful cost analysis phase allows you to determine your business continuity requirements. Fortunately, with an open source stack composed of **Barman** and **PostgreSQL**, you can achieve RPO=0 thanks to synchronous streaming replication. RTO is more the focus of a *High Availability* solution, like [**repmgr**][repmgr]. Therefore, by integrating Barman and repmgr, you can dramatically reduce RTO to nearly zero. Based on our experience at EnterpriseDB, we can confirm that PostgreSQL open source clusters with Barman and repmgr can easily achieve more than 99.99% uptime over a year, if properly configured and monitored. In any case, it is important for us to emphasise more on cultural aspects related to disaster recovery, rather than the actual tools. Tools without human beings are useless. Our mission with Barman is to promote a culture of disaster recovery that: - focuses on backup procedures - focuses even more on recovery procedures - relies on education and training on strong theoretical and practical concepts of PostgreSQL's crash recovery, backup, Point-In-Time-Recovery, and replication for your team members - promotes testing your backups (only a backup that is tested can be considered to be valid), either manually or automatically (be creative with Barman's hook scripts!) - fosters regular practice of recovery procedures, by all members of your devops team (yes, developers too, not just system administrators and DBAs) - solicites to regularly scheduled drills and disaster recovery simulations with the team every 3-6 months - relies on continuous monitoring of PostgreSQL and Barman, and that is able to promptly identify any anomalies Moreover, do everything you can to prepare yourself and your team for when the disaster happens (yes, *when*), because when it happens: - It is going to be a Friday evening, most likely right when you are about to leave the office. 
- It is going to be when you are on holiday (right in the middle of your cruise around the world) and somebody else has to deal with it. - It is certainly going to be stressful. - You will regret not being sure that the last available backup is valid. - Unless you know how long it approximately takes to recover, every second will seems like forever. Be prepared, don't be scared. In 2011, with these goals in mind, 2ndQuadrant started the development of Barman, now one of the most used backup tools for PostgreSQL. Barman is an acronym for "Backup and Recovery Manager". Currently, Barman works only on Linux and Unix operating systems. barman-2.18/doc/manual/41-global-commands.en.md0000644000621200062120000000446514172556763017337 0ustar 00000000000000\newpage # General commands Barman has many commands and, for the sake of exposition, we can organize them by scope. The scope of the **general commands** is the entire Barman server, that can backup many PostgreSQL servers. **Server commands**, instead, act only on a specified server. **Backup commands** work on a backup, which is taken from a certain server. The following list includes the general commands. ## `cron` `barman` doesn't include a long-running daemon or service file (there's nothing to `systemctl start`, `service start`, etc.). Instead, the `barman cron` subcommand is provided to perform `barman`'s background "steady-state" backup operations. You can perform maintenance operations, on both WAL files and backups, using the `cron` command: ``` bash barman cron ``` > **NOTE:** > This command should be executed in a _cron script_. Our > recommendation is to schedule `barman cron` to run every minute. If > you installed Barman using the rpm or debian package, a cron entry > running on every minute will be created for you. `barman cron` executes WAL archiving operations concurrently on a server basis, and this also enforces retention policies on those servers that have: - `retention_policy` not empty and valid; - `retention_policy_mode` set to `auto`. The `cron` command ensures that WAL streaming is started for those servers that have requested it, by transparently executing the `receive-wal` command. In order to stop the operations started by the `cron` command, comment out the cron entry and execute: ```bash barman receive-wal --stop SERVER_NAME ``` You might want to check `barman list-servers` to make sure you get all of your servers. ## `diagnose` The `diagnose` command creates a JSON report useful for diagnostic and support purposes. This report contains information for all configured servers. > **IMPORTANT:** > Even if the diagnose is written in JSON and that format is thought > to be machine readable, its structure is not to be considered part > of the interface. Format can change between different Barman versions. 
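For example, to capture the report in a file that can be attached to a support request:

``` bash
barman diagnose > barman-diagnose.json
```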
## `list-servers` You can display the list of active servers that have been configured for your backup system with: ``` bash barman list-servers ``` A machine readable output can be obtained with the `--minimal` option: ``` bash barman list-servers --minimal ``` barman-2.18/doc/manual/99-references.en.md0000644000621200062120000000646014172556763016433 0ustar 00000000000000 [rpo]: https://en.wikipedia.org/wiki/Recovery_point_objective [rto]: https://en.wikipedia.org/wiki/Recovery_time_objective [repmgr]: http://www.repmgr.org/ [sqldump]: https://www.postgresql.org/docs/current/static/backup-dump.html [physicalbackup]: https://www.postgresql.org/docs/current/static/backup-file.html [pitr]: https://www.postgresql.org/docs/current/static/continuous-archiving.html [adminbook]: https://www.2ndquadrant.com/en/books/postgresql-10-administration-cookbook/ [wal]: https://www.postgresql.org/docs/current/static/wal.html [49340627f9821e447f135455d942f7d5e96cae6d]: https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=49340627f9821e447f135455d942f7d5e96cae6d [requirements_recovery]: https://www.postgresql.org/docs/current/static/warm-standby.html#STANDBY-PLANNING [yumpgdg]: http://yum.postgresql.org/ [aptpgdg]: http://apt.postgresql.org/ [aptpgdgwiki]: https://wiki.postgresql.org/wiki/Apt [epel]: http://fedoraproject.org/wiki/EPEL [man5]: https://docs.pgbarman.org/barman.5.html [setup_user]: https://docs.python.org/3/install/index.html#alternate-installation-the-user-scheme [pypi]: https://pypi.python.org/pypi/barman/ [pgpass]: https://www.postgresql.org/docs/current/static/libpq-pgpass.html [pghba]: http://www.postgresql.org/docs/current/static/client-authentication.html [authpghba]: http://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html [streamprot]: http://www.postgresql.org/docs/current/static/protocol-replication.html [roles]: http://www.postgresql.org/docs/current/static/role-attributes.html [replication-slots]: https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS [synch]: http://www.postgresql.org/docs/current/static/warm-standby.html#SYNCHRONOUS-REPLICATION [target]: https://www.postgresql.org/docs/current/static/recovery-target-settings.html [2ndqrpmrepo]: https://rpm.2ndquadrant.com/ [2ndqdebrepo]: https://apt.2ndquadrant.com/ [boto3]: https://github.com/boto/boto3 [boto3creds]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html [azure-identity]: https://docs.microsoft.com/en-us/python/api/azure-identity/?view=azure-python [azure-storage-blob]: https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python [azure-storage-auth]: https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters [3]: http://github.com/EnterpriseDB/barman [8]: http://en.wikipedia.org/wiki/Hard_link [9]: https://github.com/2ndquadrant-it/pgespresso [11]: http://www.pgbarman.org/ [12]: http://www.pgbarman.org/support/ [13]: https://www.enterprisedb.com/ [14]: http://www.pgbarman.org/faq/ [15]: http://blog.2ndquadrant.com/tag/barman/ [16]: https://github.com/hamann/check-barman [17]: https://github.com/2ndquadrant-it/puppet-barman [18]: http://4caast.morfeo-project.org/ [20]: http://www.postgresql.org/docs/current/static/functions-admin.html [24]: http://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION [25]: http://www.postgresql.org/docs/current/static/app-pgreceivewal.html [26]: https://goo.gl/218Ghl 
[27]: https://github.com/emin100/barmanapi [31]: http://www.postgresql.org/ barman-2.18/doc/manual/65-troubleshooting.en.md0000644000621200062120000000330414172556763017524 0ustar 00000000000000\newpage # Troubleshooting ## Diagnose a Barman installation You can gather important information about the status of all the configured servers using: ``` bash barman diagnose ``` The `diagnose` command output is a full snapshot of the barman server, providing useful information, such as global configuration, SSH version, Python version, `rsync` version, PostgreSQL clients version, as well as current configuration and status of all servers. The `diagnose` command is extremely useful for troubleshooting problems, as it gives a global view on the status of your Barman installation. ## Requesting help Although Barman is extensively documented, there are a lot of scenarios that are not covered. For any questions about Barman and disaster recovery scenarios using Barman, you can reach the dev team using the community mailing list: https://groups.google.com/group/pgbarman or the IRC channel on freenode: irc://irc.freenode.net/barman In the event you discover a bug, you can open a ticket using Github: https://github.com/EnterpriseDB/barman/issues EnterpriseDB provides professional support for Barman, including 24/7 service. ### Submitting a bug Barman has been extensively tested and is currently being used in several production environments. However, as any software, Barman is not bug free. If you discover a bug, please follow this procedure: - execute the `barman diagnose` command - file a bug through the Github issue tracker, by attaching the output obtained by the diagnostics command above (`barman diagnose`) > **WARNING:** > Be careful when submitting the output of the diagnose command > as it might disclose information that are potentially dangerous > from a security point of view. barman-2.18/doc/barman-cloud-restore.10000644000621200062120000001006314172556763015754 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-RESTORE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-restore \- Restore a PostgreSQL backup from the Cloud .SH SYNOPSIS .PP barman\-cloud\-restore [\f[I]OPTIONS\f[]] \f[I]SOURCE_URL\f[] \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] \f[I]RECOVERY_DIR\f[] .SH DESCRIPTION .PP This script can be used to download a backup previouslymade with \f[C]barman\-cloud\-backup\f[] command. Currently AWS S3 and Azure Blob Storage are supported. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B SOURCE_URL URL of the cloud source, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .TP .B BACKUP_ID the ID of the backup to restore .RS .RE .TP .B RECOVERY_DIR the path to a local directory for recovery (used as PGDATA). 
.RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \[en]tablespace NAME:LOCATION extract the named tablespace to the given directory instead of its original location (you may repeat the option for multiple tablespaces) .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The restore was not successful .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman.50000644000621200062120000006163714172556763013210 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN" "5" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman \- Backup and Recovery Manager for PostgreSQL .SH DESCRIPTION .PP Barman is an administration tool for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. Barman can perform remote backups of multiple servers in business critical environments and helps DBAs during the recovery phase. 
.SH CONFIGURATION FILE LOCATIONS .PP The system\-level Barman configuration file is located at .IP .nf \f[C] /etc/barman.conf \f[] .fi .PP or .IP .nf \f[C] /etc/barman/barman.conf \f[] .fi .PP and is overridden on a per\-user level by .IP .nf \f[C] $HOME/.barman.conf \f[] .fi .SH CONFIGURATION FILE SYNTAX .PP The Barman configuration file is a plain \f[C]INI\f[] file. There is a general section called \f[C][barman]\f[] and a section \f[C][servername]\f[] for each server you want to backup. Rows starting with \f[C];\f[] are comments. .SH CONFIGURATION FILE DIRECTORY .PP Barman supports the inclusion of multiple configuration files, through the \f[C]configuration_files_directory\f[] option. Included files must contain only server specifications, not global configurations. If the value of \f[C]configuration_files_directory\f[] is a directory, Barman reads all files with \f[C]\&.conf\f[] extension that exist in that folder. For example, if you set it to \f[C]/etc/barman.d\f[], you can specify your PostgreSQL servers placing each section in a separate \f[C]\&.conf\f[] file inside the \f[C]/etc/barman.d\f[] folder. .SH OPTIONS .TP .B active When set to \f[C]true\f[] (default), the server is in full operational state. When set to \f[C]false\f[], the server can be used for diagnostics, but any operational command such as backup execution or WAL archiving is temporarily disabled. When adding a new server to Barman, we suggest setting active=false at first, making sure that barman check shows no problems, and only then activating the server. This will avoid spamming the Barman logs with errors during the initial setup. .RS .RE .TP .B archiver This option allows you to activate log file shipping through PostgreSQL\[aq]s \f[C]archive_command\f[] for a server. If set to \f[C]true\f[] (default), Barman expects that continuous archiving for a server is in place and will activate checks as well as management (including compression) of WAL files that Postgres deposits in the \f[I]incoming\f[] directory. Setting it to \f[C]false\f[], will disable standard continuous archiving for a server. Global/Server. .RS .RE .TP .B archiver_batch_size This option allows you to activate batch processing of WAL files for the \f[C]archiver\f[] process, by setting it to a value > 0. Otherwise, the traditional unlimited processing of the WAL queue is enabled. When batch processing is activated, the \f[C]archive\-wal\f[] process would limit itself to maximum \f[C]archiver_batch_size\f[] WAL segments per single run. Integer. Global/Server. .RS .RE .TP .B backup_directory Directory where backup data for a server will be placed. Server. .RS .RE .TP .B backup_method Configure the method barman used for backup execution. If set to \f[C]rsync\f[] (default), barman will execute backup using the \f[C]rsync\f[] command over SSH (requires \f[C]ssh_command\f[]). If set to \f[C]postgres\f[] barman will use the \f[C]pg_basebackup\f[] command to execute the backup. If set to \f[C]local\-rsync\f[], barman will assume to be running on the same server as the the PostgreSQL instance and with the same user, then execute \f[C]rsync\f[] for the file system copy. Global/Server. .RS .RE .TP .B backup_options This option allows you to control the way Barman interacts with PostgreSQL for backups. 
It is a comma\-separated list of values that accepts the following options: .RS .IP \[bu] 2 \f[C]exclusive_backup\f[] (default when \f[C]backup_method\ =\ rsync\f[]): \f[C]barman\ backup\f[] executes backup operations using the standard exclusive backup approach (technically through \f[C]pg_start_backup\f[] and \f[C]pg_stop_backup\f[]) .IP \[bu] 2 \f[C]concurrent_backup\f[] (default when \f[C]backup_method\ =\ postgres\f[]): if using PostgreSQL 9.2, 9.3, 9.4, and 9.5, Barman requires the \f[C]pgespresso\f[] module to be installed on the PostgreSQL server and can be used to perform a backup from a standby server. Starting from PostgreSQL 9.6, Barman uses the new PostgreSQL API to perform backups from a standby server. .IP \[bu] 2 \f[C]external_configuration\f[]: if present, any warning regarding external configuration files is suppressed during the execution of a backup. .PP Note that \f[C]exclusive_backup\f[] and \f[C]concurrent_backup\f[] are mutually exclusive. Global/Server. .RE .TP .B bandwidth_limit This option allows you to specify a maximum transfer rate in kilobytes per second. A value of zero specifies no limit (default). Global/Server. .RS .RE .TP .B barman_home Main data directory for Barman. Global. .RS .RE .TP .B barman_lock_directory Directory for locks. Default: \f[C]%(barman_home)s\f[]. Global. .RS .RE .TP .B basebackup_retry_sleep Number of seconds of wait after a failed copy, before retrying Used during both backup and recovery operations. Positive integer, default 30. Global/Server. .RS .RE .TP .B basebackup_retry_times Number of retries of base backup copy, after an error. Used during both backup and recovery operations. Positive integer, default 0. Global/Server. .RS .RE .TP .B basebackups_directory Directory where base backups will be placed. Server. .RS .RE .TP .B check_timeout Maximum execution time, in seconds per server, for a barman check command. Set to 0 to disable the timeout. Positive integer, default 30. Global/Server. .RS .RE .TP .B compression Standard compression algorithm applied to WAL files. Possible values are: \f[C]gzip\f[] (requires \f[C]gzip\f[] to be installed on the system), \f[C]bzip2\f[] (requires \f[C]bzip2\f[]), \f[C]pigz\f[] (requires \f[C]pigz\f[]), \f[C]pygzip\f[] (Python\[aq]s internal gzip compressor) and \f[C]pybzip2\f[] (Python\[aq]s internal bzip2 compressor). Global/Server. .RS .RE .TP .B conninfo Connection string used by Barman to connect to the Postgres server. This is a libpq connection string, consult the PostgreSQL manual (https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING) for more information. Commonly used keys are: host, hostaddr, port, dbname, user, password. Server. .RS .RE .TP .B create_slot When set to \f[C]auto\f[] and \f[C]slot_name\f[] is defined, Barman automatically attempts to create the replication slot if not present. When set to \f[C]manual\f[] (default), the replication slot needs to be manually created. Global/Server. .RS .RE .TP .B custom_compression_filter Customised compression algorithm applied to WAL files. Global/Server. .RS .RE .TP .B custom_compression_magic Customised compression magic which is checked in the beginning of a WAL file to select the custom algorithm. If you are using a custom compression filter then setting this will prevent barman from applying the custom compression to WALs which have been pre\-compressed with that compression. 
If you do not configure this then custom compression will still be applied but any pre\-compressed WAL files will be compressed again during WAL archive. Global/Server. .RS .RE .TP .B custom_decompression_filter Customised decompression algorithm applied to compressed WAL files; this must match the compression algorithm. Global/Server. .RS .RE .TP .B description A human readable description of a server. Server. .RS .RE .TP .B errors_directory Directory that contains WAL files that contain an error; usually this is related to a conflict with an existing WAL file (e.g. a WAL file that has been archived after a streamed one). .RS .RE .TP .B forward_config_path Parameter which determines whether a passive node should forward its configuration file path to its primary node during cron or sync\-info commands. Set to true if you are invoking barman with the \f[C]\-c/\-\-config\f[] option and your configuration is in the same place on both the passive and primary barman servers. Defaults to false. .RS .RE .TP .B immediate_checkpoint This option allows you to control the way PostgreSQL handles checkpoint at the start of the backup. If set to \f[C]false\f[] (default), the I/O workload for the checkpoint will be limited, according to the \f[C]checkpoint_completion_target\f[] setting on the PostgreSQL server. If set to \f[C]true\f[], an immediate checkpoint will be requested, meaning that PostgreSQL will complete the checkpoint as soon as possible. Global/Server. .RS .RE .TP .B incoming_wals_directory Directory where incoming WAL files are archived into. Requires \f[C]archiver\f[] to be enabled. Server. .RS .RE .TP .B last_backup_maximum_age This option identifies a time frame that must contain the latest backup. If the latest backup is older than the time frame, barman check command will report an error to the user. If empty (default), latest backup is always considered valid. Syntax for this option is: "i (DAYS | WEEKS | MONTHS)" where i is a integer greater than zero, representing the number of days | weeks | months of the time frame. Global/Server. .RS .RE .TP .B last_backup_minimum_size This option identifies lower limit to the acceptable size of the latest successful backup. If the latest backup is smaller than the specified size, barman check command will report an error to the user. If empty (default), latest backup is always considered valid. Syntax for this option is: "i (k|Ki|M|Mi|G|Gi|T|Ti)" where i is an integer greater than zero, with an optional SI or IEC suffix. k=kilo=1000, Ki=Kibi=1024 and so forth. Note that the suffix is case\-sensitive. Global/Server. .RS .RE .TP .B last_wal_maximum_age This option identifies a time frame that must contain the latest WAL file archived. If the latest WAL file is older than the time frame, barman check command will report an error to the user. If empty (default), the age of the WAL files is not checked. Syntax is the same as last_backup_maximum_age (above). Global/Server. log_file .RS .RE Location of Barman\[aq]s log file. Global. .RS .RE .TP .B log_level Level of logging (DEBUG, INFO, WARNING, ERROR, CRITICAL). Global. .RS .RE .TP .B max_incoming_wals_queue Maximum number of WAL files in the incoming queue (in both streaming and archiving pools) that are allowed before barman check returns an error (that does not block backups). Global/Server. Default: None (disabled). .RS .RE .TP .B minimum_redundancy Minimum number of backups to be retained. Default 0. Global/Server. 
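.PP
As an illustrative sketch (the values are arbitrary examples), the age, size and redundancy thresholds described above can be combined in a server section so that \f[C]barman check\f[] reports a problem as soon as backups fall behind:
.IP
.nf
\f[C]
[pg]
minimum_redundancy = 2
last_backup_maximum_age = 1 WEEKS
last_backup_minimum_size = 10 Gi
last_wal_maximum_age = 1 DAYS
\f[]
.fi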
.RS .RE .TP .B network_compression This option allows you to enable data compression for network transfers. If set to \f[C]false\f[] (default), no compression is used. If set to \f[C]true\f[], compression is enabled, reducing network usage. Global/Server. .RS .RE .TP .B parallel_jobs This option controls how many parallel workers will copy files during a backup or recovery command. Default 1. Global/Server. For backup purposes, it works only when \f[C]backup_method\f[] is \f[C]rsync\f[]. .RS .RE .TP .B path_prefix One or more absolute paths, separated by colon, where Barman looks for executable files. The paths specified in \f[C]path_prefix\f[] are tried before the ones specified in \f[C]PATH\f[] environment variable. Global/server. .RS .RE .TP .B post_archive_retry_script Hook script launched after a WAL file is archived by maintenance. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post archive scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_archive_script Hook script launched after a WAL file is archived by maintenance, after \[aq]post_archive_retry_script\[aq]. Global/Server. .RS .RE .TP .B post_backup_retry_script Hook script launched after a base backup. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post backup scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_backup_script Hook script launched after a base backup, after \[aq]post_backup_retry_script\[aq]. Global/Server. .RS .RE .TP .B post_delete_retry_script Hook script launched after the deletion of a backup. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post delete scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_delete_script Hook script launched after the deletion of a backup, after \[aq]post_delete_retry_script\[aq]. Global/Server. .RS .RE .TP .B post_recovery_retry_script Hook script launched after a recovery. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post recovery scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_recovery_script Hook script launched after a recovery, after \[aq]post_recovery_retry_script\[aq]. Global/Server. .RS .RE .TP .B post_wal_delete_retry_script Hook script launched after the deletion of a WAL file. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post delete scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_wal_delete_script Hook script launched after the deletion of a WAL file, after \[aq]post_wal)delete_retry_script\[aq]. Global/Server. .RS .RE .TP .B pre_archive_retry_script Hook script launched before a WAL file is archived by maintenance, after \[aq]pre_archive_script\[aq]. 
Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the WAL archiving operation. Global/Server. .RS .RE .TP .B pre_archive_script Hook script launched before a WAL file is archived by maintenance. Global/Server. .RS .RE .TP .B pre_backup_retry_script Hook script launched before a base backup, after \[aq]pre_backup_script\[aq]. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the backup operation. Global/Server. .RS .RE .TP .B pre_backup_script Hook script launched before a base backup. Global/Server. .RS .RE .TP .B pre_delete_retry_script Hook script launched before the deletion of a backup, after \[aq]pre_delete_script\[aq]. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the backup deletion. Global/Server. .RS .RE .TP .B pre_delete_script Hook script launched before the deletion of a backup. Global/Server. .RS .RE .TP .B pre_recovery_retry_script Hook script launched before a recovery, after \[aq]pre_recovery_script\[aq]. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the recover operation. Global/Server. .RS .RE .TP .B pre_recovery_script Hook script launched before a recovery. Global/Server. .RS .RE .TP .B pre_wal_delete_retry_script Hook script launched before the deletion of a WAL file, after \[aq]pre_wal_delete_script\[aq]. Being this a \f[I]retry\f[] hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the WAL file deletion. Global/Server. .RS .RE .TP .B pre_wal_delete_script Hook script launched before the deletion of a WAL file. Global/Server. .RS .RE .TP .B primary_ssh_command Parameter that identifies a Barman server as \f[C]passive\f[]. In a passive node, the source of a backup server is a Barman installation rather than a PostgreSQL server. If \f[C]primary_ssh_command\f[] is specified, Barman uses it to establish a connection with the primary server. Empty by default, it can also be set globally. .RS .RE .TP .B recovery_options Options for recovery operations. Currently only supports \f[C]get\-wal\f[]. \f[C]get\-wal\f[] activates generation of a basic \f[C]restore_command\f[] in the resulting recovery configuration that uses the \f[C]barman\ get\-wal\f[] command to fetch WAL files directly from Barman\[aq]s archive of WALs. Comma separated list of values, default empty. Global/Server. .RS .RE .TP .B retention_policy Policy for retention of periodic backups and archive logs. If left empty, retention policies are not enforced. For redundancy based retention policy use "REDUNDANCY i" (where i is an integer > 0 and defines the number of backups to retain). 
For recovery window retention policy use "RECOVERY WINDOW OF i DAYS" or "RECOVERY WINDOW OF i WEEKS" or "RECOVERY WINDOW OF i MONTHS" where i is a positive integer representing, specifically, the number of days, weeks or months to retain your backups. For more detailed information, refer to the official documentation. Default value is empty. Global/Server. .RS .RE .TP .B retention_policy_mode Currently only "auto" is implemented. Global/Server. .RS .RE .TP .B reuse_backup This option controls incremental backup support. Global/Server. Possible values are: .RS .IP \[bu] 2 \f[C]off\f[]: disabled (default); .IP \[bu] 2 \f[C]copy\f[]: reuse the last available backup for a server and create a copy of the unchanged files (reduce backup time); .IP \[bu] 2 \f[C]link\f[]: reuse the last available backup for a server and create a hard link of the unchanged files (reduce backup time and space). Requires operating system and file system support for hard links. .RE .TP .B slot_name Physical replication slot to be used by the \f[C]receive\-wal\f[] command when \f[C]streaming_archiver\f[] is set to \f[C]on\f[]. Requires PostgreSQL >= 9.4. Global/Server. Default: None (disabled). .RS .RE .TP .B ssh_command Command used by Barman to login to the Postgres server via ssh. Server. .RS .RE .TP .B streaming_archiver This option allows you to use the PostgreSQL\[aq]s streaming protocol to receive transaction logs from a server. If set to \f[C]on\f[], Barman expects to find \f[C]pg_receivewal\f[] (known as \f[C]pg_receivexlog\f[] prior to PostgreSQL 10) in the PATH (see \f[C]path_prefix\f[] option) and that streaming connection for the server is working. This activates connection checks as well as management (including compression) of WAL files. If set to \f[C]off\f[] (default) barman will rely only on continuous archiving for a server WAL archive operations, eventually terminating any running \f[C]pg_receivexlog\f[] for the server. Global/Server. .RS .RE .TP .B streaming_archiver_batch_size This option allows you to activate batch processing of WAL files for the \f[C]streaming_archiver\f[] process, by setting it to a value > 0. Otherwise, the traditional unlimited processing of the WAL queue is enabled. When batch processing is activated, the \f[C]archive\-wal\f[] process would limit itself to maximum \f[C]streaming_archiver_batch_size\f[] WAL segments per single run. Integer. Global/Server. .RS .RE .TP .B streaming_archiver_name Identifier to be used as \f[C]application_name\f[] by the \f[C]receive\-wal\f[] command. Only available with \f[C]pg_receivewal\f[] (or \f[C]pg_receivexlog\f[] >= 9.3). By default it is set to \f[C]barman_receive_wal\f[]. Global/Server. .RS .RE .TP .B streaming_backup_name Identifier to be used as \f[C]application_name\f[] by the \f[C]pg_basebackup\f[] command. Only available with \f[C]pg_basebackup\f[] >= 9.3. By default it is set to \f[C]barman_streaming_backup\f[]. Global/Server. .RS .RE .TP .B streaming_conninfo Connection string used by Barman to connect to the Postgres server via streaming replication protocol. By default it is set to \f[C]conninfo\f[]. Server. .RS .RE .TP .B streaming_wals_directory Directory where WAL files are streamed from the PostgreSQL server to Barman. Requires \f[C]streaming_archiver\f[] to be enabled. Server. .RS .RE .TP .B tablespace_bandwidth_limit This option allows you to specify a maximum transfer rate in kilobytes per second, by specifying a comma separated list of tablespaces (pairs TBNAME:BWLIMIT). A value of zero specifies no limit (default). 
Global/Server. .RS .RE .TP .B wal_retention_policy Policy for retention of archive logs (WAL files). Currently only "MAIN" is available. Global/Server. .RS .RE .TP .B wals_directory Directory which contains WAL files. Server. .RS .RE .SH HOOK SCRIPTS .PP The script definition is passed to a shell and can return any exit code. .PP The shell environment will contain the following variables: .TP .B \f[C]BARMAN_CONFIGURATION\f[] configuration file used by barman .RS .RE .TP .B \f[C]BARMAN_ERROR\f[] error message, if any (only for the \[aq]post\[aq] phase) .RS .RE .TP .B \f[C]BARMAN_PHASE\f[] \[aq]pre\[aq] or \[aq]post\[aq] .RS .RE .TP .B \f[C]BARMAN_RETRY\f[] \f[C]1\f[] if it is a \f[I]retry script\f[] (from 1.5.0), \f[C]0\f[] if not .RS .RE .TP .B \f[C]BARMAN_SERVER\f[] name of the server .RS .RE .PP Backup scripts specific variables: .TP .B \f[C]BARMAN_BACKUP_DIR\f[] backup destination directory .RS .RE .TP .B \f[C]BARMAN_BACKUP_ID\f[] ID of the backup .RS .RE .TP .B \f[C]BARMAN_PREVIOUS_ID\f[] ID of the previous backup (if present) .RS .RE .TP .B \f[C]BARMAN_NEXT_ID\f[] ID of the next backup (if present) .RS .RE .TP .B \f[C]BARMAN_STATUS\f[] status of the backup .RS .RE .TP .B \f[C]BARMAN_VERSION\f[] version of Barman .RS .RE .PP Archive scripts specific variables: .TP .B \f[C]BARMAN_SEGMENT\f[] name of the WAL file .RS .RE .TP .B \f[C]BARMAN_FILE\f[] full path of the WAL file .RS .RE .TP .B \f[C]BARMAN_SIZE\f[] size of the WAL file .RS .RE .TP .B \f[C]BARMAN_TIMESTAMP\f[] WAL file timestamp .RS .RE .TP .B \f[C]BARMAN_COMPRESSION\f[] type of compression used for the WAL file .RS .RE .PP Recovery scripts specific variables: .TP .B \f[C]BARMAN_DESTINATION_DIRECTORY\f[] the directory where the new instance is recovered .RS .RE .TP .B \f[C]BARMAN_TABLESPACES\f[] tablespace relocation map (JSON, if present) .RS .RE .TP .B \f[C]BARMAN_REMOTE_COMMAND\f[] secure shell command used by the recovery (if present) .RS .RE .TP .B \f[C]BARMAN_RECOVER_OPTIONS\f[] recovery additional options (JSON, if present) .RS .RE .PP Only in case of retry hook scripts, the exit code of the script is checked by Barman. Output of hook scripts is simply written in the log file. .SH EXAMPLE .PP Here is an example of configuration file: .IP .nf \f[C] [barman] ;\ Main\ directory barman_home\ =\ /var/lib/barman ;\ System\ user barman_user\ =\ barman ;\ Log\ location log_file\ =\ /var/log/barman/barman.log ;\ Default\ compression\ level ;compression\ =\ gzip ;\ Incremental\ backup reuse_backup\ =\ link ;\ \[aq]main\[aq]\ PostgreSQL\ Server\ configuration [main] ;\ Human\ readable\ description description\ =\ \ "Main\ PostgreSQL\ Database" ;\ SSH\ options ssh_command\ =\ ssh\ postgres\@pg ;\ PostgreSQL\ connection\ string conninfo\ =\ host=pg\ user=postgres ;\ PostgreSQL\ streaming\ connection\ string streaming_conninfo\ =\ host=pg\ user=postgres ;\ Minimum\ number\ of\ required\ backups\ (redundancy) minimum_redundancy\ =\ 1 ;\ Retention\ policy\ (based\ on\ redundancy) retention_policy\ =\ REDUNDANCY\ 2 \f[] .fi .SH SEE ALSO .PP \f[C]barman\f[] (1). 
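.PP
As an illustration of the hook script environment described above (a sketch, not shipped with Barman), a \f[C]post_backup_script\f[] might record the outcome of every base backup:
.IP
.nf
\f[C]
#!/bin/bash
# BARMAN_SERVER, BARMAN_BACKUP_ID and BARMAN_STATUS are exported by Barman
# in the hook environment for backup scripts.
logger -t barman-hook "server=$BARMAN_SERVER backup=$BARMAN_BACKUP_ID status=$BARMAN_STATUS"
\f[]
.fi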
.SH AUTHORS .PP Barman maintainers (in alphabetical order): .IP \[bu] 2 Abhijit Menon\-Sen .IP \[bu] 2 Jane Threefoot .IP \[bu] 2 Michael Wallace .PP Past contributors (in alphabetical order): .IP \[bu] 2 Anna Bellandi (QA/testing) .IP \[bu] 2 Britt Cole (documentation reviewer) .IP \[bu] 2 Carlo Ascani (developer) .IP \[bu] 2 Francesco Canovai (QA/testing) .IP \[bu] 2 Gabriele Bartolini (architect) .IP \[bu] 2 Gianni Ciolli (QA/testing) .IP \[bu] 2 Giulio Calacoci (developer) .IP \[bu] 2 Giuseppe Broccolo (developer) .IP \[bu] 2 Jonathan Battiato (QA/testing) .IP \[bu] 2 Leonardo Cecchi (developer) .IP \[bu] 2 Marco Nenciarini (project leader) .IP \[bu] 2 Niccolò Fei (QA/testing) .IP \[bu] 2 Rubens Souza (QA/testing) .IP \[bu] 2 Stefano Bianucci (developer) .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman-cloud-wal-archive.10000644000621200062120000001517314172556763016502 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-WAL\-ARCHIVE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-wal\-archive \- Archive PostgreSQL WAL files in the Cloud using \f[C]archive_command\f[] .SH SYNOPSIS .PP barman\-cloud\-wal\-archive [\f[I]OPTIONS\f[]] \f[I]DESTINATION_URL\f[] \f[I]SERVER_NAME\f[] \f[I]WAL_PATH\f[] .SH DESCRIPTION .PP This script can be used in the \f[C]archive_command\f[] of a PostgreSQL server to ship WAL files to the Cloud. Currently AWS S3 and Azure Blob Storage are supported. .PP Note: If you are running python 2 or older unsupported versions of python 3 then avoid the compression options \f[C]\-\-gzip\f[] or \f[C]\-\-bzip2\f[] as barman\-cloud\-wal\-restore is unable to restore gzip\-compressed WALs on python < 3.2 or bzip2\-compressed WALs on python < 3.3. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B DESTINATION_URL URL of the cloud destination, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .TP .B WAL_PATH the value of the `%p' keyword (according to `archive_command'). 
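.PP
For instance (a sketch; the bucket name is hypothetical), the corresponding \f[C]archive_command\f[] in \f[C]postgresql.conf\f[] could be:
.IP
.nf
\f[C]
archive_command = 'barman-cloud-wal-archive s3://my-bucket/postgres pg %p'
\f[]
.fi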
.RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \-z, \[en]gzip gzip\-compress the WAL while uploading to the cloud (should not be used with python < 3.2) .RS .RE .TP .B \-j, \[en]bzip2 bzip2\-compress the WAL while uploading to the cloud (should not be used with python < 3.3) .RS .RE .TP .B \[en]snappy snappy\-compress the WAL while uploading to the cloud (requires optional python\-snappy library and should not be used with python < 3.3) .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \[en]tags KEY1,VALUE1 KEY2,VALUE2 \&... A space\-separated list of comma\-separated key\-value pairs representing tags to be added to each WAL file archived to cloud storage. .RS .RE .TP .B \[en]history\-tags KEY1,VALUE1 KEY2,VALUE2 \&... A space\-separated list of comma\-separated key\-value pairs representing tags to be added to each history file archived to cloud storage. If this is provided alongside the \f[C]\-\-tags\f[] option then the value of \f[C]\-\-history\-tags\f[] will be used in place of \f[C]\-\-tags\f[] for history files. All other WAL files will continue to be tagged with the value of \f[C]\-\-tags\f[]. .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \-e, \[en]encryption the encryption algorithm used when storing the uploaded data in S3 Allowed values: `AES256'|`aws:kms' .RS .RE .TP .B \[en]encryption\-scope the name of an encryption scope defined in the Azure Blob Storage service which is to be used to encrypt the data in Azure .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .TP .B \[en]max\-block\-size SIZE the chunk size to be used when uploading an object to Azure Blob Storage via the concurrent chunk method (default: 4MB). .RS .RE .TP .B \[en]max\-concurrency CONCURRENCY the maximum number of chunks to be uploaded concurrently to Azure Blob Storage (default: 1). Whether the maximum concurrency is achieved depends on the values of \[en]max\-block\-size (should be less than or equal to \f[C]WAL\ segment\ size\ after\ compression\ /\ max_concurrency\f[]) and \[en]max\-single\-put\-size (must be less than WAL segment size after compression). .RS .RE .TP .B \[en]max\-single\-put\-size SIZE maximum size for which the Azure client will upload an object to Azure Blob Storage in a single request (default: 64MB). If this is set lower than the WAL segment size after any applied compression then the concurrent chunk upload method for WAL archiving will be used. 
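.PP
As a hedged example combining some of the options above (bucket, server name and tag values are hypothetical), the invocation placed in \f[C]archive_command\f[] could compress each WAL with gzip and tag it on upload:
.IP
.nf
\f[C]
barman-cloud-wal-archive --gzip --tags environment,production s3://my-bucket/postgres pg %p
\f[]
.fi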
.RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The WAL archive operation was not successful .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH SEE ALSO .PP This script can be used in conjunction with \f[C]pre_archive_retry_script\f[] to relay WAL files to S3, as follows: .IP .nf \f[C] pre_archive_retry_script\ =\ \[aq]barman\-cloud\-wal\-archive\ [*OPTIONS*]\ *DESTINATION_URL*\ ${BARMAN_SERVER}\[aq] \f[] .fi .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman.10000644000621200062120000005473114172556763013201 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman \- Backup and Recovery Manager for PostgreSQL .SH SYNOPSIS .PP barman [\f[I]OPTIONS\f[]] \f[I]COMMAND\f[] .SH DESCRIPTION .PP Barman is an administration tool for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. Barman can perform remote backups of multiple servers in business critical environments and helps DBAs during the recovery phase. .SH OPTIONS .TP .B \-h, \-\-help Show a help message and exit. .RS .RE .TP .B \-v, \-\-version Show program version number and exit. .RS .RE .TP .B \-c \f[I]CONFIG\f[], \-\-config \f[I]CONFIG\f[] Use the specified configuration file. .RS .RE .TP .B \-\-color \f[I]{never,always,auto}\f[], \-\-colour \f[I]{never,always,auto}\f[] Whether to use colors in the output (default: \f[I]auto\f[]) .RS .RE .TP .B \-q, \-\-quiet Do not output anything. Useful for cron scripts. 
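.PP
For example (a sketch; the configuration path is hypothetical), \f[C]-q\f[] and \f[C]-c\f[] are typically combined in a crontab entry that runs the \f[C]cron\f[] command every minute:
.IP
.nf
\f[C]
* * * * *   barman -q -c /etc/barman/barman.conf cron
\f[]
.fi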
.RS .RE .TP .B \-d, \-\-debug debug output (default: False) .RS .RE .TP .B \-\-log\-level {NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL} Override the default log level .RS .RE .TP .B \-f {json,console}, \-\-format {json,console} output format (default: \[aq]console\[aq]) .RS .RE .SH COMMANDS .PP Important: every command has a help option .TP .B archive\-wal \f[I]SERVER_NAME\f[] Get any incoming xlog file (both through standard \f[C]archive_command\f[] and streaming replication, where applicable) and moves them in the WAL archive for that server. If necessary, apply compression when requested by the user. .RS .RE .TP .B backup \f[I]SERVER_NAME\f[] Perform a backup of \f[C]SERVER_NAME\f[] using parameters specified in the configuration file. Specify \f[C]all\f[] as \f[C]SERVER_NAME\f[] to perform a backup of all the configured servers. .RS .TP .B \-\-immediate\-checkpoint forces the initial checkpoint to be done as quickly as possible. Overrides value of the parameter \f[C]immediate_checkpoint\f[], if present in the configuration file. .RS .RE .TP .B \-\-no\-immediate\-checkpoint forces to wait for the checkpoint. Overrides value of the parameter \f[C]immediate_checkpoint\f[], if present in the configuration file. .RS .RE .TP .B \-\-reuse\-backup [INCREMENTAL_TYPE] Overrides \f[C]reuse_backup\f[] option behaviour. Possible values for \f[C]INCREMENTAL_TYPE\f[] are: .RS .IP \[bu] 2 \f[I]off\f[]: do not reuse the last available backup; .IP \[bu] 2 \f[I]copy\f[]: reuse the last available backup for a server and create a copy of the unchanged files (reduce backup time); .IP \[bu] 2 \f[I]link\f[]: reuse the last available backup for a server and create a hard link of the unchanged files (reduce backup time and space); .PP \f[C]link\f[] is the default target if \f[C]\-\-reuse\-backup\f[] is used and \f[C]INCREMENTAL_TYPE\f[] is not explicit. .RE .TP .B \-\-retry\-times Number of retries of base backup copy, after an error. Used during both backup and recovery operations. Overrides value of the parameter \f[C]basebackup_retry_times\f[], if present in the configuration file. .RS .RE .TP .B \-\-no\-retry Same as \f[C]\-\-retry\-times\ 0\f[] .RS .RE .TP .B \-\-retry\-sleep Number of seconds of wait after a failed copy, before retrying. Used during both backup and recovery operations. Overrides value of the parameter \f[C]basebackup_retry_sleep\f[], if present in the configuration file. .RS .RE .TP .B \-j, \-\-jobs Number of parallel workers to copy files during backup. Overrides value of the parameter \f[C]parallel_jobs\f[], if present in the configuration file. .RS .RE .TP .B \-\-bwlimit KBPS maximum transfer rate in kilobytes per second. A value of 0 means no limit. Overrides \[aq]bandwidth_limit\[aq] configuration option. Default is undefined. .RS .RE .TP .B \-\-wait, \-w wait for all required WAL files by the base backup to be archived .RS .RE .TP .B \-\-wait\-timeout the time, in seconds, spent waiting for the required WAL files to be archived before timing out .RS .RE .RE .TP .B check\-backup \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Make sure that all the required WAL files to check the consistency of a physical backup (that is, from the beginning to the end of the full backup) are correctly archived. This command is automatically invoked by the \f[C]cron\f[] command and at the end of every backup operation. .RS .RE .TP .B check\-wal\-archive \f[I]SERVER_NAME\f[] Check that the WAL archive destination for \f[I]SERVER_NAME\f[] is safe to use for a new PostgreSQL cluster. 
With no optional args (the default) this will pass if the WAL archive is empty and fail otherwise. .RS .TP .B \-\-timeline [TIMELINE] A positive integer specifying the earliest timeline for which associated WALs should cause the check to fail. The check will pass if all WAL content in the archive relates to earlier timelines. If any WAL files are on this timeline or greater then the check will fail. .RS .RE .RE .TP .B check \f[I]SERVER_NAME\f[] Show diagnostic information about \f[C]SERVER_NAME\f[], including: Ssh connection check, PostgreSQL version, configuration and backup directories, archiving process, streaming process, replication slots, etc. Specify \f[C]all\f[] as \f[C]SERVER_NAME\f[] to show diagnostic information about all the configured servers. .RS .TP .B \-\-nagios Nagios plugin compatible output .RS .RE .RE .TP .B cron Perform maintenance tasks, such as enforcing retention policies or WAL files management. .RS .TP .B \-\-keep\-descriptors Keep the stdout and the stderr streams of the Barman subprocesses attached to this one. This is useful for Docker based installations. .RS .RE .RE .TP .B delete \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Delete the specified backup. Backup ID shortcuts section below for available shortcuts. .RS .RE .TP .B diagnose Collect diagnostic information about the server where barman is installed and all the configured servers, including: global configuration, SSH version, Python version, \f[C]rsync\f[] version, as well as current configuration and status of all servers. .RS .RE .TP .B get\-wal \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] \f[I]WAL_NAME\f[] Retrieve a WAL file from the \f[C]xlog\f[] archive of a given server. By default, the requested WAL file, if found, is returned as uncompressed content to \f[C]STDOUT\f[]. The following options allow users to change this behaviour: .RS .TP .B \-o \f[I]OUTPUT_DIRECTORY\f[] destination directory where the \f[C]get\-wal\f[] will deposit the requested WAL .RS .RE .TP .B \-P, \-\-partial retrieve also partial WAL files (.partial) .RS .RE .TP .B \-z output will be compressed using gzip .RS .RE .TP .B \-j output will be compressed using bzip2 .RS .RE .TP .B \-p \f[I]SIZE\f[] peek from the WAL archive up to \f[I]SIZE\f[] WAL files, starting from the requested one. \[aq]SIZE\[aq] must be an integer >= 1. When invoked with this option, get\-wal returns a list of zero to \[aq]SIZE\[aq] WAL segment names, one per row. .RS .RE .TP .B \-t, \-\-test test both the connection and the configuration of the requested PostgreSQL server in Barman for WAL retrieval. With this option, the \[aq]WAL_NAME\[aq] mandatory argument is ignored. .RS .RE .RE .TP .B keep \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Flag the specified backup as an archival backup which should be kept forever, regardless of any retention policies in effect. See the Backup ID shortcuts section below for available shortcuts. .RS .TP .B \-\-target \f[I]RECOVERY_TARGET\f[] Specify the recovery target for the archival backup. Possible values for \f[I]RECOVERY_TARGET\f[] are: .RS .IP \[bu] 2 \f[I]full\f[]: The backup can always be used to recover to the latest point in time. To achieve this, Barman will retain all WALs needed to ensure consistency of the backup and all subsequent WALs. .IP \[bu] 2 \f[I]standalone\f[]: The backup can only be used to recover the server to its state at the time the backup was taken. Barman will only retain the WALs needed to ensure consistency of the backup. .RE .TP .B \-\-status Report the archival status of the backup. 
This will either be the recovery target of \f[I]full\f[] or \f[I]standalone\f[] for archival backups or \f[I]nokeep\f[] for backups which have not been flagged as archival. .RS .RE .TP .B \-\-release Release the keep flag from this backup. This will remove its archival status and make it available for deletion, either directly or by retention policy. .RS .RE .RE .TP .B list\-backups \f[I]SERVER_NAME\f[] Show available backups for \f[C]SERVER_NAME\f[]. This command is useful to retrieve a backup ID. For example: .RS .RE .IP .nf \f[C] servername\ 20111104T102647\ \-\ Fri\ Nov\ \ 4\ 10:26:48\ 2011\ \-\ Size:\ 17.0\ MiB\ \-\ WAL\ Size:\ 100\ B \f[] .fi .IP .nf \f[C] In\ this\ case,\ *20111104T102647*\ is\ the\ backup\ ID. \f[] .fi .TP .B list\-files \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] List all the files in a particular backup, identified by the server name and the backup ID. See the Backup ID shortcuts section below for available shortcuts. .RS .TP .B \-\-target \f[I]TARGET_TYPE\f[] Possible values for TARGET_TYPE are: .RS .IP \[bu] 2 \f[I]data\f[]: lists just the data files; .IP \[bu] 2 \f[I]standalone\f[]: lists the base backup files, including required WAL files; .IP \[bu] 2 \f[I]wal\f[]: lists all the WAL files between the start of the base backup and the end of the log / the start of the following base backup (depending on whether the specified base backup is the most recent one available); .IP \[bu] 2 \f[I]full\f[]: same as data + wal. .PP The default value is \f[C]standalone\f[]. .RE .RE .TP .B list\-servers Show all the configured servers, and their descriptions. .RS .RE .TP .B put\-wal \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] Receive a WAL file from a remote server and securely store it into the \f[C]SERVER_NAME\f[] incoming directory. The WAL file is retrieved from the \f[C]STDIN\f[], and must be encapsulated in a tar stream together with a \f[C]MD5SUMS\f[] file to validate it. This command is meant to be invoked through SSH from a remote \f[C]barman\-wal\-archive\f[] utility (part of \f[C]barman\-cli\f[] package). Do not use this command directly unless you take full responsibility of the content of files. .RS .TP .B \-t, \-\-test test both the connection and the configuration of the requested PostgreSQL server in Barman to make sure it is ready to receive WAL files. .RS .RE .RE .TP .B rebuild\-xlogdb \f[I]SERVER_NAME\f[] Perform a rebuild of the WAL file metadata for \f[C]SERVER_NAME\f[] (or every server, using the \f[C]all\f[] shortcut) guessing it from the disk content. The metadata of the WAL archive is contained in the \f[C]xlog.db\f[] file, and every Barman server has its own copy. .RS .RE .TP .B receive\-wal \f[I]SERVER_NAME\f[] Start the stream of transaction logs for a server. The process relies on \f[C]pg_receivewal\f[]/\f[C]pg_receivexlog\f[] to receive WAL files from the PostgreSQL servers through the streaming protocol. 
.RS .TP .B \-\-stop stop the receive\-wal process for the server .RS .RE .TP .B \-\-reset reset the status of receive\-wal, restarting the streaming from the current WAL file of the server .RS .RE .TP .B \-\-create\-slot create the physical replication slot configured with the \f[C]slot_name\f[] configuration parameter .RS .RE .TP .B \-\-drop\-slot drop the physical replication slot configured with the \f[C]slot_name\f[] configuration parameter .RS .RE .RE .TP .B recover \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] \f[I]DESTINATION_DIRECTORY\f[] Recover a backup in a given directory (local or remote, depending on the \f[C]\-\-remote\-ssh\-command\f[] option settings). See the Backup ID shortcuts section below for available shortcuts. .RS .TP .B \-\-target\-tli \f[I]TARGET_TLI\f[] Recover the specified timeline. .RS .RE .TP .B \-\-target\-time \f[I]TARGET_TIME\f[] Recover to the specified time. .RS .PP You can use any valid unambiguous representation (e.g: "YYYY\-MM\-DD HH:MM:SS.mmm"). .RE .TP .B \-\-target\-xid \f[I]TARGET_XID\f[] Recover to the specified transaction ID. .RS .RE .TP .B \-\-target\-lsn \f[I]TARGET_LSN\f[] Recover to the specified LSN (Log Sequence Number). Requires PostgreSQL 10 or above. .RS .RE .TP .B \-\-target\-name \f[I]TARGET_NAME\f[] Recover to the named restore point previously created with the \f[C]pg_create_restore_point(name)\f[] (for PostgreSQL 9.1 and above users). .RS .RE .TP .B \-\-target\-immediate Recover ends when a consistent state is reached (end of the base backup) .RS .RE .TP .B \-\-exclusive Set target (time, XID or LSN) to be non inclusive. .RS .RE .TP .B \-\-target\-action \f[I]ACTION\f[] Trigger the specified action once the recovery target is reached. Possible actions are: \f[C]pause\f[] (PostgreSQL 9.1 and above), \f[C]shutdown\f[] (PostgreSQL 9.5 and above) and \f[C]promote\f[] (ditto). This option requires a target to be defined, with one of the above options. .RS .RE .TP .B \-\-tablespace \f[I]NAME:LOCATION\f[] Specify tablespace relocation rule. .RS .RE .TP .B \-\-remote\-ssh\-command \f[I]SSH_COMMAND\f[] This options activates remote recovery, by specifying the secure shell command to be launched on a remote host. This is the equivalent of the "ssh_command" server option in the configuration file for remote recovery. Example: \[aq]ssh postgres\@db2\[aq]. .RS .RE .TP .B \-\-retry\-times \f[I]RETRY_TIMES\f[] Number of retries of data copy during base backup after an error. Overrides value of the parameter \f[C]basebackup_retry_times\f[], if present in the configuration file. .RS .RE .TP .B \-\-no\-retry Same as \f[C]\-\-retry\-times\ 0\f[] .RS .RE .TP .B \-\-retry\-sleep Number of seconds of wait after a failed copy, before retrying. Overrides value of the parameter \f[C]basebackup_retry_sleep\f[], if present in the configuration file. .RS .RE .TP .B \-\-bwlimit KBPS maximum transfer rate in kilobytes per second. A value of 0 means no limit. Overrides \[aq]bandwidth_limit\[aq] configuration option. Default is undefined. .RS .RE .TP .B \-j , \-\-jobs Number of parallel workers to copy files during recovery. Overrides value of the parameter \f[C]parallel_jobs\f[], if present in the configuration file. Works only for servers configured through \f[C]rsync\f[]/SSH. .RS .RE .TP .B \-\-get\-wal, \-\-no\-get\-wal Enable/Disable usage of \f[C]get\-wal\f[] for WAL fetching during recovery. Default is based on \f[C]recovery_options\f[] setting. 
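.PP
As a minimal illustration of the options above (host, tablespace and paths are hypothetical), a remote point-in-time recovery of the latest backup could be requested with:
.IP
.nf
\f[C]
barman recover --remote-ssh-command 'ssh postgres@db2' --target-time '2022-01-21 12:00:00' --tablespace tbs1:/srv/pgsql/tbs1 pg latest /srv/pgsql/14/data
\f[]
.fi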
.RS .RE .TP .B \-\-network\-compression, \-\-no\-network\-compression Enable/Disable network compression during remote recovery. Default is based on \f[C]network_compression\f[] configuration setting. .RS .RE .TP .B \-\-standby\-mode Specifies whether to start the PostgreSQL server as a standby. Default is undefined. .RS .RE .RE .TP .B replication\-status \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] Shows live information and status of any streaming client attached to the given server (or servers). Default behaviour can be changed through the following options: .RS .TP .B \-\-minimal machine readable output (default: False) .RS .RE .TP .B \-\-target \f[I]TARGET_TYPE\f[] Possible values for TARGET_TYPE are: .RS .IP \[bu] 2 \f[I]hot\-standby\f[]: lists only hot standby servers .IP \[bu] 2 \f[I]wal\-streamer\f[]: lists only WAL streaming clients, such as pg_receivewal .IP \[bu] 2 \f[I]all\f[]: any streaming client (default) .RE .RE .TP .B show\-backup \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Show detailed information about a particular backup, identified by the server name and the backup ID. See the Backup ID shortcuts section below for available shortcuts. For example: .RS .RE .IP .nf \f[C] Backup\ 20150828T130001: \ \ Server\ Name\ \ \ \ \ \ \ \ \ \ \ \ :\ quagmire \ \ Status\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ :\ DONE \ \ PostgreSQL\ Version\ \ \ \ \ :\ 90402 \ \ PGDATA\ directory\ \ \ \ \ \ \ :\ /srv/postgresql/9.4/main/data \ \ Base\ backup\ information: \ \ \ \ Disk\ usage\ \ \ \ \ \ \ \ \ \ \ :\ 12.4\ TiB\ (12.4\ TiB\ with\ WALs) \ \ \ \ Incremental\ size\ \ \ \ \ :\ 4.9\ TiB\ (\-60.02%) \ \ \ \ Timeline\ \ \ \ \ \ \ \ \ \ \ \ \ :\ 1 \ \ \ \ Begin\ WAL\ \ \ \ \ \ \ \ \ \ \ \ :\ 0000000100000CFD000000AD \ \ \ \ End\ WAL\ \ \ \ \ \ \ \ \ \ \ \ \ \ :\ 0000000100000D0D00000008 \ \ \ \ WAL\ number\ \ \ \ \ \ \ \ \ \ \ :\ 3932 \ \ \ \ WAL\ compression\ ratio:\ 79.51% \ \ \ \ Begin\ time\ \ \ \ \ \ \ \ \ \ \ :\ 2015\-08\-28\ 13:00:01.633925+00:00 \ \ \ \ End\ time\ \ \ \ \ \ \ \ \ \ \ \ \ :\ 2015\-08\-29\ 10:27:06.522846+00:00 \ \ \ \ Begin\ Offset\ \ \ \ \ \ \ \ \ :\ 1575048 \ \ \ \ End\ Offset\ \ \ \ \ \ \ \ \ \ \ :\ 13853016 \ \ \ \ Begin\ XLOG\ \ \ \ \ \ \ \ \ \ \ :\ CFD/AD180888 \ \ \ \ End\ XLOG\ \ \ \ \ \ \ \ \ \ \ \ \ :\ D0D/8D36158 \ \ WAL\ information: \ \ \ \ No\ of\ files\ \ \ \ \ \ \ \ \ \ :\ 35039 \ \ \ \ Disk\ usage\ \ \ \ \ \ \ \ \ \ \ :\ 121.5\ GiB \ \ \ \ WAL\ rate\ \ \ \ \ \ \ \ \ \ \ \ \ :\ 275.50/hour \ \ \ \ Compression\ ratio\ \ \ \ :\ 77.81% \ \ \ \ Last\ available\ \ \ \ \ \ \ :\ 0000000100000D95000000E7 \ \ Catalog\ information: \ \ \ \ Retention\ Policy\ \ \ \ \ :\ not\ enforced \ \ \ \ Previous\ Backup\ \ \ \ \ \ :\ 20150821T130001 \ \ \ \ Next\ Backup\ \ \ \ \ \ \ \ \ \ :\ \-\ (this\ is\ the\ latest\ base\ backup) \f[] .fi .TP .B show\-servers \f[I]SERVER_NAME\f[] Show information about \f[C]SERVER_NAME\f[], including: \f[C]conninfo\f[], \f[C]backup_directory\f[], \f[C]wals_directory\f[] and many more. Specify \f[C]all\f[] as \f[C]SERVER_NAME\f[] to show information about all the configured servers. .RS .RE .TP .B status \f[I]SERVER_NAME\f[] Show information about the status of a server, including: number of available backups, \f[C]archive_command\f[], \f[C]archive_status\f[] and many more. 
For example: .RS .RE .IP .nf \f[C] Server\ quagmire: \ \ Description:\ The\ Giggity\ database \ \ Passive\ node:\ False \ \ PostgreSQL\ version:\ 9.3.9 \ \ pgespresso\ extension:\ Not\ available \ \ PostgreSQL\ Data\ directory:\ /srv/postgresql/9.3/data \ \ PostgreSQL\ \[aq]archive_command\[aq]\ setting:\ rsync\ \-a\ %p\ barman\@backup:/var/lib/barman/quagmire/incoming \ \ Last\ archived\ WAL:\ 0000000100003103000000AD \ \ Current\ WAL\ segment:\ 0000000100003103000000AE \ \ Retention\ policies:\ enforced\ (mode:\ auto,\ retention:\ REDUNDANCY\ 2,\ WAL\ retention:\ MAIN) \ \ No.\ of\ available\ backups:\ 2 \ \ First\ available\ backup:\ 20150908T003001 \ \ Last\ available\ backup:\ 20150909T003001 \ \ Minimum\ redundancy\ requirements:\ satisfied\ (2/1) \f[] .fi .TP .B switch\-wal \f[I]SERVER_NAME\f[] Execute pg_switch_wal() on the target server (from PostgreSQL 10), or pg_switch_xlog (for PostgreSQL 8.3 to 9.6). .RS .TP .B \-\-force Forces the switch by executing CHECKPOINT before pg_switch_xlog(). \f[I]IMPORTANT:\f[] executing a CHECKPOINT might increase I/O load on a PostgreSQL server. Use this option with care. .RS .RE .TP .B \-\-archive Wait for one xlog file to be archived. If after a defined amount of time (default: 30 seconds) no xlog file is archived, Barman will terminate with failure exit code. Available also on standby servers. .RS .RE .TP .B \-\-archive\-timeout \f[I]TIMEOUT\f[] Specifies the amount of time in seconds (default: 30 seconds) the archiver will wait for a new xlog file to be archived before timing out. Available also on standby servers. .RS .RE .RE .TP .B switch\-xlog \f[I]SERVER_NAME\f[] Alias for switch\-wal (kept for back\-compatibility) .RS .RE .TP .B sync\-backup \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Command used for the synchronisation of a passive node with its primary. Executes a copy of all the files of a \f[C]BACKUP_ID\f[] that is present on \f[C]SERVER_NAME\f[] node. This command is available only for passive nodes, and uses the \f[C]primary_ssh_command\f[] option to establish a secure connection with the primary node. .RS .RE .TP .B sync\-info \f[I]SERVER_NAME\f[] [\f[I]LAST_WAL\f[] [\f[I]LAST_POSITION\f[]]] Collect information regarding the current status of a Barman server, to be used for synchronisation purposes. Returns a JSON output representing \f[C]SERVER_NAME\f[], that contains: all the successfully finished backup, all the archived WAL files, the configuration, last WAL file been read from the \f[C]xlog.db\f[] and the position in the file. .RS .TP .B LAST_WAL tells sync\-info to skip any WAL file previous to that (incremental synchronisation) .RS .RE .TP .B LAST_POSITION hint for quickly positioning in the \f[C]xlog.db\f[] file (incremental synchronisation) .RS .RE .RE .TP .B sync\-wals \f[I]SERVER_NAME\f[] Command used for the synchronisation of a passive node with its primary. Executes a copy of all the archived WAL files that are present on \f[C]SERVER_NAME\f[] node. This command is available only for passive nodes, and uses the \f[C]primary_ssh_command\f[] option to establish a secure connection with the primary node. .RS .RE .SH BACKUP ID SHORTCUTS .PP Rather than using the timestamp backup ID, you can use any of the following shortcuts/aliases to identity a backup for a given server: .TP .B first Oldest available backup for that server, in chronological order. .RS .RE .TP .B last Latest available backup for that server, in chronological order. .RS .RE .TP .B latest same ast \f[I]last\f[]. .RS .RE .TP .B oldest same ast \f[I]first\f[]. 
.RS .RE .TP .B last\-failed Latest failed backup, in chronological order. .RS .RE .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B Not zero Failure .RS .RE .SH SEE ALSO .PP \f[C]barman\f[] (5). .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github bug tracker. Along with the bug submission, users can provide developers with diagnostics information obtained through the \f[C]barman\ diagnose\f[] command. .SH AUTHORS .PP Barman maintainers (in alphabetical order): .IP \[bu] 2 Abhijit Menon\-Sen .IP \[bu] 2 Jane Threefoot .IP \[bu] 2 Michael Wallace .PP Past contributors (in alphabetical order): .IP \[bu] 2 Anna Bellandi (QA/testing) .IP \[bu] 2 Britt Cole (documentation reviewer) .IP \[bu] 2 Carlo Ascani (developer) .IP \[bu] 2 Francesco Canovai (QA/testing) .IP \[bu] 2 Gabriele Bartolini (architect) .IP \[bu] 2 Gianni Ciolli (QA/testing) .IP \[bu] 2 Giulio Calacoci (developer) .IP \[bu] 2 Giuseppe Broccolo (developer) .IP \[bu] 2 Jonathan Battiato (QA/testing) .IP \[bu] 2 Leonardo Cecchi (developer) .IP \[bu] 2 Marco Nenciarini (project leader) .IP \[bu] 2 Niccolò Fei (QA/testing) .IP \[bu] 2 Rubens Souza (QA/testing) .IP \[bu] 2 Stefano Bianucci (developer) .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman.5.d/0000755000621200062120000000000014172556766013475 5ustar 00000000000000barman-2.18/doc/barman.5.d/50-custom_compression_filter.md0000644000621200062120000000014414172556763021535 0ustar 00000000000000custom_compression_filter : Customised compression algorithm applied to WAL files. Global/Server. barman-2.18/doc/barman.5.d/50-basebackups_directory.md0000644000621200062120000000011714172556763020604 0ustar 00000000000000basebackups_directory : Directory where base backups will be placed. Server. barman-2.18/doc/barman.5.d/50-basebackup_retry_times.md0000644000621200062120000000026714172556763020771 0ustar 00000000000000basebackup_retry_times : Number of retries of base backup copy, after an error. Used during both backup and recovery operations. Positive integer, default 0. Global/Server. barman-2.18/doc/barman.5.d/50-post_archive_retry_script.md0000644000621200062120000000060414172556763021535 0ustar 00000000000000post_archive_retry_script : Hook script launched after a WAL file is archived by maintenance. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post archive scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. barman-2.18/doc/barman.5.d/50-forward-config-path.md0000644000621200062120000000056014172556763020100 0ustar 00000000000000forward_config_path : Parameter which determines whether a passive node should forward its configuration file path to its primary node during cron or sync-info commands. Set to true if you are invoking barman with the `-c/--config` option and your configuration is in the same place on both the passive and primary barman servers. Defaults to false. 
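As an illustrative sketch (host names are hypothetical), a passive Barman node typically combines this option with `primary_ssh_command` in the section that mirrors the primary's server:

``` ini
[pg]
description = "Passive copy of pg"
primary_ssh_command = ssh barman@barman-primary
forward_config_path = true
```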
barman-2.18/doc/barman.5.d/50-compression.md0000644000621200062120000000051014172556763016573 0ustar 00000000000000compression : Standard compression algorithm applied to WAL files. Possible values are: `gzip` (requires `gzip` to be installed on the system), `bzip2` (requires `bzip2`), `pigz` (requires `pigz`), `pygzip` (Python's internal gzip compressor) and `pybzip2` (Python's internal bzip2 compressor). Global/Server. barman-2.18/doc/barman.5.d/50-description.md0000644000621200062120000000010214172556763016552 0ustar 00000000000000description : A human readable description of a server. Server. barman-2.18/doc/barman.5.d/50-post_backup_script.md0000644000621200062120000000016614172556763020137 0ustar 00000000000000post_backup_script : Hook script launched after a base backup, after 'post_backup_retry_script'. Global/Server. barman-2.18/doc/barman.5.d/50-log_level.md0000644000621200062120000000012014172556763016177 0ustar 00000000000000log_level : Level of logging (DEBUG, INFO, WARNING, ERROR, CRITICAL). Global. barman-2.18/doc/barman.5.d/50-backup_method.md0000644000621200062120000000076414172556763017052 0ustar 00000000000000backup_method : Configure the method barman used for backup execution. If set to `rsync` (default), barman will execute backup using the `rsync` command over SSH (requires `ssh_command`). If set to `postgres` barman will use the `pg_basebackup` command to execute the backup. If set to `local-rsync`, barman will assume to be running on the same server as the the PostgreSQL instance and with the same user, then execute `rsync` for the file system copy. Global/Server. barman-2.18/doc/barman.5.d/50-streaming_archiver_batch_size.md0000644000621200062120000000067114172556763022311 0ustar 00000000000000streaming_archiver_batch_size : This option allows you to activate batch processing of WAL files for the `streaming_archiver` process, by setting it to a value > 0. Otherwise, the traditional unlimited processing of the WAL queue is enabled. When batch processing is activated, the `archive-wal` process would limit itself to maximum `streaming_archiver_batch_size` WAL segments per single run. Integer. Global/Server. barman-2.18/doc/barman.5.d/50-pre_backup_retry_script.md0000644000621200062120000000062314172556763021163 0ustar 00000000000000pre_backup_retry_script : Hook script launched before a base backup, after 'pre_backup_script'. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the backup operation. Global/Server. barman-2.18/doc/barman.5.d/50-primary_ssh_command.md0000644000621200062120000000054714172556763020302 0ustar 00000000000000primary_ssh_command : Parameter that identifies a Barman server as `passive`. In a passive node, the source of a backup server is a Barman installation rather than a PostgreSQL server. If `primary_ssh_command` is specified, Barman uses it to establish a connection with the primary server. Empty by default, it can also be set globally. barman-2.18/doc/barman.5.d/50-recovery_options.md0000644000621200062120000000055514172556763017654 0ustar 00000000000000recovery_options : Options for recovery operations. Currently only supports `get-wal`. `get-wal` activates generation of a basic `restore_command` in the resulting recovery configuration that uses the `barman get-wal` command to fetch WAL files directly from Barman's archive of WALs. 
Comma separated list of values, default empty. Global/Server. barman-2.18/doc/barman.5.d/00-header.md0000644000621200062120000000016014172556763015456 0ustar 00000000000000% BARMAN(5) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 barman-2.18/doc/barman.5.d/50-incoming_wals_directory.md0000644000621200062120000000020114172556763021144 0ustar 00000000000000incoming_wals_directory : Directory where incoming WAL files are archived into. Requires `archiver` to be enabled. Server. barman-2.18/doc/barman.5.d/50-conninfo.md0000644000621200062120000000055614172556763016055 0ustar 00000000000000conninfo : Connection string used by Barman to connect to the Postgres server. This is a libpq connection string, consult the [PostgreSQL manual][conninfo] for more information. Commonly used keys are: host, hostaddr, port, dbname, user, password. Server. [conninfo]: https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING barman-2.18/doc/barman.5.d/50-pre_wal_delete_retry_script.md0000644000621200062120000000065114172556763022024 0ustar 00000000000000pre_wal_delete_retry_script : Hook script launched before the deletion of a WAL file, after 'pre_wal_delete_script'. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the WAL file deletion. Global/Server. barman-2.18/doc/barman.5.d/50-streaming_archiver.md0000644000621200062120000000121314172556763020107 0ustar 00000000000000streaming_archiver : This option allows you to use the PostgreSQL's streaming protocol to receive transaction logs from a server. If set to `on`, Barman expects to find `pg_receivewal` (known as `pg_receivexlog` prior to PostgreSQL 10) in the PATH (see `path_prefix` option) and that streaming connection for the server is working. This activates connection checks as well as management (including compression) of WAL files. If set to `off` (default) barman will rely only on continuous archiving for a server WAL archive operations, eventually terminating any running `pg_receivexlog` for the server. Global/Server. barman-2.18/doc/barman.5.d/50-post_recovery_retry_script.md0000644000621200062120000000055314172556763021755 0ustar 00000000000000post_recovery_retry_script : Hook script launched after a recovery. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post recovery scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. barman-2.18/doc/barman.5.d/20-configuration-file-locations.md0000644000621200062120000000032214172556763022005 0ustar 00000000000000# CONFIGURATION FILE LOCATIONS The system-level Barman configuration file is located at /etc/barman.conf or /etc/barman/barman.conf and is overridden on a per-user level by $HOME/.barman.conf barman-2.18/doc/barman.5.d/50-wals_directory.md0000644000621200062120000000007714172556763017274 0ustar 00000000000000wals_directory : Directory which contains WAL files. Server. barman-2.18/doc/barman.5.d/50-pre_wal_delete_script.md0000644000621200062120000000014114172556763020571 0ustar 00000000000000pre_wal_delete_script : Hook script launched before the deletion of a WAL file. Global/Server. 
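As an illustration of the `streaming_archiver` and `conninfo` options documented above, a server that receives its WAL files over the streaming replication protocol could be sketched as follows (connection strings and the slot name are placeholders, not defaults):

```
; hypothetical server section using pg_receivewal for WAL shipping
[pg]
description = "PostgreSQL server archived via streaming"
conninfo = host=pg.example.com user=barman dbname=postgres
streaming_conninfo = host=pg.example.com user=streaming_barman
streaming_archiver = on
slot_name = barman
create_slot = auto
```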
barman-2.18/doc/barman.5.d/50-post_archive_script.md0000644000621200062120000000022014172556763020302 0ustar 00000000000000post_archive_script : Hook script launched after a WAL file is archived by maintenance, after 'post_archive_retry_script'. Global/Server. barman-2.18/doc/barman.5.d/50-pre_backup_script.md0000644000621200062120000000012014172556763017736 0ustar 00000000000000pre_backup_script : Hook script launched before a base backup. Global/Server. barman-2.18/doc/barman.5.d/50-retention_policy.md0000644000621200062120000000121514172556763017623 0ustar 00000000000000retention_policy : Policy for retention of periodic backups and archive logs. If left empty, retention policies are not enforced. For redundancy based retention policy use "REDUNDANCY i" (where i is an integer > 0 and defines the number of backups to retain). For recovery window retention policy use "RECOVERY WINDOW OF i DAYS" or "RECOVERY WINDOW OF i WEEKS" or "RECOVERY WINDOW OF i MONTHS" where i is a positive integer representing, specifically, the number of days, weeks or months to retain your backups. For more detailed information, refer to the official documentation. Default value is empty. Global/Server. barman-2.18/doc/barman.5.d/50-custom_decompression_filter.md0000644000621200062120000000024214172556763022045 0ustar 00000000000000custom_decompression_filter : Customised decompression algorithm applied to compressed WAL files; this must match the compression algorithm. Global/Server. barman-2.18/doc/barman.5.d/50-wal_retention_policy.md0000644000621200062120000000020214172556763020461 0ustar 00000000000000wal_retention_policy : Policy for retention of archive logs (WAL files). Currently only "MAIN" is available. Global/Server. barman-2.18/doc/barman.5.d/50-last_backup_maximum_age.md0000644000621200062120000000072714172556763021105 0ustar 00000000000000last_backup_maximum_age : This option identifies a time frame that must contain the latest backup. If the latest backup is older than the time frame, barman check command will report an error to the user. If empty (default), latest backup is always considered valid. Syntax for this option is: "i (DAYS | WEEKS | MONTHS)" where i is an integer greater than zero, representing the number of days | weeks | months of the time frame. Global/Server. barman-2.18/doc/barman.5.d/50-log_file.md0000644000621200062120000000006414172556763016016 0ustar 00000000000000log_file : Location of Barman's log file. Global. barman-2.18/doc/barman.5.d/50-post_wal_delete_retry_script.md0000644000621200062120000000057314172556763022226 0ustar 00000000000000post_wal_delete_retry_script : Hook script launched after the deletion of a WAL file. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post delete scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. barman-2.18/doc/barman.5.d/50-last_backup_minimum_size.md0000644000621200062120000000104214172556763021310 0ustar 00000000000000last_backup_minimum_size : This option identifies the lower limit to the acceptable size of the latest successful backup. If the latest backup is smaller than the specified size, barman check command will report an error to the user. If empty (default), latest backup is always considered valid. Syntax for this option is: "i (k|Ki|M|Mi|G|Gi|T|Ti)" where i is an integer greater than zero, with an optional SI or IEC suffix. k=kilo=1000, Ki=Kibi=1024 and so forth.
Note that the suffix is case-sensitive. Global/Server. barman-2.18/doc/barman.5.d/80-see-also.md0000644000621200062120000000003214172556763015744 0ustar 00000000000000# SEE ALSO `barman` (1). barman-2.18/doc/barman.5.d/50-pre_recovery_retry_script.md0000644000621200062120000000062514172556763021556 0ustar 00000000000000pre_recovery_retry_script : Hook script launched before a recovery, after 'pre_recovery_script'. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the recover operation. Global/Server. barman-2.18/doc/barman.5.d/50-custom_compression_magic.md0000644000621200062120000000100014172556763021320 0ustar 00000000000000custom_compression_magic : Customised compression magic which is checked in the beginning of a WAL file to select the custom algorithm. If you are using a custom compression filter then setting this will prevent barman from applying the custom compression to WALs which have been pre-compressed with that compression. If you do not configure this then custom compression will still be applied but any pre-compressed WAL files will be compressed again during WAL archive. Global/Server. barman-2.18/doc/barman.5.d/50-post_backup_retry_script.md0000644000621200062120000000055214172556763021363 0ustar 00000000000000post_backup_retry_script : Hook script launched after a base backup. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post backup scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. barman-2.18/doc/barman.5.d/50-backup_directory.md0000644000621200062120000000012614172556763017566 0ustar 00000000000000backup_directory : Directory where backup data for a server will be placed. Server. barman-2.18/doc/barman.5.d/50-barman_lock_directory.md0000644000621200062120000000012314172556763020566 0ustar 00000000000000barman_lock_directory : Directory for locks. Default: `%(barman_home)s`. Global. barman-2.18/doc/barman.5.d/75-example.md0000644000621200062120000000137214172556763015703 0ustar 00000000000000# EXAMPLE Here is an example of configuration file: ``` [barman] ; Main directory barman_home = /var/lib/barman ; System user barman_user = barman ; Log location log_file = /var/log/barman/barman.log ; Default compression level ;compression = gzip ; Incremental backup reuse_backup = link ; 'main' PostgreSQL Server configuration [main] ; Human readable description description = "Main PostgreSQL Database" ; SSH options ssh_command = ssh postgres@pg ; PostgreSQL connection string conninfo = host=pg user=postgres ; PostgreSQL streaming connection string streaming_conninfo = host=pg user=postgres ; Minimum number of required backups (redundancy) minimum_redundancy = 1 ; Retention policy (based on redundancy) retention_policy = REDUNDANCY 2 ``` barman-2.18/doc/barman.5.d/50-streaming_archiver_name.md0000644000621200062120000000035514172556763021115 0ustar 00000000000000streaming_archiver_name : Identifier to be used as `application_name` by the `receive-wal` command. Only available with `pg_receivewal` (or `pg_receivexlog` >= 9.3). By default it is set to `barman_receive_wal`. Global/Server. 
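To make the retention policy and backup monitoring thresholds documented earlier in this section concrete, the following sketch combines several of those options (all values are arbitrary examples, not recommendations):

```
; hypothetical retention and monitoring thresholds
[main]
minimum_redundancy = 2
retention_policy = RECOVERY WINDOW OF 4 WEEKS
last_backup_maximum_age = 7 DAYS
last_backup_minimum_size = 10 Gi
```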
barman-2.18/doc/barman.5.d/50-basebackup_retry_sleep.md0000644000621200062120000000030014172556763020746 0ustar 00000000000000basebackup_retry_sleep : Number of seconds to wait after a failed copy, before retrying. Used during both backup and recovery operations. Positive integer, default 30. Global/Server. barman-2.18/doc/barman.5.d/50-check_timeout.md0000644000621200062120000000026314172556763017062 0ustar 00000000000000check_timeout : Maximum execution time, in seconds per server, for a barman check command. Set to 0 to disable the timeout. Positive integer, default 30. Global/Server. barman-2.18/doc/barman.5.d/50-streaming_backup_name.md0000644000621200062120000000033414172556763020554 0ustar 00000000000000streaming_backup_name : Identifier to be used as `application_name` by the `pg_basebackup` command. Only available with `pg_basebackup` >= 9.3. By default it is set to `barman_streaming_backup`. Global/Server. barman-2.18/doc/barman.5.d/50-last_wal_maximum_age.md0000644000621200062120000000055714172556763020420 0ustar 00000000000000last_wal_maximum_age : This option identifies a time frame that must contain the latest WAL file archived. If the latest WAL file is older than the time frame, barman check command will report an error to the user. If empty (default), the age of the WAL files is not checked. Syntax is the same as last_backup_maximum_age (above). Global/Server.barman-2.18/doc/barman.5.d/50-tablespace_bandwidth_limit.md0000644000621200062120000000041314172556763021561 0ustar 00000000000000tablespace_bandwidth_limit : This option allows you to specify a maximum transfer rate in kilobytes per second, by specifying a comma separated list of tablespaces (pairs TBNAME:BWLIMIT). A value of zero specifies no limit (default). Global/Server. barman-2.18/doc/barman.5.d/30-configuration-file-directory.md0000644000621200062120000000103114172556763022015 0ustar 00000000000000# CONFIGURATION FILE DIRECTORY Barman supports the inclusion of multiple configuration files, through the `configuration_files_directory` option. Included files must contain only server specifications, not global configurations. If the value of `configuration_files_directory` is a directory, Barman reads all files with `.conf` extension that exist in that folder. For example, if you set it to `/etc/barman.d`, you can specify your PostgreSQL servers placing each section in a separate `.conf` file inside the `/etc/barman.d` folder. barman-2.18/doc/barman.5.d/50-post_recovery_script.md0000644000621200062120000000016714172556763020531 0ustar 00000000000000post_recovery_script : Hook script launched after a recovery, after 'post_recovery_retry_script'. Global/Server. barman-2.18/doc/barman.5.d/50-active.md0000644000621200062120000000077114172556763015516 0ustar 00000000000000active : When set to `true` (default), the server is in full operational state. When set to `false`, the server can be used for diagnostics, but any operational command such as backup execution or WAL archiving is temporarily disabled. When adding a new server to Barman, we suggest setting active=false at first, making sure that barman check shows no problems, and only then activating the server. This will avoid spamming the Barman logs with errors during the initial setup. barman-2.18/doc/barman.5.d/50-errors_directory.md0000644000621200062120000000032514172556763017636 0ustar 00000000000000errors_directory : Directory that contains WAL files that contain an error; usually this is related to a conflict with an existing WAL file (e.g.
a WAL file that has been archived after a streamed one). barman-2.18/doc/barman.5.d/50-pre_recovery_script.md0000644000621200062120000000011714172556763020325 0ustar 00000000000000pre_recovery_script : Hook script launched before a recovery. Global/Server. barman-2.18/doc/barman.5.d/50-path_prefix.md0000644000621200062120000000035114172556763016546 0ustar 00000000000000path_prefix : One or more absolute paths, separated by colon, where Barman looks for executable files. The paths specified in `path_prefix` are tried before the ones specified in the `PATH` environment variable. Global/server. barman-2.18/doc/barman.5.d/50-post_wal_delete_script.md0000644000621200062120000000021314172556763020770 0ustar 00000000000000post_wal_delete_script : Hook script launched after the deletion of a WAL file, after 'post_wal_delete_retry_script'. Global/Server. barman-2.18/doc/barman.5.d/99-copying.md0000644000621200062120000000025614172556763015726 0ustar 00000000000000# COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman.5.d/50-streaming_wals_directory.md0000644000621200062120000000024314172556763021340 0ustar 00000000000000streaming_wals_directory : Directory where WAL files are streamed from the PostgreSQL server to Barman. Requires `streaming_archiver` to be enabled. Server. barman-2.18/doc/barman.5.d/50-network_compression.md0000644000621200062120000000036714172556763020356 0ustar 00000000000000network_compression : This option allows you to enable data compression for network transfers. If set to `false` (default), no compression is used. If set to `true`, compression is enabled, reducing network usage. Global/Server. barman-2.18/doc/barman.5.d/90-authors.md0000644000621200062120000000112114172556763015722 0ustar 00000000000000# AUTHORS Barman maintainers (in alphabetical order): * Abhijit Menon-Sen * Jane Threefoot * Michael Wallace Past contributors (in alphabetical order): * Anna Bellandi (QA/testing) * Britt Cole (documentation reviewer) * Carlo Ascani (developer) * Francesco Canovai (QA/testing) * Gabriele Bartolini (architect) * Gianni Ciolli (QA/testing) * Giulio Calacoci (developer) * Giuseppe Broccolo (developer) * Jonathan Battiato (QA/testing) * Leonardo Cecchi (developer) * Marco Nenciarini (project leader) * Niccolò Fei (QA/testing) * Rubens Souza (QA/testing) * Stefano Bianucci (developer) barman-2.18/doc/barman.5.d/50-post_delete_script.md0000644000621200062120000000020114172556763020122 0ustar 00000000000000post_delete_script : Hook script launched after the deletion of a backup, after 'post_delete_retry_script'. Global/Server. barman-2.18/doc/barman.5.d/45-options.md0000644000621200062120000000001214172556763015726 0ustar 00000000000000# OPTIONS barman-2.18/doc/barman.5.d/95-resources.md0000644000621200062120000000023114172556763016255 0ustar 00000000000000# RESOURCES * Homepage: * Documentation: * Professional support: barman-2.18/doc/barman.5.d/50-pre_delete_script.md0000644000621200062120000000013314172556763017727 0ustar 00000000000000pre_delete_script : Hook script launched before the deletion of a backup. Global/Server. barman-2.18/doc/barman.5.d/50-parallel_jobs.md0000644000621200062120000000033414172556763017047 0ustar 00000000000000parallel_jobs : This option controls how many parallel workers will copy files during a backup or recovery command. Default 1. Global/Server. For backup purposes, it works only when `backup_method` is `rsync`.
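Putting `parallel_jobs` together with the related copy-tuning options (`network_compression`, `path_prefix` and `bandwidth_limit`), a hedged sketch of a global tuning section might look like this (the binary path and the figures are illustrative only):

```
; hypothetical copy-phase tuning, global section
[barman]
path_prefix = /usr/pgsql-14/bin
network_compression = true
parallel_jobs = 4
bandwidth_limit = 40000
```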
barman-2.18/doc/barman.5.d/50-immediate_checkpoint.md0000644000621200062120000000071714172556763020410 0ustar 00000000000000immediate_checkpoint : This option allows you to control the way PostgreSQL handles checkpoint at the start of the backup. If set to `false` (default), the I/O workload for the checkpoint will be limited, according to the `checkpoint_completion_target` setting on the PostgreSQL server. If set to `true`, an immediate checkpoint will be requested, meaning that PostgreSQL will complete the checkpoint as soon as possible. Global/Server. barman-2.18/doc/barman.5.d/50-create_slot.md0000644000621200062120000000037214172556763016544 0ustar 00000000000000create_slot : When set to `auto` and `slot_name` is defined, Barman automatically attempts to create the replication slot if not present. When set to `manual` (default), the replication slot needs to be manually created. Global/Server. barman-2.18/doc/barman.5.d/50-barman_home.md0000644000621200062120000000007014172556763016503 0ustar 00000000000000barman_home : Main data directory for Barman. Global. barman-2.18/doc/barman.5.d/50-ssh_command.md0000644000621200062120000000013014172556763016523 0ustar 00000000000000ssh_command : Command used by Barman to login to the Postgres server via ssh. Server. barman-2.18/doc/barman.5.d/50-bandwidth_limit.md0000644000621200062120000000026014172556763017376 0ustar 00000000000000bandwidth_limit : This option allows you to specify a maximum transfer rate in kilobytes per second. A value of zero specifies no limit (default). Global/Server. barman-2.18/doc/barman.5.d/70-hook-scripts.md0000644000621200062120000000301114172556763016660 0ustar 00000000000000# HOOK SCRIPTS The script definition is passed to a shell and can return any exit code. The shell environment will contain the following variables: `BARMAN_CONFIGURATION` : configuration file used by barman `BARMAN_ERROR` : error message, if any (only for the 'post' phase) `BARMAN_PHASE` : 'pre' or 'post' `BARMAN_RETRY` : `1` if it is a _retry script_ (from 1.5.0), `0` if not `BARMAN_SERVER` : name of the server Backup scripts specific variables: `BARMAN_BACKUP_DIR` : backup destination directory `BARMAN_BACKUP_ID` : ID of the backup `BARMAN_PREVIOUS_ID` : ID of the previous backup (if present) `BARMAN_NEXT_ID` : ID of the next backup (if present) `BARMAN_STATUS` : status of the backup `BARMAN_VERSION` : version of Barman Archive scripts specific variables: `BARMAN_SEGMENT` : name of the WAL file `BARMAN_FILE` : full path of the WAL file `BARMAN_SIZE` : size of the WAL file `BARMAN_TIMESTAMP` : WAL file timestamp `BARMAN_COMPRESSION` : type of compression used for the WAL file Recovery scripts specific variables: `BARMAN_DESTINATION_DIRECTORY` : the directory where the new instance is recovered `BARMAN_TABLESPACES` : tablespace relocation map (JSON, if present) `BARMAN_REMOTE_COMMAND` : secure shell command used by the recovery (if present) `BARMAN_RECOVER_OPTIONS` : recovery additional options (JSON, if present) Only in case of retry hook scripts, the exit code of the script is checked by Barman. Output of hook scripts is simply written in the log file. barman-2.18/doc/barman.5.d/50-retention_policy_mode.md0000644000621200062120000000011714172556763020627 0ustar 00000000000000retention_policy_mode : Currently only "auto" is implemented. Global/Server. 
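To illustrate the environment documented in the HOOK SCRIPTS section above, here is a minimal sketch of a hook that could be configured as, for example, `post_backup_retry_script`. It is written in Python, is not part of Barman, and the log path is a placeholder; it only reads documented `BARMAN_*` variables and uses the retry-script exit codes described in this manual.

```
#!/usr/bin/env python3
# Minimal sketch of a retry hook script (e.g. post_backup_retry_script).
# Exit codes for *retry* hook scripts, as documented in this manual:
#   0  -> SUCCESS
#   62 -> ABORT_CONTINUE (stop retrying the hook, let the operation continue)
#   63 -> ABORT_STOP (propagate the failure; in 'post' hooks it currently
#         behaves like ABORT_CONTINUE)
# Any other exit code makes Barman retry the hook script.
import os
import sys

LOG_PATH = "/var/log/barman/hooks.log"  # placeholder path, adjust as needed


def main():
    phase = os.environ.get("BARMAN_PHASE", "")      # 'pre' or 'post'
    server = os.environ.get("BARMAN_SERVER", "")
    backup_id = os.environ.get("BARMAN_BACKUP_ID", "")
    status = os.environ.get("BARMAN_STATUS", "")
    error = os.environ.get("BARMAN_ERROR", "")

    line = "phase=%s server=%s backup=%s status=%s error=%s\n" % (
        phase, server, backup_id, status, error,
    )
    try:
        with open(LOG_PATH, "a") as log:
            log.write(line)
    except OSError:
        # Could not write the log: give up on this hook but let the
        # Barman operation continue (ABORT_CONTINUE).
        return 62
    return 0


if __name__ == "__main__":
    sys.exit(main())
```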
barman-2.18/doc/barman.5.d/05-name.md0000644000621200062120000000007314172556763015156 0ustar 00000000000000# NAME barman - Backup and Recovery Manager for PostgreSQL barman-2.18/doc/barman.5.d/25-configuration-file-syntax.md0000644000621200062120000000034514172556763021352 0ustar 00000000000000# CONFIGURATION FILE SYNTAX The Barman configuration file is a plain `INI` file. There is a general section called `[barman]` and a section `[servername]` for each server you want to backup. Rows starting with `;` are comments. barman-2.18/doc/barman.5.d/50-archiver_batch_size.md0000644000621200062120000000063314172556763020236 0ustar 00000000000000archiver_batch_size : This option allows you to activate batch processing of WAL files for the `archiver` process, by setting it to a value > 0. Otherwise, the traditional unlimited processing of the WAL queue is enabled. When batch processing is activated, the `archive-wal` process would limit itself to maximum `archiver_batch_size` WAL segments per single run. Integer. Global/Server. barman-2.18/doc/barman.5.d/50-post_delete_retry_script.md0000644000621200062120000000056514172556763021364 0ustar 00000000000000post_delete_retry_script : Hook script launched after the deletion of a backup. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. In a post delete scenario, ABORT_STOP has currently the same effects as ABORT_CONTINUE. Global/Server. barman-2.18/doc/barman.5.d/50-pre_archive_script.md0000644000621200062120000000015514172556763020112 0ustar 00000000000000pre_archive_script : Hook script launched before a WAL file is archived by maintenance. Global/Server. barman-2.18/doc/barman.5.d/50-pre_archive_retry_script.md0000644000621200062120000000067014172556763021341 0ustar 00000000000000pre_archive_retry_script : Hook script launched before a WAL file is archived by maintenance, after 'pre_archive_script'. Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the WAL archiving operation. Global/Server. barman-2.18/doc/barman.5.d/15-description.md0000644000621200062120000000042014172556763016556 0ustar 00000000000000# DESCRIPTION Barman is an administration tool for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. Barman can perform remote backups of multiple servers in business critical environments and helps DBAs during the recovery phase. barman-2.18/doc/barman.5.d/50-reuse_backup.md0000644000621200062120000000074714172556763016716 0ustar 00000000000000reuse_backup : This option controls incremental backup support. Global/Server. Possible values are: * `off`: disabled (default); * `copy`: reuse the last available backup for a server and create a copy of the unchanged files (reduce backup time); * `link`: reuse the last available backup for a server and create a hard link of the unchanged files (reduce backup time and space). Requires operating system and file system support for hard links. barman-2.18/doc/barman.5.d/50-pre_delete_retry_script.md0000644000621200062120000000063514172556763021163 0ustar 00000000000000pre_delete_retry_script : Hook script launched before the deletion of a backup, after 'pre_delete_script'. 
Being this a _retry_ hook script, Barman will retry the execution of the script until this either returns a SUCCESS (0), an ABORT_CONTINUE (62) or an ABORT_STOP (63) code. Returning ABORT_STOP will propagate the failure at a higher level and interrupt the backup deletion. Global/Server. barman-2.18/doc/barman.5.d/50-minimum_redundancy.md0000644000621200062120000000013314172556763020122 0ustar 00000000000000minimum_redundancy : Minimum number of backups to be retained. Default 0. Global/Server. barman-2.18/doc/barman.5.d/50-max_incoming_wals_queue.md0000644000621200062120000000037414172556763021144 0ustar 00000000000000max_incoming_wals_queue : Maximum number of WAL files in the incoming queue (in both streaming and archiving pools) that are allowed before barman check returns an error (that does not block backups). Global/Server. Default: None (disabled). barman-2.18/doc/barman.5.d/50-streaming_conninfo.md0000644000621200062120000000025614172556763020123 0ustar 00000000000000streaming_conninfo : Connection string used by Barman to connect to the Postgres server via streaming replication protocol. By default it is set to `conninfo`. Server. barman-2.18/doc/barman.5.d/50-archiver.md0000644000621200062120000000072014172556763016040 0ustar 00000000000000archiver : This option allows you to activate log file shipping through PostgreSQL's `archive_command` for a server. If set to `true` (default), Barman expects that continuous archiving for a server is in place and will activate checks as well as management (including compression) of WAL files that Postgres deposits in the *incoming* directory. Setting it to `false`, will disable standard continuous archiving for a server. Global/Server. barman-2.18/doc/barman.5.d/50-slot_name.md0000644000621200062120000000030714172556763016217 0ustar 00000000000000slot_name : Physical replication slot to be used by the `receive-wal` command when `streaming_archiver` is set to `on`. Requires PostgreSQL >= 9.4. Global/Server. Default: None (disabled). barman-2.18/doc/barman.5.d/50-backup_options.md0000644000621200062120000000205514172556763017260 0ustar 00000000000000backup_options : This option allows you to control the way Barman interacts with PostgreSQL for backups. It is a comma-separated list of values that accepts the following options: * `exclusive_backup` (default when `backup_method = rsync`): `barman backup` executes backup operations using the standard exclusive backup approach (technically through `pg_start_backup` and `pg_stop_backup`) * `concurrent_backup` (default when `backup_method = postgres`): if using PostgreSQL 9.2, 9.3, 9.4, and 9.5, Barman requires the `pgespresso` module to be installed on the PostgreSQL server and can be used to perform a backup from a standby server. Starting from PostgreSQL 9.6, Barman uses the new PostgreSQL API to perform backups from a standby server. * `external_configuration`: if present, any warning regarding external configuration files is suppressed during the execution of a backup. Note that `exclusive_backup` and `concurrent_backup` are mutually exclusive. Global/Server. 
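As a worked example of how `backup_method` and `backup_options` interact, a concurrent rsync backup taken over SSH could be declared as in the sketch below (host and user names are placeholders):

```
; hypothetical server section: rsync backup using the concurrent backup API
[main]
ssh_command = ssh postgres@pg.example.com
conninfo = host=pg.example.com user=barman dbname=postgres
backup_method = rsync
backup_options = concurrent_backup,external_configuration
```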
barman-2.18/doc/images/barman-architecture-scenario1.png [binary PNG image data omitted]
|۶m qW$tL=k5UgjmUBcNjZ#G!޺ukkd>ciݮ];aL=diN_1%bd1@@ ʉ@16UQldfE>54v=Ԍ.&mŶ޸qnNwux6GەDk;(Gok iq}ZUV۽e˖vm۝[nYN 4St=Z㶋@@ Cr/zO<¸ũ 㗚A͛g#Bp3:|l9zxqbB NӛB _|` cp]ҫW/>l0aO qj-*[C@@)"PMžV,|t}Ջ:_ǔJWڝ8qtϚ5.UݴyjNiYh9_,hw QL<›oYxsĈ)ote""@j!|k>F]l/Ab]ǁ'N޹sZc4')kp|x+kM[#xӺ:5RqSgϞCk*xܒv V ʢpNr85Ոf=8)jh9PVep cԌ18ՐvB3 @#EҲي2MjicX oا6Q9V>N@ ;[4w0.5[)pKub^\޷o_+6Q41\vЎ?Q9^fyH\@< j ̃QCC I#\m7 <:d'*e>$:X?2}6n. _85Y+{ok<;ֲHfLwqF}ZGu@W֡tjmXj ˯dp~.~oQtֶwǸ4llsqf&=m Nmשs?v;O(N#>zA u@  YP+_jAT8phF@>}dDjQ~ݏ]q>J|tdF`Lao|2dh#Q3 O6¸V U6hC[Oh:SY%>wjL4_lej~Pb Ņ̀zʎa~H`xtMrGȤIMh*?@@ P{Bm"xp,AA,!m@ԡ2se'ojrF؎=mGMFॸ(2nܸ?7tlw> n3AP ٯ1g:`}(2tA @@ Ȅ@pgL|kiJt0aP' 4j)_RʎQ1K49i4wn܈xs~>6e QFF#Sqֶ΅>4m|vi#'Os)گmdEϏh3.vjo}G #Z8?r<ǏSN9%v@ \Dm8iȍYfy&jؕ7O_t-F49i4 `gyF8'NhDcgrNvkF׀L6UҠ+w$>ʣ]\x_\\;y0q/X";u#1ofCbbjq'1Fpz%g PVCUv ,scwAz񑫚~1"￿V8Bץi7վ4^dDC봹t>.͝/M7 9QW;G8;&NDnRM h R\J c@@ 3vM{``;X4[#WIvRzMD~ۤÖ< pn<]ڪ*+88>;8X64w>剛+*G|cэsK7tPD3c\<=)Qjb?':GT{6?? (Nr='k jA#~ rO_W_Oq`N~6ňywkW'~O\m(Mk־$w,m-WiRд펍D6?Txg+~SQgE +ͱ,6:+NF @@`C6Qܤ<)x#Npjlxؑ8pmbS{(i|Imbt|RG<}#H/~QFmw#J::/|\zR[y悇tu-xGZ|hױVc_(Mjӧ^w}v'Z}[W^y鍉"U@@@$8+, P@r1IwLĠ!q0uTKe&*,vŴ:FyȎʪ}>z>sꪫ}БVSMe84ܶΡ5tGStֵv㎥s62t|Zӧ4gRvsf,ZbD'?bkIg$CO讎u@@ X%ԮpAP;hWlvWpAu'q2h8]<]9htgδ1<<18;ߑUߕm|n?}!|]V~jKs.-;Wtҩ):]SsVZZ ;s)ʣvN7 S|B @phT`[UǨ> zD`'D#UF?{wUZV$ $'3 I@Ƅ)(m jV{nu5wKhn۠BK+Wmf2$dSBCPs)?y9{UoUk~ꩧJ{ݸv=f|>ze1~`^7^|yϮ?vͼ6z*:q}՗}iCloi.~ g>!Oq@@@JKU'zBlxW2KA|o}Ʈ܏;kZt]?ΗiNzk"k:WcD 39իqSekSN-KݺYK_}q?:~SQ ߶}wRؗQ}ȴ1}փq@@&ի➎6 ^D7A0_f>^^u]=pUVb}i `Ny T?sfHΫ̾gߗ:ګjoc-}mm7b^6$0Khs0[M*SuKXM-ەm[G4ڙngrg\rIfxOxm_f*r -v\{5}_UVmuU]ӗMSix|_uU׶_>DtӗT; "iUhoo޼yv|TmV)-58W]}tfmq_ǐiV>O8aLTWs뺺W_Vcz}YSUzf߻P'?}G8.Bx*o>Yj&+%3B B`e nXJԞh`#] :}^xa x(^~Yy6EmWE}L3W/*1/c=R-^0g kMbs4<-|Xg~||\c9e:}K׾U:cu^{AB8{@tIYp?bȫOySb)!!!h^(hyv/C8C0(Soxsʴ{m@->wcowԧ=o8䓧!!!;tz!7hxۼEH|Pc4Wqjm}hV[ի=k?>[? ]tю4eUꌓmqio]!ʓo|m/ɷUŦYڽt6uK紹rVw9++m/ϱM]ߓ8:7ǒۯ=0;4-Ϭ?$?B B` ~Z/cCQf ^CSo-im/joQy?yǸcyG<hbhĝW{au 2{Ήظʸ]|\VsW>Ť~\NdK"雰' z4~ rHcZ k1w\U>|iqi I!!!!0,mT׬U0nґ 4g{Ґ]vYȲꮥo~^2}utPgqF3J`0<)kϠ}kʯq]㭲UWϬ}=ǵol6F{o(0l|sO}/>k,k'?1"0`0$$!a#adtd+C%o4KeTXcn#Sm|kA^ U( N]c>=!n_y Sy Ծls]cwƘ-~Ol ͸ı:Ǐ&h-c3~N3oDofS-B B B 6 BKPNu"=D0>Ͱ/֏e*/:^5Sy]=xiF §ͩrڽKۗ-=qq׹KW]Uo̦Dˣ͝4|婯nSQu]+xYpb5UM 5"MGU"Tf *D B@`4k;WHL}?c;smk Wlm{k am#/BGX~*Sϫ^?6zi~"+ɯsc&oH@?T{5WF< ')B B B C&+RjV;S6:nbR G'_m=YU1ݺ+ÒJ#Jզ}m9Mnm像{0nirtXWSuOU/UGS]M;ߢ2ݵяjøN;m8t@@^Ф7TϽ_BX4Byɬ0%u9j! 
9VU# pԞѲӸyeEcٶ{}ߋ]T޷QJY،W*:˪>ʟϫk>?שwM7!!!,[zʈYݵhcmV}^:.<@p p8;4k<[fژU^LYTݨc m۬S?:z5>oV}xV{w1gx;K YbEfb񛑚f1qpԷJWc$aQsop"EBDݭ\l߈ߵ9+_yevrw77?svw0ؕbߕt6!7s'Ls13eīx6c4nk<)[bu<^L%}EHOƯM;{z<-tݩE}o-~7u) /x fц@@@*3~;7XN2Rv_*}2 ^$}DS bnQ/O۽gp^{p衇m^-B B B`W(Is@ZMCCa޳TN1ݷ}yiwC:^޼vѢ1N1ICN+O~򓇓O>eZw}f!!b%xu)GwVn^^ iXҗշۋ-Hn >5ލF]N?#Щ3+o2$@@@ @;Guݛii֪^VporCq޵2vm.33O<Ħ xHz b$\a/AKn۶y7|Oy80%<@+G rV-Ҿ4OL1uN^ඇ=a;>9yMq|9^V]kQyjO"d׿q@@@֠KhsZVijN%v[;BKoۮiwXvp/s[`pJD 86&c?v~vgژ~v-V|+[kz  Ij /;, BD{V&g!5qZ2{  5Nw)_x>/ˎWڪsQumc}=袋'EJX6r~7'>-;":)B B B 6,]:+Np%C5zv/?笶鯴;p}_Wv|G7ךz]׼v-j{)X 1o#Tg` 48Mhm 㵠 ~?Xz%oy#^p^ئ.p>qZ|}T“Wc,"fxPEo߮0[y]w]2Z\KߡQ%@@@"PS(d{څveI?6:vmڧE8XږϑּyV@@@6O0^` .Trq[qzTQ'I(ɣsW Q]kۭϮeQsLoIy[LXx $%\4 +~>(nnb"9^{mxmACq~o]W ciO{Zee!!!{=-MO~MӘ4 -Oӏ7kR֩2y?}/m s.v7>:Jڞ5YcYoq,*_o.B B B`k|7BmX[, e'nVɀzzRĴW9C33<^j{VE<1]TߘSƸY--&0.k^l1=:7Vgfy䁍ߎP&B8&7 ,xG_Fo|}:cGG5B B B`5zcj9N,6v/+T ôP%CqZ:.0!h+1돾TmϪKXq{9X-bKtJ닌D+Ybu̳Ppxq;v=.lBP] S˛cV٬|+S~嗷supI' nu6^[ ,x'{ɕW^˯+z0ʫRV;umyvC`|fLvlsmU?6l9y^co!)tP[$??N?ؽe/<B B B v :i^* ]u2!O9CUW]t%-Mۏۨ]ciGN9cߍڂ\ObQyjweSm%/B B B`"e8de$jH+euZthH]=}!p' PZݷOo%fTekaN0u`ԵP a+6Ä1W6הAܱ6뼎W7r_@oH`2v5VYsQ.O}s9ߜevw.i~G_Ec6GQΏzԣFkc"P]X$c⵷ MHt4C3t]E˧qOMPNt6.zP_Si5G1X =A =A}FDMZ맺 RLyo0֫wԌodV S-ӟ<&1}-P!^d}m 8ŸYlfW/ye–/.q-+񞲸K %@FuZۭ6FIz]x86diw&я~tӥV(}Я i\o|U㹏;Y\7/_Tocu!!![@,A~-k;#,ͰMӌ -/2ʉj{g#&Q=^df]\d<3,\[[=oÂ-|qoWlcc՜,1"oZumU{hO{{ի^նߗ#uwd B B B v"}Qe=]hiviDM]ߕvOM9G wo6ҧs8m=g|Po\[[=o#!!!G ջ;f\9282f&`1nĮSipmujB 2V,f[kٯ-0 [|Nq?n~n"@[>^aѻ QyƢ:_}W?6u/7W}ߙQ;FXp);觎f=6&')B B B (Mъ9=M+҇1dv3(5wNgQਠZ# /gJ[E׍wq[9{ E"-/p–vB: 2KH/:mӄ:!m̈/-;1]FQ2s[4E]ߗʅW;c-[}?m1t^eڕK@ZUUܟ/sMZlV۳]G뇳Bi[^CjN2-F@@@.7ѽ'nq4ywb/{QH༲z͈=KHN+ ^ϰ>0Ffg<&˖M.f'< ö9,V6صzuM_V}uf]Wu+5}񢾪ߍW[מUW]5ܼ=6>|![xJ>B B B 6QotVx5}MtW_ݼ9Rp8]N%9O|MÛwiWnLΤyLsQΌ+׆@@@l=1{F eE٢z^Tڮqp Ԍb2Z.Ovy`뛱XLC[|pWꋁޞxѷ?uӟ~0@Ѿ4uovE?i3B B B`sJm2[ >'c8W\qE3DlA2@IDATb}Hq\?_FIlc&-l-4\ч:13|[hVNLqxQ?M Gۛ} 1>}6CoJ;ʯiht>i\ڐΣךqW^e_kߩ!!!w|7O/|KH#>&#) 4tݛiye<5j۞mNҔJ7OK{YuJsQcָYeC B B V@ 3(Gͽ恍"Rio°>;<-TlmhVگ>u<[kAW -nnjm~xLW!!!!hp:Lz b(q2#zkvEޔ^l=-Fw1~Ow:nc0W>e_ft6vu3@@@l=ϊx32`^yp'C? >sǶp k߼յ$wq2x=m/W 9eӢqeښWfʗϢzcс[Vyu>'vTWcڴ帮q@@@ ~}ݷywݬ(oׇpM,S1+dNQf}پ{v}^[];KC B B ^1{KD>lFlw5#|\e!!!{i|K_|9C!c]ӏ8-LsN: zupZGsdw  oB k蜡x3)f}z<B B B`>',h-nƶ>/fLAPG<4Ѿy7uj^CX#- zCT3ڈF@@@# }Kk_s5MӘt0opFd{!hwuA2^=} 8Os`1^덪cNТΌ{rx _8x%mo{wrHW|Y(g[i3lD"F ֩1I{*J泟l Fψ_Ƶ|j,Sy!!!!w` ~Yg ?=_>/oڝ 0cDmxsX?/mGis<-N^XorZw_4f}miye"0_m=2c9}\/~7ix8tMM-t S^(vZkk۶mb?EmW", ^9c߽V\|p1D7>+[4Y_N_?!!!!{or4Ç?cO͞C;;-$мgyfծqS=)NׇٽJs`[_fqz=yeڏy@@j|oB?#?2wq͛-oyp$yoyC>x(5Y}UEb՞H}(XxB(Pw{-oxωejܵWqmSuus!!!Zb$0"ӫ^ˊHO}Fp<9yĴ 9:>Ok9"fD Gӷͮ$iw!+vߓgf7ZiwsuN'=}OY@@""?_1;sկ~ukP=bs^Ruw]å^:<я< D)3z!ڗGH kFs;-l0ޏe9l*۩sZo|oi1B B B f<}}xu788+1"'>8GF>MK_jcQzh_sb^H K9ix80[sZ4F9l**oS'B B B`%&tۿC𲗽y ;j\0{ ' <9][X#^]}& w{msoU*QAsxiVyf=@]tQ[Xy%>~M O|{iQ!!!M-}p{7\yM*~ꛦ%~G rHӕzZy]6~P(W98Pم~צqkgksY!!!w|OV?~@T}{_{M,'G8 ū^$? چmge[b9bUP C |unkSʯ*0ꟁO׼5mϋpIK=;M|L[omQv,[铇.oI!!!!!Ah֓O>yDo}kӶT 9ssLN(U~w=1Ş7;y[o|sk=]mSsX4?,Sg@@@b^=|cۄ%!I`2y%Mdg0'0KdX!bb2~+Ϧ.i`םu>O43xT2Ώ픁O1p.gS ^oG#e?-bUhTZ[Bhz`O׿kֽ|O&ei]~̋s,,sw\!!!w| GOySSO=ux}yIAp{94SN9eضmpX;C?ˈeSxcў]BX{_yjSyS,:&??#=c{C(\rI÷~p7ތ\[-[Lbrk_R1eQXo:6-=xӞx3LÆ/x 38cxk_<;FuiweVRNUWo}ãuiwO9-lY9ph#! 
VpZ1 `Y}Y$?B B B`5 ;#+CK^fx}Wy2rqEfaHf/0]Qy^_Ol!O ߌĵ9N9O 㘪Wy <Ś,1z)2e71zo|1=By 7乗Y/)Wr߀1/(ʋ,i>9ϩC B B B`ݿwЄzիQYF2$ rW(;)mJrx#ٴ;'sɬ>gc3zW@@@=qQ:/p6nVJSfP.A;Gv^}pO%B׬W(gʧx'<$y{Xfypkn 6mu>23;VE\^㡯k뼎q-w\εYĠ\ ͻ@\*/ó<+ϹajO|-kq`7!69a_^رקހO?)B B B vcG[m+MiPڊyGC=tض=8QNiP#4[9o?`~_?% 6!61c/tyx~Mj&weZ$TQADP/#g acԭDEͣAhaj`u C7ny72Ie2`kœ:M}ju.ٻ"E2:6^c7gE<ʠ]FmֱM;վsI21GP= ;F+,0瑱d饗6'O@@@N־;OTs> -6ڝGYch?c:Ql2jhwU2&ZmiQm7ׅ@@@]k\bO~(o|ce\F8^q-TY!QxiOW{1oM~m22f/}i{!ƥOoFdBoeqy+aep.ñ1A{KҢY~Z^:*Mp}ڽ?!!!K 38߼‰iFU³i0G}ip8o.S7ye/o.WoܞG <^g)//jO qy435m+Ojy 箷^~|.uR?n罷}PfS_C!ö=#7a ccVV'_[k8*ϱqXpi0O}>?!!!! =t// [6څvK']("G veNom4æ-v@@@]b߻# }!fv̳[ QYP~ nUm^QLϺ5ڏכw [o)#;Odzs ی6ef.EZXḶj۾ݎOW}Wܼ*O=r2;7wyE['+'^>վW{u}w9gUc0[!@gyf{p^R@@@lv ¿=ykU;ц|54^[>>Nr`Qa%^Z/n@@@l}1o{p 61OdCekod "ָ՗riͩx|_uUc|\0' O<>)B B B >//e?:f͉>*R'@@@V%P!Q8㌦w=C:nj3of?޾WAvӢ+of|!!!@ ໖kg |32C6oyJDxԌ^뮻c_Ϻ~w<;lٱZiM75CXbx37¤F}e 0ܺ/*vo$7~H<5*믫k4^/Zݟ/愒I oz{oXq;SŹzǞ<;ƴbw]0B B B  aDm;$C)#F8#opFFbzQ|pBGy㺆!WkH^z3`\$gqYX}G6+K/߂>dAĆa'Az?fq⇗!~ڗcu󪍩Zu1@cWCq$c‡, 'B B B (pۜNF1}ȳk8kzTN|侴;YRzY boGπxq o|}C-4$"!P&/?Oo^ 5-עy1XWƳ:kW_=\p͠{!aWankca|pyc W׹zų~X 2W*;Q/âD4 zh{0 &5㰍QXj!gxW,ke.c<܌6y cyUks-Ǖ(vո}m_VWYͥ B<m9cM|B B B B`pKa_~yshKc`΃G63YhwXW@@&?utD|8b>PX9/1*@}} 1X֋xXy0euE o\|;h+uq]:vns= Ȓ~VT^ܾg]Tu1gԶ//v ~^h4>G!!!!+ տM{+n%ppS.<^ڬԵWu\g<B B B z14r|/<8c / x;naG25Kx oh)ܶ=aγdW j E}[}l B CyWؾΧEڨ:}Fթ:_ucڴ㸮vXL<&Q^W_yЛ0}]X38浳ҀSi>\T>fB B B V@ s<2LL"Yzwsxӛ޴+XQ^%v[Px= JLdz;+g|yROq9/:KU?kgԟum?>v!!!!6tΉEc99_e XGG rH{qm#Zvڻ?^UJr!!!b_Ż9F>UJpa[yBZw[k]0|{RB!W'|m6Cpz7|s {2ơnY-Ň=a&ˬwe~C B B`$y_w٬|xpi vxӋb *G? 2~? Oh)>ITZ>ytR!!!! O{ӆSN9e??l`=( >G>QGqhٕN,5<}@@5nsef`ML@OӇ߿}p#6z[6=G]iQ|Qg %kkc}~]w5׾-"Eo]_SmVT?U7y!!!{x}t͈GW";e/{YW_=a(4/<pJ>¡]j'nW۩YڼUcyg>։V{Waݽ~VxRd!!jL5ܝ#hqG ԧtX1nkdV{^ۜgļ+Z$T߱6E宽 /| K~ y}n6EG??9眶 k'P/ cئYie1\D^;mVwɷ@d\ߢr}h7M+؄.c%Я?܏ßɟ ~ۺOMzÏ~I!!!^ ^rn)!D,^SF ~U^q(mg>R"61?->"#Vhg!6FpoJ>IOZh/>K#7p:k~@ȏ s6I }g;~3t0֏iC B`b`9 *w ^ >vP7y<9z%=hx^zO:Nj㬳jL?{=o;&y{ !x*rI'^{Xj:J}^^Y}Yձ'2ꧯW>UVW0>bK^~ma;IߖEOq'~'?ĸ.ƚpţk ڝSvi>ɨz3. .:O߿9owXjYdzfg]}qq}Ƀo!oC=r\q~ X SN ZN(?ͻc歷޺<Pq.G̺C&P kb ƒ`-I_>)qgQ"l 1zej$ڹq>huxUX?c[(isn8D  @=3뚑F.Y].KBDZ^Z]Wd< dp֘j[r/l]Í}y[gЕr¡UKD%w}ws^Xd$Dmm=QG%@@|gu n"'6 aQF"t#}?ak`&DӤ7"k 8v#>xSa;J僠JhҾg`.(t}ZKV1sʓo1dS^HpB^# $ <uDR@@@РrJ )'Hi2ʤyI9#5C(MG BH9Ӻ %oFV1ٷaY!ciEt|i{lf9dN=?>a~w$=1OiuySo|: Ck( :!|Q2o\LO?SOmBɢ1<B B`kH}KBY8K|-F_xP#ECR Y{-'xC%H"y#9$[woFB B B B` GWå^Z;f;oй;Mfc8 MkNeki.>`:U9ߧ~,S}+/mw`.T%s6vcls;{֦'F{!!bxiq'0}hxk^C|`<aO9䓛1x+X0?bM?<9spE W3ុ>|qnވNQ2V|Q7EڙZԢq O6=6OlZǽǃY XLvgzo}>Bҭ n3r}  oqVB>Om {32>g 6u^ht>4E86wlSwr^̻Ou^ۅq? Czh[y2oַX.R  !2Z£xQ,¸Kb6cbM#6F|}@ $ڌQ{^#ҽy! φO~߰}vǰ6|9|sK<4w&˜lgqI2)%/ČBbC B B B`9lqM׷}IYF>pt|E1z }14Oe_F|۬lh؄B3vA=P?)B B 6@<7h0$3CXt딨eb6Auecg9Q[b7?{{.9on ?tt yseQs͘İϝ:`{5IF~#B B B B`k|;ƁE"Cx?Ror Q}6uDG҆;tG ڭ0}>p3í?Kַ=u]ZKnOû|psPp&*S@@ 1 is |skb7~7Zxks+vaXyQw?|a78w^'ߏi}ÿG[\=RNBLt['xƹ - noTX34B B B VC8ҿ+Ҍkzڝ°ofMYVolyÓf rQe0Θo_}; BR-ڨ9׾yzk=_4ʳ/7θLk"BxĻ o{u!!I v"ak>??l“xDoeb2VmX sG?em97 7cwyLyŸEC-Ҿu kJ`׾8>{1\ח(v' 6<ıE(rI#ω|@ '$ ۾wg>6A\\ Ρ._Oie>hw q;yw6Ѳe0[g%FlaYK_nڽ4|9ػ4kQ\tx卵y:/z^?UGuO B *w*ADW!+"8(qCb؏Xka8{wU$*m,b.>!!!!! 
xvGm^Š|l]v 6k;y~?Oo#˶%#lI!!!@Bl{,L'>9o / Mjkǽ23C7ES5RZ>}xߧ_g-u|o?qRuS)B B B B ;3݅I.^Z{A34 sh9eeI!!!!Pb/oY^$}b_wu9>T#LHN8$/ 1Ja~ֳ^[E{==~pkv᷎X.R!B B B B l*wqGewyp1 d*1lBrz+Cm=)j H<BW8ypIǓZhwI}X5 ڄNj^bo:\۶!!!!{ ^߶ ׶x7xcN 活pZ7-I}~oay@@@l,xo,ϴ̷~pW]v}@nFo1},Gr֒Ou~+Vcx~@@@@ -4 ~p8s;S}i>v#5W]!!!@ xk^5#OȤm=>/җ~?+^;<,}M*@@@@*y|].c<)B B B %(˒J-I뒶]%n򇆻K9cRR)B B B B`c'I!!!!y썠6VwÿBOzKS+ B@@@@@@@@HVV=p?u/۾Á{$B B B B B B B B B`|M+Fosƛ;|k ?-@@@@@@@@ (>d{ ƇG~ ÷ی},G w2 /&r!!!b_廟@@@@@^B߼gxmvw%34B B B 6@ A1m@@@@@@Q/| _=:t!!!GF!!!!!!| u\KB B B V1w6 !pnnm)9:IDAT'y!!!@ <B B B B B B`Sh{ؓonRD!!!17!!!!!! \q_?ͯ+/'!!!I ռu@@@@@nʹÝwr?|qW^NB B B V@ y3+ wr߰W/91\!!!!!!| L_}(y!!!+D ٙj@@@@@M/×&䣘ፓe X1νLC B B B B B`"0+IM1+?!4B B B B B w}/=w>5'uR!!!w|﾿]@@@@@.DEiS!!! _F!!!!!+I/ayH׿եR@@G fF!!!!!!W+ w}C{_!4B B B B B  &{~ܽyp@@@W^g!!!!!! | šTbf(oϿ~v~ݗ;;C B B V@ +t3>~__4^<^Fo.̤#8B B B B B , 2]*>B B B`59 -Oய:k3_fW>REه@@@WFg!!!!!! \=o{dz#E_%?B B B`/%^zc3|]/#gjeg!!!b_)@@@@@V'p\v^?/-^z!!!{sB B B B B B r}߇ mI!!!C s3X)1dC B B B B B B B B B`u::3 "JL6B B B B B B B B B V@ s3X)1dC B B B B B B B B B`u::3 "JL6B B B B B B B B B V@ s3X)1dC B B B B B B B B B`u::3 "JL6B B B B B B B B B V@ s3X)1dC B B B B B B B B B`u::3 "JL6B B B B B B B B B V@ s3X)1dC B B B B B B B B B`u::3 "JL6B B B B B B B B B V@ s3X)1dC B B B B B B B B B`u::3 "JL6B B B B B B B B B V}Wgi@@@"pחw>pO %@@@@@Wfg!!{'>ypΕ9{Ktp7~O Ϸg惇~+w oo{ˈ.߯;\wvyҋW8W姇w_:ۿ{SNg oQ_r!8%'>6^=;!!!!!r])g!!!ʿ8Kޔ&*(E  {Ɩ?|M1&J=j,H,HSD. H }}s7f5k֬{̻?{ܥSLŊE6-62#zɧ3نMk|*a1YB+Gw{gI& @ |> NsL6ǃ/Y uo>ٕmݍ\SZsW-^5= *ѬN{١(#\ǖt,_3eCdQ]mZm}tl&.>r}P.v}hn1ٲ+])mk3^6J(gR%mۏYtZE&G_ ".Mlܼ&}?3V_lͣgZbvܣ_{}ʙ򵢹la z.oVLwEŢ N͔f,jֹy_Siؒs†ƧݺUX/ODظeWfکXeShyE軦˾F,]jTo7::6?|UQ;9+mFֹr_S@ c% 0#ѱrnT e\?5߲m;?AA׌=$hת]Qd/Qnucf$z\dɶ*7f+q2o}n.mt|QoL:7ؕ'cgtZ4Ѯ$ww_y"[a+N~t-Nm^C>~ֻW/OX@pF4rٻN@e:ع5Ǫ;Rd޿ -~/pQAm/?w]oG޺)~^'UߝuiqbB.&6qU\|xsx2箋%q]`{4_dfV$9 @ 6< 2:@(tچܶ}f\(lg$5<*~^/3-XYɊF5woaGMkw۟ԫ̉$ż(>'/Lzy;h}"͊v\ƴ/Q~bmi_<C?y8|,>]ɭ HW"ϯ^Cjrqo;Նw~К?!J @@%^x瞑C@"yg1$vpw[a&.u~iǦdRq[e᪙N qG{]Z&fL&Ph{;;v/dwƯ9ȋ]?n~>6e!?kRs[Gt?+oPeB.ޛr,.Y6&Ѽ~q eY`w]4x>!tlykf~2e*ˁq\^дG\M}wV|MS5vKN|:9KX ҾwbgvߕYsJfk\֪QSŲUc۞' @@"Jn @gOO^R#kaHY+<д55[+z_ lE䁽`W&O+_ݵI? aovg[G4;qlܼ?Z=~;9'߸Q/j>]FN~Dd!ۑ e:r\d(OToeEQ 1~kښ`-ɽ2}z3Q6>2N{ݞdx݀ԮW3]kqdG{\w^=5.pw䕞(Vx$6 vrG۳n@}廨K#}8bއkhb-R̕i;F>໾;qEN @ BC B3  M[6دm~[Wo[mnŽu];ۜœo>vۺYh13rnU u[~Xч[ #ʔ, O6h'u~ȇ epL?_NfXcZFu5~:ԮWF>[2ig-m΄'l7gFqApҶQDRMZm5WoCO%!o6 c]pYqIw'A*Mb?9 @ Px^x暑B@&P7˾mޔo /' `^Qoz(;>Ƌ+۟g\<:~ehce:eZ_['q`}2*ʺvlXu4񦞲xjcɖqϹ퐄xo ?wK|0k~ѪY>bs&x \T*W#NZn[|WE-VD9:wcn2 Ïysb''D-\5 @ P f Pp ȞDV!:^`|z nDpm-e(,QV p"eU:ufvوbS}/m[s`E?Fv~M=2ІoȾdu2)Tzj!WV2{g}7dH?Q$I],4 )[*JWʴ9A @xT@{S^4o8ʘ`ELC7俽?p%oL_[h;{ɤH(eYz߲xP>B|xky*p ?}6]SV{(F+3\(ZE#[TQrCiʳ)UZ:(K=5'v(Z$}.ǝ:'5#>>drhcW_p @ P f P@ 2%*79.I7[:bjmAbGu]th& ˿7Ti0&=&.Ek-k/4,^.ٴ;8{4傏]nm8I5Z6ThaeM;[]8;_ ,_ vRM/Y}?k劰>6$ @ @p@/\h!@hRm,k3oVNw(jxʨ6ᩄ6!oնy.[مSdRxISֳbs퉿ܢo&'?0[iXwNϷ[w/mמMIh,J5<:zN_{khѢ- zVd]R=W/Nrvkѱ^mϊN{2}'m^Cyzk{_o9{?6xdy}+yNд;WdY$ @@% /F@ :,f%֧V:s|78N<"\f|-xæZKGjsދ,]l d慲mXT9{[Y=vkq;գv]Wܢ}R|=Žjup{_e{ʎ/k[>Έ'kkF+&G;>&A_!^. 
%@ @ @@@Ϻ) @ @ L(Q @ @xM @ @ L gB: @ @ un0 @ @ dB<Jԁ @ @ #uSF!@ @ @ P @ @ d2: @ @ @@τu @ @ @  gݔa@ @ @Ȅ"]NIENDB`barman-2.18/doc/images/barman-architecture-scenario2.png0000644000621200062120000061216514172556763021436 0ustar 00000000000000PNG  IHDR^>sRGB pHYsgR iTXtXML:com.adobe.xmp 2 5 1 2 Ү$@IDATxquwB[ 6`Ȋ3 QB)mRwww gFc~r'OH#G?@@@@@"     /D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    :@@@@"@.*Gr     @s@@@@ʑ    @@@@@ *Dr$(    D8@@@@Ѻ ʁ    @A@@@ȉtO 7\m}vؾ=.Pd% W-W]VlPTFwpcfhˆ{7ܿiǾ+TB"KWrɞ-*oPPA*dP9A ߑ#Gr~|v݂`|VxT+Ud3/Ysu{!3:\L2-lj&J}ZV\̵[ 8/'.kӭY%G  w)ZyMs=//]bW(>ѓr3o}Ҥ-Q`[dzҖGL[g?ppt|Q:_;DXo Vל {Եi=jP3|AVqvj3cwi+.깔 Ar^z~wRU8M:sAYM[7&{ey03՛H{]O2sVn۵{-b>9E@O1mޱQfpEX>.Z3]7 kU1.UW&f?,{?3mHhSՅ^ҺF)|/2ߵXmmӥfU7wዜUM5:@ $v]){E9@@q>opν`A!C('D :j,rkP]t JrБɋ6M=NS\N4'\„DCuiWwpJyֺքШDCuVzU/Mzאy|_3yJuypyT`O>ñݣI2yTFꥉsWn\;g fL@߼W]JOs,5 wH|4;_ޜ8W.aݴ}P+-UyMSȩ?+jghw^~iyPh]o=-ʭU>ͲR-Z߃#K +䝽)Wpؘr׾߽f'>q/nNRUaPn>viV$1bZˮ!XmY0=`;! p<?֤Sv2DM7NY%:mL VX-:kӯ~`YG&@ 'oyna@+0aܻs9lo-) ō0vi 7s]vS?77 z.b&5Z#"pSϹWD{@h ;hϗ&Kg.qJGhx4K5zzV5Q*p߇3^+VxԦd=Vxw7&O|dL-;xkU,^\ {hݶ{ P_F]fٷ4Auy9RXU,G9[qדWѱgnD\)P`uyb+\LrʩP;K{_X2Ep!S<\uJV`Ͱvɢ/YԯRʑlզio޼DZQAI 6z]T WV}kWn^s +Ӻ>}jBAsgDrAdп9vY*`7um|y;4$j3&PlQAW]{}z4D.i2cj~}TA2谩goP' kd~bܰBE߻NcV_:;ڿu~ݲT.STs+7w )66ا|*U/Rh>5m_"T+-ZS-,*)*f5˴42I(Wm_ cuBtH1՛wkv޲G{׳y5x53uFnwTBW\QuSR+-*9u"~K{tٚc奾d M 6v]) T_"JiYE dwr%L.*f7`hm^1dmZ;aifoDќ꺫լXE2{qs7.Xcֽ jVlUH|z%}0o@L.WWp~w!ܕۆX뷖{z*mHc_zv޽߃#g,joVW[pM3F<%@.Onv6 _^A.={=;f*ֈ_NZ'=>S 8 qmje0DQ htqiMZԊ4ؐIEo}d@Ҽ1l.KɅ=V8s6sМ־M+J 33wF-Yǫֻ؎[T;e}>3ɫb΄O=>=Pُپ{<}y%Gc[c^f~>8y>lJIUQ6Էfv O2ƣ.wV EaJaMf9/)"lsםtSZq {WO~6G>-7i]0bh&l_I! ͨ0K^n"Ec M!RͱT}ҫC|6a_/i*^{r,g5;KmǢ8h^XJ`h\^03WlհZi*tJ{i! 
9@ss,YrרXܯ]xÇ=rcζ?}5ԾqksK?Ψ%[+B[-(fw֢tMٲƪCzޡ^nڱj߷G~ \ U\F9/۰뎷8"S=Ґ# =b5o:q5mMA1K)LUзICYQ~dЬwG-y6&6sF!X>YjPC[kZ^z޳_ Z)5|>\k:4a.wVą"ߋ0uLBu s۪s[<,|oB* ׹w-VzAq)-j5Dɝ !h ׍uG֞̚_ޛAs~UOJ1|O;}/^^55v]֝0@j֑=u]d}tL,۰lMO՟%\Ze<ǎ2Xu;7O9L @D뒦cE"->WDs{+[0_-ddo贵 hzco} h ?}h+7JПKi)q^P]J%{W2Ž߷[%Ɖh)#H8jܵlTg.of6zqVl+c\_]‘2[>!O|oj-=No,DdQۋ(O{̜PO 7tA إ_rН=:69D9.Y3ȯ!k+U jXxO\Mِ2Ezkj0+]T;|tJ$šLB~5 M_gb^ubqc-sթcN<"'] 3=ȟs>"wl`wSrZ>; V{vS\ɅLtۡ7j"dЫ$2+3޶;P^k- ѯVl-5a/l; JsG5'-!Qe~o玮\>3{̫^aIbW}|2K5\K %TDa=)0kɱ_ő2уX=䏆$!k6dAr:k/T)GǦrd&4^Q|ݳ=;& 8 C8 _ߥ;Z{)UvF{SduWu3٫̈́ [#~i &w_ONP 7^Ը=r-R ܻߙ7Y5uȴ:5s.T}3HՎFR'Q=%*aQeP%ӻgflW֛9ަ$̙j wc:ō}.2>zߋ0e˶>Rߴ*+3uos&=(YT Y[!(o-Rh';s3%>;i&qsq0D봏=Y)]ޯqlEלb/"uuv,h/Ҹ9tBmO4JuSyT@] 7aw54˯70Yӭg58U; ߧP!ZJ1-Qur>Aӕyg_խRҬ#xfZe; 6g PmJ7+CŖ}tBߋ0ehֆ$uC֜' JT$tP<BАg.dm43C݋t5S3kncR'= C8tne;Met5 U?zU.+pCJ cC"sGϺ4i8xs,E*n "p4p_iR'>bu5А~T'>{-ZأzdP 4Қ|BOkCNָ9};{0Ϩ2ڙi.{ +:{~9 Mޛ9 ^$Tߚ2ѓ=p/'ߖѢVYG+6%@lSR33]S N>-*Y='c\ lTbAZȪW[إS߯Ψԋ!jgfBTUkV8~-_LZi"(Liv{QdV?\~lZZ@Pgѷ^٤W XnhXgZV%KDf>oŝ_ [9Gσh&iLxV<ߜm&αqC>|Z:8BuJ~A*)`֥q^^WN+[q?ֈ֜N!_m{R眾t4H7W)K&{V̤GoZ9Gk1@%4@xgt4U [F]?SCuv›OkbmpBwoMekutU kNnNȠzt-{@(ӗnim''RGͽG9b{ڡ j2^y}'{@*d?g_{jzYbd|P\PNUrKunR} ~Wu;=Rv<]!M>$XWӈa*aT= C8mǍk*;uI5sjYnUbIuBGr=#+>"@nHO4I}A \x @᎝Yg`k1Qx|#} $\=mO^pfO#TgX9+L=^ӫEOk?K^Z/9νn>#TgY>G| 2VD3 3ŷ{JǴ8 =lU}ABu*ҹ]/{6i}0CDO=ua:/k%7VVԯտow=KaqOl8Tj(vtJw/:"J/<ǶQ?ޟ9r)+wB C{{U϶nPj)Vr&O;wIfBڨ9^Krk{dNFѴj \imU奫;X9D'U8_S:yg?jvCߞ֢sZVqaиGE~_W53BV?18nwJiM=(VN+QX?Ⅿ]daGL[UOaY gnz͆<к*W@u Ag+eBANl8~ [><HBѸ_O^=憷܄jKl+UGmIޏ9XnG}zuȠCs4|\e9c9ugt}jjjџhޣa_=˷>1ݮAyuAwGWSO h rc'~KLLj*Up~{9HgTtC=W2,-RxM]l",9j ߋ0eVvj{PP[NoblZQsۀBkk\U!7D[4 ϋ__:6еi%Ow~ia ~:歔,ZH5agNҀ'a6^TPaV :HebvGƨفeڹa6tIHikfiok.K@ D"xP(P'uSQnVnzuoص|.ݺ@)Ox,)$ZU*x>,^wݾ-ԪX|FlsՍm=nՂD]sfڻ;uJ9?63ۣuUB= [4tZ4izV}5Gv~E2d(󫦭}d Cٚ)T¨GXT 99(W?jJ- ZjD;VK;s&,E'@.S(w iFK okm=>V~Ս-ރ'(Ѕ/3|yÇ ?7癡NXqAZeIg[R֬hJM/L n%gT2ԌR&:3kV ٘zvH+|/B;!6yRLBAm(6T'~e/mOnݗEvi?xaKOgTMm-Bc; mNRssjk[E PgjkIq"IPvn;lȝRNeM\P{yV33 T Gw͖=({f9S2vBӍ#R@ sF<طUSt{;ߞPh#j}s֔q2T-l|P7.%Mv*gTriL3L&] b"uJA߰cEsm8{Ɵ:M{TPJ]ڊiBD_ߪeYsPKXGtrtǀ'ad/iNi_8m3*|z%eˮ͍a5Ll4?GVb5µ'Kuu"{h() E^~ۺw9r]8]$uJf~2jJן.Tka^[VpFY2LLFah1󖣖sEh2In]nX19qb l9(bH C.O*4Q[-٨ݿ}7o7( /}vsWc_R@Giڷ|:Un9PR꯾^ lYc7Br0tҗи~jjۻ^ ٠j Rȭ @V *ޑͻrJDCuC? 
YY9vTch吽:kmE?3WW3?wD]k?:veeߜtYsܙi<%tՖ?n:jiP^v(>^ܸl=sVS8C")Aa[/:Cjz>WNԁSywLzb?δ-*)|^_KVlĝ8'a8Ν՟N{ 6ٳy%K?hi*O~6sb3xã{w:-)6s3oO42u쀲;&SR6YGOWl{N/4I<>D4Tٱ˵mP>[:90u5*s3^C6gn"v=3\i^GϞ5em.ʼnwf]ЀK>~[-cŚu(Ul :Fֶa^e֭iѫ1 [?p^e㶣lQٝƚ`Q32Qbu~҄F2Oi\`%ڴ}-@ O S[S/J)/TGV|Vk Yq{ںI6㪬۲wˈ.OaRMu~|.>}4?#8 l8g߮%:?gfnZ-}Ut¿P9  Io L 4a~];4'":XwSm3~?g9V> lڝr/JtYdh!4a^9t +yv皕&ֳۅ=uF-j1~d,szg&tniѺV,]haSJ9Y$fFLY7c j zsQڅ"~2HyҘF54Uʯܕw=XxmFqC甠]TXHmɺTWef TSSBFP(gن]i",̋7*бfbǒի\2_cO窎MhV.TFUYW?k碸3;6PbHU[B μW~ZQ&_/T3ϻplz7U;5vñ˾ڝ7P]?n4qFM}s;}7G,c5ܳ*#gITTU|{bCuKZ13*r׍Ιi/UNn\1NڑzcX+tԢv aJC12HB}2{xtAZi$"kw94IR;>'dÇLY{cӢVY+"SrI]ܫrq^ezyfCO/?fo׾[y 6 s¼8u\hN/Bus7VׇhV CGA@7^&&ԅe,+Nto9 +N!p̿-2C { &Ƿ~a栣 7+CwnR/Z++Oh PWRzB++CyغU؄PW^9oOEEkwX)NthT{KmE{oiFyq]T +n\X.2Q0ב7Yf֊mG<1C޻biARx႗=7ٷ&t>c # .r}?w:&(LP%D[hwǘ߮1Ko9߽ɀݸYeZ}+^_}R;j.G??v >JѸeȧ-09#gpRꡏg2vlܮAyFܜ>[{У4аbʬ~tRH:*ZI~ P![N֯]'48 qG_\{F@M\owT=g~^}O=j-CJm~nka|m}wսi% %՝.|jܗ⺭{d}/ӎci܏1Kε[#_ֻqu܍&Rk^m}'ڎ؞yW& ukoL%zk2ٞs:3ʳǏ왙ľ9ҩ`ig`_rSxfٯQH5?Xɣy0PkQ\L,Uj~'gݺ [{ZJ= ;uUϖqUr񢐵xc" joG=!нY^O.\[YO"*tSƗI5:&5@Ґ$̋*& jL&9]js@MlCT7ݛgYXO"zz4ǹ+cgh{E +tZcs4u;w/g|ǃ;樳 {1؜j tU/TO÷~㔦Mr~α>TpCAA Jgܺ*5vG.ܯ WW'ͧ7iY\2Ee1i3guqϺkٗyFٷtdTv*ʍ ;lv~/{ PTDe慨̾@IDATŽ9ڡzg^wr#ÉjmN]yF[rn $ϖ RsOu G~ߺ{3Js/M{ OJʤʠۉ+Ol`ts3fzb*ޡ\ pahvPuLU`%tSyRunITNfzhik ~j(]P"ka|߾\c_vt$vrQd3xbR"@/F"@DTQcΨ%W):1cs_3ħr Ejj)O^DUQ0.'rѬ2c orOZIo,uhSoGt3C՟cfgώOhCjviz N.@rs"{f&;|.:U)UaŲ4=R-zTw7?,*]]m "Usu % GBa`PEdߢ0䯥]'= S^ކE!koWyb{F28*]ZUR(+ժoESD*W{yyq{U(n2gh~zs"j=O:stJ+hzbOfˏ8ih9[kwoT4ϵ3X~nO4zu8rn GPs0ĪgϞ6݇z~}B(2bܱmvzCHj O&հylUo$_[o|FK/gfv0|TUC-괪GeVNsr^ 8mePcgo1LuW8+*}G-5^(B;E/\ݾ_xǹ)9ԍ v+Q0T-vWzJPhС4Ac꬙SRS0ZL>u;C.Pzi~3 d_bdj}湿9>3Pݰݻg?F-}iyV4ڶG1#Ap̉T/xnZJ2+ȞD[4!7u ]Zn\+̣4JC/aJљ%xqCSdz];Dڃ&*+?G\1qw>՝&-sdE& CGHF@#?vik5tp4uf-*Vk;;ϥg7oְZMH6d᜕hw}Niu#Vg.Ƭ|NER>(|F%gf1|3 lJO>XsKz#KQK$^(B;~8/U/v5}'bY%L[h._T *.qמ0l^UIU¼8ݹ7C7c Iy]k|> ޺U'6Hj;ܷ1Y)&@ -G@(P`~uKj_Izmsʞ>6zmU]5標ۓ9^&|GE7tNKX{[)w~:9i5ĸ)q?6^;)'ӲD>R x(fc6Dd+o;ɭSGL9APhG-E\(=D S|i,O˴)g^>kv `? zi0+ZzVP#;6-]w ԗ9Tyq8SwLkZrm{ᬙ em}SsgN _1ORχ@ ' ˉG2#@eu95}E,Rb#G.j}tT[06SCʎ5n|?1H {> Ϳw-B<_n78KȖOU-]&wF(UgT\%ggi3:S秚Lcܐͺ8 rAsjOڡo4[Noo5Ln:^r)k K[-|`%to| T_t4GN}lQmruzݶwNUZyqХGOT3"٢VQpۀf=gѱ؁'[TnseoI.3! $<䂲9+a"*)ZѮMTM,Wذ_HP=z/^sZ[2_{hߡQ m޾uAnX:继 5z*+VmG;۝5 ׭|yCa#^_<)m>.?mŝVԶ*PbwƺqeH(Ⱥ,56[[vn%wۺg[d%W-հj; "VH]=~L+6 brmYijh+eF'?ҵ;LE Wl[iwp(^Wclc^fftlϱj_TluQv*QA2}Hp]7CH,g 7~uܽ?m}o/C7V<*2(_NYc/ƫ_~ľL#@D뒦cE>!mzE>حh :K&r S3[ΨlΙiQeB;P-ݴft82C/m])g$HE 4탇wPTRdg)Oh/&VB[/*@})8]ɢɷNNI*SO-^H oxK-\0L @Hs՘<(.#bptMU*. 
IJgf$cQJӝGm9ʴ;`e_$KQ 4mN~VՇN[k|D3|qPN!?|7^ `o:5BoX^2#Cɞ"  L]s36u >'ĕ9jmQ@2E=l,F$vDN1@@@*{A ș߮_Gm;De2g~6 F|6@EpP@@yTjb``XigV{hFwL_ֻc@r-as   KW)բV˷(^UwЌ[5_zk~j5d}d@ P.2   ASݍmL]ɫX$T /;uq]IDZh]C@@*p~M"{׽Y֕'6Ho Ѻ   @ߺshS܇u+R@2$@l Z-l@@8&LQ=E-Nj]uН=J-zV 2l@@@RE{W&װE#~^}V=!Y@ DNJ @u*Le  /?u;K7?tZT_c++Y 9r$"*yǾ^B)^$@ȹ_yϚ{Vm޽f˞7% +Y|"KnU\ErRr@:@@@@UQ&Bfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc     @ u 6,\p͚5k׮sʕ+׬Yz5jhݺu%R9$-qF:@ p#) dK{) # .ȑ#B nիWC 9t{5lٲ_~)??-P'niӦ7/_/Xb>yfSM޽J*~o::묽{ELߋ>TRi-p ˗/oaÆO<}NOgh/m{'L`/"}NYf2FW t)SLVzѲeH}%)@@ "ԭȁ {wz;u֧zj7|s'Azt͛SO >1c; {^ziB;-.]ꮫtI)Ydҫ={̙8@+@M}9J^Hw(`rGE;,ٵsךCuVU͚]I&hKқKzEϽvHAҨ(׫W1Sf^1WI$*j!I@\w @"+@.!@ ~mے.~駤Wg$ [N-UIǐƱO?cfOJ%D Ft֩ShѢнb. US5ԉTuܸqC8_"+@  6,QP5. 't˗/5jѣ=ER}L!+V8 B{a̙VҸd[*E9ܺuWTNdSlթi? :! uzqeϨ_kX . uY O.90@ GB"@Xr)SUHcv>_.]tϏM{&s2'Of.sV0Wq7o۶"6mR_udN:5zUׯw8S3ccM\׽_ZAN6yn*A2T ~g#%gNO>dG׀*c>RHʆX@ {e?[G#_wFգZj垯9={R5ftŖUZj2s]kA3UR]M*P TEˬ4ILH* V*7,f={UQXbrR|nޙWY={Ww֞feeiO5@z9k95'6_ֶlotGRihr5( m(γu:Aa^T)םcN}-W`VWؗZӺŋ׵4+^&v9AÄ."Ұyn첯J7K_u["LkD*U@ ѺD$@ ( w9)pT )*hIǤ/R8*}u=9(|WgqFǧ3f|g MO$~ 6oMk4Ç[5Qn뮻N1KUUa*b"w*SN9SN ]7~w5.}0 +K..!CaV|=XQ{n!**-Zȱ(QO)ڨ* 8q۬ Ŷ%}# YE TeУߙISkR<|4s?qq /UW](h-G|+og}I}c=z{챱3?67C_VC6Ja)E<[3= [k7V>}U`M+p +?beb]dtNׯLϾi]T}mVwy;w6#hŤ|D g/~{1 Ul }~ͯwCΠAb:%t)Sqrl3d@,c^ nU%Su|`=jd};8ǜG=!QA!'b t{eЫA==@@ K5S_S?3ɾ{G_I׳nI ү>ro16G;TnDS1׮][})oiχ1= D g##:OP䘩7*^%9 G}G$qLc~:rRY [}/aJ ɦشg;JkuERbEu ci<{3O<1klFV5k;vڹgej :zc+AkZG:TFUv]rwey…MQ`[oullN껜ϯ9ЫwyG?O8Pʱ41$.%rSMOPa@;CE{u |UZ]N-願oܯ: : @ y4ugbvֳzFnU?GU@cK^̑LU@/Yg9yF8b+fjTCUplvP}Hxժױ\*{޽/Zrʎ|(_ƑX%g6={y޽g}IϾ]&kiiZNNU9?yZ9Kޜ9BuZx`R 3owީ=ZW/Tν9 5*C^_/cofwt:o gEÞx c7xRG{jP],[5U[&G jaj:q (VQ0+54z~{yj"(0g`eLU )ˑ ?y&/cr:)UWVsq';awO[ٚ3VIٛhҵp<'<^E"[]uUҵI|5<}BuOC}Ҳi?wBCuƌ1yZ:;W=i$gCu"ZMe=KL@u 8,B u%'u8jpԦMrU5۵}Ê9rڗMhV`jP_bŸuZ  UVT%i~:<(|܋N}I,9Xi`ig= f4BnN S9X-LMһG߳<*ɫ7pMWT3TJ?:,1Ol] buw6kG]ImV߲ݬ/jVWԴ0]?^"<u28 Nr[.&=#lM@ &@3Hu noCzӿ{Nuz1X i NpքRbsʳw6+<4z"j۶:D[&k(Dj.j=)v78u 9k? 1/BkwRcWAE bwh%k5FMEQc4jXbcذ]PXP09sver9s̙9gM X(a5",&LV9$ه d2F]v;ꨣx2!gw՛PY/?H8o97k)zXO =c㴆c2 +^K|x8Snr R,T2xzKOʹZoͪA/!ZFU_r:;ؤҗ{K]?1!RmwqTXMSǷ6xɥx8ޠ .XR&K%qhAxF+Pc/oK'+\WwCŎ_X_o  ~UKAL+- k (@Mɧar0;,C~g1#(K |/|W>KW!±J{\)W勵ƶn !v"/ {2K{9KLIᨰF7̗h;! ; m!H QN "Ƕ]wݵ-f \tEt6]hYq`gz9iJYq"}{҅*mvHs jy8%#k-YkOoc_Y$CZÑ(}J eZd}!~!~7z[c&WBՋSHzyg)N.^*ҩTX:hko5{>._ WbE`)裏fY̛1GP 0ZjV@Z/HN;?#@sa4|h e>!X( % FP^ly.F ӱNDWuYt1%Sy|t[lE<$MDK+]Gz䷒~+Х4T|/j(|K]iD_ޱa\J{c~=)7[(G)xrO8ѽ2 !K46 Յ#t|Z|ᥗ\*lf<}7f ̻AnSG2-+>tN;5zKZUQzd1Jpb„4"ùJh%7kixD*Ӱw᭪4io>zB|K Sݲ`P'͙qСo SeBPTh]) (@kX>,LgU,ġȂLv+czio9ҌrW||O_ڡ +}J;y]qL(V@fMCgBl-ӏ \P~[bbw [,$xh]鰲-n$0K^Z:;}dgSjPiKo]{8Qz -AiC>S#H3?*aw@iT:mf<'m˿!7+6,zUUU)_i, s7z4J#2,?.dK^̅bleO٨K.)~VIP&V' &f+7o},JK+ʼ 2&qx>1Hpu (P0ZWqSXvm(W2cI-!fIu3uO4 ^_8+TٌCJ?lᆥL)Vz6lx5~N?K?K-4yf>Ta0T^X"dFQ&Km!p,k0)S^RmT)== -lj)}9 WD<˰KLD|BNP2/F=zjc"/ɮXaf"_sFiZmo nA>]>̯,rMƓѺP[CWiیHSb4]NᐼBK'KD>x 7)~X)f̦v+koh+4WD^, DzY:!}$ aV??~N1RLP@E`›J-- (PC؎|K:>jK{+?6 $T:P&!->ge*=<6q*}:ʥBT=Gja Q? 
U-V،/FݬA|RPʿ P#%@cޗ-)=+] (gIx |*ݾJ{zS-GSP,n.y GҟVLP 7T@0ԅ UtAtE2G*.jKtSSa׎;XWݬOJJ +9)TH$oPግzN}'|٫Z T, зXa3Oޑm g,=WmΫ_-(TR2IM-oG xECPa/\oiJ%Dob{-_i=猍} WDW#b|̶iINWp5,sSPEu-Y@(Pi- J7CW^1S:*裏oIxEvgi=JOtQ01\wuCh3È *~ )x7 5ͣ@.rR4jG*7Jmr7M+n6 LmSLu>Ny16^R$O@[kKRCZa3Ӓ0n 'V(#]Z[ˈZ. !ۊ_wɧ/(<''gpVp~x[h=P-+J _]鯈|2jBnkVuk f̨P (Qh]0 -SFLMJ]ˇvR?a<4ZGt -o&lBUy2Ky?~Я$N֞ɾ Ǟw$h|L {>h]wͯU2~,\sx7?|^J$xgNf+:]z饥:VcukM$9C3{V=mvU?>D|(BfňDbxz󛘾ӖW7f ÿ\+݂x^r%kvfС7xcݜ7 ͗$:lօ)PB`>^5KlqV|=>ۄCi=|NҜN[˒vD>Xt;.X?ZQ0!MH|@IDATtMgT?9Ι!G"w^l;餓K=4 %b  ֶX4&?L,YdXbɘ( &kk!!b,1&!C)o$ԏ=X>jɍwCivr,f#[{M'B&@OZt_>_駟S(fgQ>ҡ`Map]tT;8s5LҞzIIGy>DB8Pv;D0 _"D=gg}y0oz:#I?ҩ9a2"S\a*5,U#I1F2)_%x mtpC9Z$Ba+ҥKrJrW=mĈ-: c*~]Yӎ.߬3 _ykOo~GZo$oW)A,GX2O c!Cf_omyj<}xZUe>}tڕ2 \2 c|lԛo_;w[U=GJVbW'͚/j)iP@RKsT@QX3V^ {7|X!*&x.LBtӫXC̛·C\aX[QP/KI->oĆD :>!VtX>BU-nJd)URI3[ۊ{ڨljnuGɜ ;]K"jOoc_ Y8(B(N4)y:'/bogyf^o+^L/x,]Q*$|z֭[I.^/ewpZw~~Ԑ7xE}<& w|b<|wX}OdBP5k92 (A?yY5}Y^u15{WܬOПth,_;cip! z11|jŸ|駾4@IOxHiC=d8_J gav|o!tk942FiK3 g֕Sئ.}JC3ZqO8b)<Aa)*\W[3jkOoc_ YLǸUTc2Q YMg exxiJWS(13X0o%7+ܲ*LrU)P{lW\yW6Ny"ѿ'iNHo- HAP@J$c (P=X kN;. 'ryXYvW9!*LW兽q.!zҥQy b=b:& g=R9cΕvBQOoI W`/ެsUJ^c¼ys̶Ye^oi#+݈1ygaBiF]r*]o~Z^p c+X7y 2}1D|eO^[cO5P3Ї n1:;f0=k^l>NLG^2y!(C&2yIz0Eҳϧ7UfG^'K\o)ċye/.yҧQou`Kc |Sׯ}ه=+\XCXDV"D G7-oZP`bM* ( (SCY(пI?I0E] ck7Ǧ) ( (m+@BF=m< (u[+ ( (I(ٺ+|P@FWP@P@@yjYy (@5P@P@P@G+4vcB (0Z<[kV@P@P@&'?p -vҺ (@5[P@P@P@C KM7V*$`n ^ ( ( (0ÇϏvPS}wM=+ ( ( Lcƌ6lX=z(丩 (TuMrP@P@P@P@p$lXU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@0ZWEP@P@P@P@hѺZ ( ( ( (uˢ ( ( ( ( 4Uh]Sy\P@P@P@P@:ՁeQP@P@P@P@*`V ( ( ( (@F ( ( ( (M0ZT^+W@P@P@P@Puu`YTP@P@P@P@ k*+ ( ( ( (PѺ:, ( ( ( (@S5P@P@P@P@Ch]XU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@0ZWEP@P@P@P@hѺZ ( ( ( (uˢ ( ( ( ( 4Uh]Sy\P@P@P@P@:ՁeQP@P@P@P@*`V ( ( ( (@F ( ( ( (M0ZT^+W@P@P@P@Puu`YTP@P@P@P@ k*+ ( ( ( (PѺ:, ( ( ( (@S5P@P@P@P@Ch]XU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@0ZWEP@P@P@P@hѺZ ( ( ( (uˢ ( ( ( ( 4Uh]Sy\P@P@P@P@:ՁeQP@P@P@P@*`V ( ( ( (@F ( ( ( (M0ZT^+W@P@P@P@Puu`YTP@P@P@P@ k*+ ( ( ( (PѺ:, ( ( ( (@S5P@P@P@P@Ch]XU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@0ZWEP@P@P@P@hѺZ ( ( ( (uˢ ( ( ( ( 4Uh]Sy\P@P@P@P@:ՁeQP@P@P@P@*`V ( ( ( (@F ( ( ( (M0ZT^+W@P@P@P@Puu`YTP@P@P@P@ k*+ ( ( ( (PѺ:, ( ( ( (@S5P@P@P@P@Ch]XU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@0ZWEP@P@P@P@hѺZ ( ( ( (uˢ ( ( ( ( 4Uh]Sy\P@P@P@P@:ՁeQP@P@P@P@*`V ( ( ( (@F ( ( ( (M0ZT^+W@P@P@P@Puu`YTP@P@P@P@ k*+ ( ( ( (PѺ:, ( ( ( (@S5P@P@P@P@Ch]XU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@0ZWEP@P@P@P@hѺZ ( ( ( (uˢ ( ( ( ( 4Uh]Sy\P@P@P@P@:ՁeQP@P@P@P@*`V ( ( ( (@F ( ( ( (M0ZT^+W@P@P@P@Puu`YTP@P@P@P@ k*+ ( ( ( (PѺ:, ( ( ( (@S5P@P@P@P@Ch]XU@P@P@P@PFk ( ( ( ( !`,* ( ( ( (TuMrP@P@P@P@U@P] |7~ifeE枥Lӕ |ſwG:ⳑ_|sY0٤|{v"磯{h >}~ا#>rgYr.{tݠ~BUsf(( ( LS}wcm (!]0e ˷^iwSc1ެU4dϽij1 Օf~O׿WcJ0ovZ.o$Wq]{SU~Tf;}~/4[;Cβ=>atF|ed_,3N?M8>wOviIP@P@Ku (%#{xؘo~o4owm/;x%~r; 5+p s[ko{R>ֳo}rt4},m>N)${] ܜx?>_5( ( k*SW_m/o;Cm.f?^ۜ=ΧMCuL= ̾P$Ac K53b~Q>̘cf}FW@P@T^T- (J{^ŏWMag5~o_2O>xŽ䜥 jtl%' 1=`Ŏޮ,3s\z&0;cG\><Ͽ}CVx{0ro|YCf馝My.ނŶ^i9G X $:ET@P@<uS=P@v-*!ӤVN7ԋ9TNb^ݗO>ƚ y}=$ĺ|.ݣVXoyb!M$_dP#Fua4`9Zw58֡}6:;Omf~՞!F]t+з?[pݝ'pْr#/y<BW_f?\O`q/i̓}̛2Ӵt?Zg:/!Gn;>TG5"~2?]q;7cW|gQiyfsEc ( tA,1k ;=62C^[^G!Y n=p`hXsե?栧}/}e{vKek|G??b]dvU* S{^_Yg;3˚ uacU ^u_?_a x?\y|0}32 /LXZmICu=~ahȿl3xx:y<sE ,o%}n¸f< ^)~q &=#/c~uߛWlح2wW= 4A5b$?Cތ G]O97teOrsca7>:?skj|y`^e(rC=G /Ǹ^%P@P@R`ҏ^i˫\ (\`~g? 
Cx8wJ+ 2$μEƜ{# 8kl4 }nb.$Bu1WB.k;~?s;AG!?SHX Xێ/ˌ(bRXZn|!]w59niuoꄻP]Zn/7Qr_ߙҒqwr-ifH'aܸb.fҩp@294T1gkL>!_x?9zS 2u=imuQV@P@hum)P@&CJo~l8{o~}-:_p+mB|зw=x+J|O+2:J)g7Xrt#`wc&=l%hZRy'>?1=T%#YCWטmC_yP^t;y>a|nA(&b#qxڽ ތ&Vݳ c]S"~Ŋ  /N8ipDlrIS|ecllϾe/ Ye|B,HL+6|Ëv /uu#!SŒ8p,;#ǹ@#|Cf4?~[ ~ȏaٰ=s~a0W.}׏ou{xlCu#^t}É¿RǚV@P@hѺf [ (@ e 'f];ѠqR!Z#B soVYjvX( VU)AqW߄tL :6 Nkh9ryg;{}8˘.U MMN{,:A% Y+kGԻ upȋuY.vQ@bBB|.P(Y&Q8aי :aiw_SECu,1߬SQm!TGN!1tSo3G!v}ؘGz3" /8,!>4h3Ɯ}_4q~b,\=QcVEziP@P@60ZȞBP` L3Tt"4F􇦼}t]1a]TnX9e7c1&wv-:Gܛ'^1!Ķ 8;85?v:P{QBλgfIk,ʊ}ĒNksVݻ41g4֙+< Y4MϷ&P½#dq7LFw&44+L9]+vن2 ]rv;ؘFߏ%p kynT,\o8.@ ( (m ` = (@ ,v^c]f۲ vMY4+vt M$@äi}9+!1~;SKP;i,ޯU[~J9(0LIM;W_.!^KGILm^|:G_ӥbH,<,1RIDrԗߤ87=:-~t:33'&dYgRU{Q2w4{;wϞ3MW< _}]D=n {_]^C*qZU]遦P@P@@?[۠5BP@`^^ 18bBuWi$fm^.B-\JUkhgߜ0YzЇYz-(Z5 ҽ12s2sn Q@tڱ،OYcB9pK tlq?\?;b3cIw30Ꮗ/yzY1,vu4>8o㱌* e2N0p@jrW,{ױ35@HXmK,8fvzy>ƥ-4SP@P L6&) ( 4`4k< t(O㪲짫j]>J|Dozc-=}KĥNzxZ8uiZF͗D)c~Y{u ~nˌX.i'FN4ô!kMޢIФ#dXꖁ)ƿ3CrtcKsGuƤwybVX/|^Xu9<\tR E_}B]_։Z~gہ+1p?˃7<2<1h~NX^97[1&zM+ ( su<P | ~r! wk+~Տvw,l Z}-iN['| ?BiHb"sO"s[ƽSTJm1 ԕOOu;WcP~aԄ ciz]b]GIx^+fvPNǽkYKbKxbhèYvqOdq1P@P@hF= (+@ ʵM}82,Zd+v7GA]wΏ]c=q02@Ňʼ09jϷ^~yyɟn\Ԝwf`~.1'$pC͖$W( Nj1GjDQ( ( (@ L]nP@h7)K~9[)K2,3%X;W?`hI13ϫ0$G隳0C&.UJk+dW='eU0YZObFbY9ڲ we.cW,`}ψO* qD˭No7jmЊ][>'t[~qBk!۽gz3Kko9u0vK4(g*d\V+f2̈W*j^P@P@ `=۠ (0 q??LCF'>t]g'%'yV"'NP@P@ڭ@ _նv0P@O?B:񮕗s3?Dbە>Z<'"zAP@P@ F n* (x÷\g)X?')YX ( ( t@Gv%w89rdr/9lkLX9'k}x]ώ(\[3Wʖt(tS/p<[Ν;pݺMX6Ն) (<5֚h/91 7|FZq-:ǵG"0/fa1nVT! C{SNi I/ɫVP@ k)B}irXve'&hQmGgS:+|wxZ)0hРz0P@)Hht3 ;䓫q (%ƒ;GAx ( Z P@P@P@P@h'Fɍ ( ( ( ( > (M5jW_N;ntMɾ_|>ꫯƌz&êPn>[dE9Բ ( (+`nb=^P^3P`!aÆ]|ş~i;`25gqF.R+Pk+C9m+P5\3G}{=b7;묳7|-2,^8^{5IEJv҅ 7pC [ R˼uc9b03ln4xi-۷/Ѱ{_tm%)s=Tιz7ߜ8sW_}00alh\sZ[Y``7Ydy.:\Q<o͍6ڨ{!Mc E]tUW%]yKnGyYfp޽{ < z+ 1b2.U#G" ƢRǒǏk׮< v },#bni1ҵIybH;F|jsSP@PFim (cY':D1h/2,Ҏ!A^z?a $J1s^aab7~1!6lӧOI 0MeK.g{'5v[*Pׯ_\qI0Uzj+b[!Kc=FH+(NDўp,„!Fn Nl|QW]uUHzPsq⹨_8/wu|AD5<@+_=wަv!&33F븧1BVu饗~gTOwLEa) GdkSO=5^By3ZX.bQCM74fLiO0 qxu@.a0Cc.nVi6*CZr6X̓!|sw^(Cm[lE!0ՁJ/S1D sa-PmWxiL%ÈfB N|SvMK.$Gd<rѩ>& @88>3%0Gԏ΃!6G('iܙ d.Ht^gh & 'I8 ( (@5֚P@*@0Vds1'OOZeUb`JJŸNCqA$bՑIL aD!B(.LTG#v ' TKWD tqDžLF( = cj aAOF@*$vz5(C?)8?MBi܅+ s2aoE7Nmq=A&u2 LLJ[uik[Wy獇$>\ -NM@6#M7-HzÅuL4۟'A%G<*]~衇.X<*:6uӄ ( (<uͳfP 0-ZCGOgu &]{xbmy"SؕA i7@p)7 #{!œ}Wb|0/CNaII)b4J7IN@IDATI c.*sENT&5>i<.9j՟ZEsq;)/%Pyc."|J8F2 & 2>uu5IYZ$$ 5M#q P@P@fLƵ[ (@^ 1*!&!wXIKCuajC'ReoEl[nd)L:5a!'ΌFOYp+DZ}Zp֒CX-X>cN!Qy+pkDRn=g lri{90ۼ@.:y|62 ( ($b߁&jP@:C;!FPYUW$v#LC<-K]bK1F(1?UZW9W=z GC\\#̄qiPba2]Lq4t[ƶg)IO1|sI?o:+?P 鴆FR ͉Q?fO~B;V)_=BLӪGi21R'#mÁi(0E5 ( (pu 'BP@0:qw画0V\-Xl$3b3X"T25&jY=` >Ӆ@!Y1O>  PROڋ)^fW17R+Fi.pWB,̍ckd-s]'boX8dq#x7l3n=ϱl Qp1˱q1X"T5[φͨP@P@hѺfZ (TųbY!XEB,:Qb1Ɖ4fA` })֊&! 
<8T@,Fjb=4('bpH1+ 0z|Aދ_t+{ZۜTMe?ͥ&K%8%tl͐cX 2`Wx%4NHoSsq.lv K?-,A-v ^ ,xQa,w!FBxBX<T[{tF<*.P@P@F k( L`W^y%l3P4 L(Xq;$& ʅ6 1)+ŨE]z\A>P3Mflqx,{キjDBS|ͱI8ܲR=LSϕNF0V[mP՛)t;묳^V93 %d0^}DNKotUIxaO?΃1G#G:)L|$!җϬv ( (@c\e֦ (0V`e1lsJni:W : 4{)_)ۊAČBaBuLD /x 6Ɛ( K] Qp -2db;FgFjf̯=zE+:ʏ !<brxXcPq޵^Jmj4Kt y~BSz J-NjBҽ{N7 ( (<uͳfP 'UDZn2 <%IyNaVZiUWMh4č6(.boZ,%6#bgdM4"CNLz(±ħfvuWSIYbe]Y*~%H/!\9lF0YZ9PYzx*>&hY:03VK礓Noշo_fbXMLD[z6\Q Ix 7+UҺ|zު~,SzSXKƘuնX'kĕOJ`r( ( Bh]+=y8atҡ4[D N'x;P@P@$u P@L8aF6>]VČ:X*`:thcr=V׍L(j<;ܹ^ (UUq ( ( 4_`ȑdu96Ϡ (@0Z׮oS@P@P۷[nzRP@J֕ ( ( ( ($0Z7 = ( ( ( (FJYT@P@P@P@P`RP@P@P@P@Ru,f* ( ( ( (0 MtO ( ( ( (@ѺR3P@P@P@P@F&T@P@P@P@PTh]) ( ( ( ( LuS* ( ( ( (P*`LP@P@P@P@&ѺI)P@P@P@P@(0ZWb ( ( ( (@h$@ ( ( ( ( +e1SP@P@P@P@I `n{J$o~guUNy + ( ( (@5֚hkݻN(Iyjz>P@P@P@ ` LO?}=6pZv$9j2V ( ( ()`nʼ^U}v!TGIwX+/\P@P@P@v(`@VXa^zU؍9pe(I֟#P@P@P@PF-j} Lj=؃&T ѫn6b/eBI^ϯ ( ( (M0!L3 v{ՑCJ]&P@P@P@P=LaP_{It#=zثMʸl٭JP^z1c%s5ֈEoObvE]thT@P@)^h(!Z{H\uUÆ /)L( +{mvtA=\ft P@P` 8v{j%馛1KCuL,`BP@.vp/_P@ڏѺs/l ~wq*ձ2U KP@hFU}> _8igQ@P] kW(0]mX#HP@*L/8#. /irȯz;P@Mwk}ݻw-e ( (fGyW^ypFrgo!MP@:Ѻr̎(P]h5+ ($8sO8*'g_*ܥ (*0w}7^_=^/ߧOjFO~/Q]vVXaW&K&fK硇z'Xnh+M5TC yg={\{f;L7_ai&wlP@&f&?ϯ+|M-?¸6y-lr-_p<䓼}S34WN7ty K/t&Obc=搦a5\C;.]lq3$X/"c7ox%s> vZ{P@k+0쳧c9橧"ff_TXfX8M*j!JU6,䴟P@T`gL_1tqN3cz*]^;\syb+N^عs vi 5,R!D}ἴiKB_ 42=Z~W=_|4bĈ*{z9e虵;6r|oȡa{W*Ӱm&Pi i0 9{WrwxKXP@hF=uoVynN[}0f H!on馌$MN4 (@CXx%*д S/Is*y>\(@Mk+ݕg> S-g?#Wlfi`ڸÇxKXP@hF= `^.,뙛&͙bO?=!MN4 (@ U-}=zj:3c]!?ls=W_}uiLm^4㏿袋{lj'XlooC8,XU%kb ( uºC)wy. B>ٜr)2:˄Ls衇N=dJI% (D CaSO=uuk;cY)۽U٢.Md?K+atO7I'Lz_MϾA)l2A27|6[a 휼6{Uck/YcS@P`2hwV(Knfcش=?O"#\Bo>M74*,VWʶ4Ӌ2 (@ L.3s26hN6oPo {.t[dEbnp8o߷rK,@⨣*aX _e諭ZU@&~ Z0Z& w&%' ( Ldc;\ .s/xC_9=Xa\XJu 6e] &ХΎu- (d'(WfqBD/B(GoN~;3fLܤ]L[o4s=,g{ޚ1ӌ<4i`޿ax궯jWdMZ1s1]Hq 7tҥPII-ijMٿ7x{IJ{W@P` 0Z7܎xiy*8_JaWK,Q?: OsgdZ (0 8K.d!?|y+$.-RY2/|^43/V)V>lo0m@6\sw}l͐!CP1"e]( (0 8v^}ٓ?餓6{lw1A C0roS 2a˔)ӷo_tXn<ֿob5ܺywq/`=O{D.9~WDO馛ֱEѸ[qiIvV9LJEYa۶m_atgNYBjժ*.p5} ה)S mYo3䧈) C؂&7x'H& sp_sGD~p!`@(]4;:YA uur1cgCĬ.{YN]KW^yer_-[l\||}e3orIEA #i6ghu]np2cKxٲ282sGTP?1vjB4_fnA C03 ߎ-Ok1:vH*UPp4rN,!A xH"ЦGރ>HCݽ{x .6sC0 7 : M:YUG EwrjD3'Cʨ([G^wu"9t"!~AyQmS#As$#Cf8H6Վqd_$%o/^WcSabOd!Pp )a37Wc[> h7L=Y!CT\9ؗtjWV[p!K.!5?&'{T &+KTp!`d\ѷ`w-2Cc=6To~X%̒mJHұZŭ^qw Ok愔 ՗\Ezlsh6vI䥗^ yꩧ 3xRBw ' p')1C0 y[9ʕcsΉf4P>7o\Ʀ[|' ~O{2˂&@|ڧ~ԖCSpGT5§M1y[riq9!` j)R9#Y Ec܉oXaJESM1*AqTIeza6mݨC(ժUkH;k֬Sӗ90Ni:q GYӥOfN[@D I C0-V.߾XZx5_GO6K!pK,a4Dizĝk:FM.7xw-)m)RGX% ($DBЕW^ Eeøu1 C(ppBE ۭ[;tt2|9}UXM ҏ1f͚vi%SS$d4&@5vä+gȑIiH .c^ᜐ#o=!P0o]~}6n@9TZB}{]?믿SI:ի ˭ HaF9r Xc!PX9)Ȥ|.P'NHa{ek6etpAf8՗4 (t@3C0 ,; 6c}v[BZb47 C0 C Yv-UwnjCb@f\JrfzlԨQr]V^&l2K0"5yBfH_}TTxqݔλ2C0 AVѬ\pɵk׎aZⵯhUlO_Bm0Za7j(rSJ-~֏ӹsgs̡Ո Pd|#È vpA)eM0 C(X8ٺu 33BB=dcݺuia;v*hSqax2-dTȌچ!`yխ;l@ Wj"ڷo_PiSN2d@fFm9hAhenA!c8p o(jцsO)X&8fC0 Ch"+CG+Cf$3p3#?jp4L>}ԩKNElr fAU3!`!.w4kb${#kN%k»Թu3g$cӦM "̓]bREq .s$bvڴi$PD&aaEcq$圾Dv0 Crd9J'!-o0*xs1-Z i~@8۲e >gCcûX C0r˄uHAEI&n CvAYjPFQG}tܸqߒ4*|T-! 
.@SH*!u<oBꈧzsD.ݺdzn#5ܗi^{A4a8(C1;Sώ1A}!`E0 .8#_q}'P2 tlB-HV%5yano۶mj*Xر^~e gϞeqvALh!s,.‰(#偿!%fZFy2; |fIѨr'R8I JGnteMoChn 2!`!PK`zwuqp 35pg6l0!-h 9XΙDlP9BA^: LQ!pp0oF16}뭷z˗AKp1h%ԖsoIl)#Q ]B"޺Crh,Ξ)} &4j]zʕ+kk!`@D5?x֯_r3 3i`8!5C\dƥ&$2 sꩧ^~d'{o C0 l `޺lf] |n28u+V .J<ԩфnw=ww+¦4$pQD.)oz A0 C0 %KK%@*\[LW~}̸ZES(4D Ȅ  ᤣH͚5>?ƲKC0 C Xݺ"f@@$k׮UB:*^3z\y AF+ C( ^#z+2]#%uKp6gK0ỷD^5SR)EM0 C0 ./[r%df…Li6pqz!u0ݾ(q@0|˶"Zt%0 lbŊL 2C]6D cBC0 C a1!`9o޼{ c< l' S_A!=K 'gbHdW_ݩS' !`!Php$+|2qH~X ^z'tq ]Îȑ#[d 0d2CduSfg_ C0y벊 {.tU]u40c:U?yT_KvL5=|vp\]4"X6 C0 B5N:Jypkr)NdW|a 3A>[Z{abK5= 9 b3ڀҥ d#$ DLb!`y C\ϟ /L2*)uQ֫W/`yϠ)i[wch8.8;|%=srIZhѯ_VZ 0rIk!`2L SG,ʖ-Km;VZlAq$*$ޥg| 3"cOM.!3^ 3'r+a!`D(OTjC?8 'GIcf7!).<&H ۾}{t}rѾ7Yn8[< .Qs 9s/S.&4 C0 1{3g>7|2Cۄ 3իW'e˖#_hG;LÎ)1st߹t7[4 C0.I C, )GBpI.t´ȰH Ԗ46odл􆈻knk.˕v AZkqaG}t޽=f!`!̐ `gy`˗\2«) S{9BKt;@B趶 gGd; ^ T{p4M6?#>J{3feh d$F蘥)!`@Abλ'sZu\&pMH$MXA(yXeA FoyRlJ#%(wBOGb9PnhGfpQ\e0 C(4hPQ~~{vC"uOxaÆQD _BD6##^ۺuk2)2s%yHEe(DoE% ܂BR6>I0sȯ!Coqإ/I C0 C pCf>}˹R fc92\u A#){$(sˑI`&8`,lF q 2CI Y7!`@DnO/6m4iҤPx[IH /L\y`.Bc,{J ]0qM:sjGEjJЀ+cZ0 C0˄Qǃ#Lx]ajժ~:r`y OGD: nG0 хqd,C'\aG. C0_E {XCP!믿/9ґd٠AN>dS;vWPH<( *GpF8`1b2J՚ :t֭ 7܀r!`!r5k֟O>1̈́>ùov̡AY\fͻ :06_A㬳κk9}"ek!`[Wt޵=i>B ^{G4G09sQ{p8mzb!(t8Mm^quOLjƝ yx-Ar7n 9\:[P;qԬ(X0 C0V~x rqd ^C` 3 p!3`,/Q/!,ZR 1?yv0 Cp#`u _@pq+.ӦMX # [nFW[C:#N>ޭh҈$D'{v2g7G~+O ^†]1;)_TV k$AMh!`B92GM2W]\67)$P2W2#&O Z)s $ @]]T%ɀJ6hqhҝQ}!`z,kпb{|bZv%>CU ]sӅBѨ:ܬY3 tD[Ё5][LUMl\k5@^yv\r7og{~N1VA">J*%dC0 CC-x蜫?]B:rNb`:b d>$KθO65q2 g&V2Æ%uHgdƽ&k@B2a붇=d৛>}:U]&N?]2ã Qu?Oxy͒<[AaT$_=Hjb)ۥ!`!`c a 30i0 C0!ggg1 -r]9>M|IEdn{jҤ ՗A"mёF܃뎹̈bd&1.aK X ݺ!` |FdRO>HwɁN|ZjE:\u^R}r[m?Ƶ)ΆY:$L&.9B#2SJ41b!غ|RlJ \vڴiP|JP:I|:餓;8l8 G%b?VPG,GO?]h+Tc1Ջ@b,9;0`,L!`@aE2úo&d4L)QdjP)ɳd$Wݗ3]:a ;!y5q}!33{饗Bfx PJHF!@MߤtĽ:2 ~e\}|5!`[ߑ0"e;$9l(y"ڵ#.< r |2a]y啐:|;ӟ4hРdY"S-ZGZYA7b,k#2:3nh2åG-[t~MbWlK,;Ǐ'2ӬYkd|vlyis̶ټ&e`Qfغ~۞hŎhPlݪeދ-:xӲ ;oYlF5ʷnP'=:2|]_;y_[冭{,׼n+ؾVϭ;dFRW*:ړyk`)C y뭷؈f3:M0^4| Y2e/3 B7C}+}e=g8#AEߊ*95QNIK,HUZ3iّ0Jk}wP{_jԨk!` Wwds\N>o6y_~9jYҀ:mޗC:&wOkg;X愕zbe:%|3Yu K+FI ݲ'?coνg̰{v}o~Em:蠂 t{t`HR݌5."q‡Pz̬M))51Hͫ)W4ewHu%:/Q!9 .D4}G] Lb!`C/Wf|Ƒ־_#3S]rGYln;i|AsfJ+ X(;n͐ca5PXF)6/y J&,3\&u׿|lN)d~lvZئa%w,Yr3S;4ҶafFUGl*eڶAm/Lc,㵐}i8.r֯P#b%Ƨ /MHd/Ol^֭|riz US(;A'!<<(t;N9YD[ΈVN6Kw#& d @. 8a.P!Z~L81կE|X^7]!` Y EW^6l _r: (=,EeQW ikg]O9( f)ךqfޜ/ mӵپ}{(QG,5suAfN?SN9dvț]rh{L2u:͏:qf߸y 3g{Of_uwJ n零yމ 6'^7׾Qwxs6|?ډE|uӡQg~݃N2vΆtt#L!zQmgJcשв~ų;m-o2,Q6`jV.pVGVì[O_7yG-9k=!?^{5Q1gˎЯ_oNkN㭉F^O?ޟp_7q<˶^\WШT,s5&Q_\u\_~nc¤_&6wN[7Ѡ~tԿ ~АY+W h*ݭQd0o]}H x:REV\ U^[jqkFFFڵ)̦RhR r_GY"DkbD+0β'l˨-i5g'A@Z-:aa+TsYhAv!D 8.5b2l2k,~ɐL􈺋 C0 Ca5,g?<:`U]en,d%^uZ]:5 ޥX(DZPbk,8˞ܳ.Fijo除iY{fKϸ 턋|g[z5ͼ}m\Cf'i8;^A!<;4VPJn,|aM6t*v޺I 8܆Uv.nm ds޺ xjbgE݊$+qT+%\[[c2/Y~K/FDVT_uRnGvv_N9)stDչ.eI =ItUE,M=K/3[fݔre8f8=m^":8.r>\Vᴍn"KJ1 C_i!`yE g  U#YJ!3HT q/릾.nm,әLJQKT~Y0ah^"{x|h$3 >+%EY!^!G9ɮx7G/gjw:ͷ~zxӽ/zSSFۜ{_F*3t opՉ #V$ ;FWj +N[#{jUʕ,}d zeHGXNy̪KqBq⭛˨I E# w$ &P/Lӹxy8`k׮F[ Z VHP40N9\O#ٸlSZ ZYtIGlp\>>lĈxp{8c;$8&4 C0&Ā >'pdFљ{NhgQH┃0h6~9)M1+rYNFJɓK0:O]BfpQYbn6|_*Cx]eΓm]cO桨?N?7:[&U.?I^t;W;6UE>:e/jOW<2q޷#.X3*6c7zwJ;b@jW ?S+N9bCx@qՑ{~;ve]BD=C8=1llQ±-hWMlP=W9&NjKEnO}rl;s;w\vؑ쪃vNcYB, /#KFQwKzz"5i$h- 譨 R.$ g7K>HӦM ${e>SU3!`!pp@`p< ,`)#<;#W]p= y(:(~ЈwZ"BgMʆeg$7eItIGY,k}tȃgW 4id&A '85 fMh82W_uaV$*$/^v?K/PQeŋկ^BuSsXjjeGxz4qgAs%/;1ߟZlc.A<X'x1.]z:ue;C{W[Nu sW}ɿ'->vlq^φNuGfu:UcuT,^pN.;V`eEy/OM˲LԖ8M6%dڌ h](MW& QF~ʞ2S)-Y2wn !\JqE|pۑ0޲) P|pؑ=ԱcG 5 C0 ~:N8K[M*u>-Z%QAGչiZUV:ҋFoIZKSZ֚YRZ-]k82Î2NBfJD7 !3 -kvq?-=vpə˷W6~{F C2pNu|s*xw-6p{`g4C{LgxVKx\݇#]B_nc8qyc-zm tS`+D}!R gJf69b¹q`3{*"ܰ{ȩ`9s&u8e[P Bmݎ/$*Q51@Z?zW[H_dvo,cMyeϲas0W!p*U=qat? 
4 K`& 6 C0j{W?s2dbaq>zg %YYtl֌}ie4a>n& b$KsgLDz(Xr ( I܅xd!ٴFYLtZ,["Шf9L tZ&uuLV[Zݹ݀~ϖ ʓ!UqH+;lnrG^ֿg$6:Hnt5.On1:/l4SS4K|q[؛jɿw~˪SC 6zRw}DqK:q&=-;m|h]%k*Ό_C^%؟x: s`1~xKW/{iC `u3oݪ>MTr=hp>-v%Kd[׸f9o ,hY( %uuUDADsO~gPAIkP,gzG>X8b:@Tg]a;qרnyZ:Uztؤ~ʭ QvY0o]!xcקz8(VL?َ451*BNzf]79h;ikake7zoPYk#" =9z.qϟZjӧe˖z颓J,UI C0 \A ns=G08k^CJHi۶-~:OAz(AyP){3t27O9(O_!#A'tvA "DYyP2CNgso߾mڴgLUADZKC ˨|;n_Db:v+qx}!Wq+箎A}*S2kKn66' ۺw*}ߞCҋtg=^2x WFWt wRIG/ߑŏ[UCS#O%tK{-Ǚe65[D [Wߠ?~:>|8!uuUR裏ի&JYj;r`+s7O|vw <OG|H[GBt  > C0 ! F=2 &yYV蠲tj;"D?<u*/堅MqZ:2!/a2ڭ[7 {<\棅6N˶[} p0mj]:)ٲSlq DiM'wݻQNؿMͿ6۵qZox]Z38cb$3wy٭ô=tظ_/n>rUgZu= yf;5r;j!ɿ]/ -_t%Χ-HhG>[WiQ"ꂟxd͛I ?Cَ-2ʚ%k溲Zws`dz> ZM;AYKz&p&MB(e5H 3-Y0 C8͖~:uh]BY1 kܸ1Ze&z^ɚ]egMdܝCg0 &P';RSϙgpQ3F`5HBfr4!:1.! <4uy<3/z{욮*vŏx^{s@6K#SN0hĖG?xygg^t}Yt{Qc.͙=״v)Xrx)|>w ϻeE^؞>c=;F5 >r']5}EQN"\~PBT@qDNOCLtj[b\Q3"r=EӛsTNX%Ԗg>1j3Y](%H9v0 CHfߑ)STrBfȁ-ddzqN8M18#"FMj娜+B+؍fMG+ٞ={"~H n 6 Q=UoXśQ`=U&lSS30@pEv!xꂫ.p rj֬IPU]SF&F$N?wُ%W,k1sŸ3ڠ"M\ %^^4ylA )S_456 C0G%Ν# Gf 0 b-͛Rtu zR^zCܲg0'A=D'fm?gF'#3l:+!wq9 sA[!A3[ѕ۵3*(9סQC2yHyRBd  e֬YTuDmB O:wyE)y&U" -bʵ3(ΈlSS4# QES'2Ii$w-Uft9:&@HFFFaIY%&1 C0r t PS,̰DdOgEcRO)z"k#-)-+vPYhFGԒ)+j$*qڸE'B7%1lì[=iv8J6I02ӄE M}[?,R׾}{ o2N?ط!`!Io;2Е+Wf9ONJg k^VI5Z,ַD)kXBQyT=_E+h#sE9Έe8=N׀F(\H+~: JuA21 %T}>R k6.]Xe]Zxy+R0<,UQ瞣 [|:ۻwoJ{8@.jZ(J[!ȵAiPȽŚ44E-Й.Ŧk ʃBϠ\)gk4ಮٳ7n%Ν;ۆ#KJ6nи&1 C09矟3g~ %8 2CtT[!sKG|q- 3ܒ0譨Dq)yb2×K6l@%͛7p/<;zH 7tFLȟp?_*'.'Y߃܂P)JM6m„ cǎKipYI{l۶m-Fsͥh-OGfx`9ٯn)L9 \x($!K'w C08ӭ\rFP.P 2C+q$RlY@:%jYb!`@wIԿ/bMTS$s͚5իd?,@9RR2,BozN5a-ɪeKC& 7B -8eY{a+QKlL>i iv15C0 y[9$!6ay睡CY~sgPJ̮ )'FZBΕPOCz"ԚrFcYn ybA P@ N}%:J+D$Br<8k(AUJ9eMVT 5k帱֔48Q'OӬg$Y'w7( N9=* U_&<nۦ^zTu [<ִ!`!سgdfȐ!jl%KVZQGȌ[=\+vPYhj#qrmPA堐.qr&8M'LXt՜AyP(6D_n !W2K. L聴՚6 txioOĩP]t ,!`޺,e b .˖-ӑ#ણ;u֥D1Cv&s8WaWQ;9':A97릚w=LIΧ)l"wdP}I_D8+gꢯLKkk!`9D2C 8NѣGCfd"1@] 38zkҌJgtu9\ Ȭ7C4vpqf ʒ#I[eg\uIxqf-C6w՗oRW/+v~g_?IX.4S9E7l &y nD+F0ۄĦOAa& )-h͠e'*8yxP9( ;;zD=Cח /:FBY2:=RՅ\Wӵk׎.WIyFT҈)!`,X-Afp|!3&2cpֲ_xjG)Yeي[ֵMi)pb*pF[AdMC0 C ^2b]2a^e]F\$[^%FZBJHP9}7,4=Ɗ^"#Fd/"3f̠!!Y X UɌL[dĔB`mC ŋuYA9m} ξv{}饗`0&MdLC<UaG1f X4)J\KElD[֚Z.tC+{3q0AYt8e-sq et/oDOw.?OiӦl>7jԈP[)mr7(ݕ0 C(`3fĈp= Y&$?BfH}=˥YחH_j޺nKx\ 5B6Z3e썘lY+At)Qaq.t^9K3|ƕp12}&)IN󝻮DEVN0o]NгF%3gNY~ϐ ڔ C0 ^̙C,d2rFUʓAf8׬"|ŭe4xN&,5+fY!DRq%w#W$ *[%ad+eJ9+==|[i@ڼN+խ mkAO_7|Z$Gq?n5qLJ.\f֨fس!\,)(oOZ5y͖ݭUи s8EuQpo7%nb۶_խVi ߸]FeE[vd,]n̫Uy=7|)GU,uˏ[iiBq˳\ͫlQ揜~=nӠoNo@s- x7,SXe47]AOڅT 0k׮}GLBHQE.rMhDH-"G%AZxsT֤-e\FHPH86(8e9fyܸq֭~q$Êz+( u/k!`]ꩧONUd2S\:@f8d K;NEY$E.Ƶ:;ZY[NG`#ygĸ |+UG 0&ѷo_9UGS$NG:QyTm$ ҌɌrS޷e̳\9m0jٵ=h`M^.t:ŏ8w1u_ǹk?5͉+ꤣElÎ?4EE]}yG5wݢ/hT==b2uC!8,Z/͒,VoĞN{q\_z=Sot9gC5 [8ܞ" }وNBvΚ/.DWAsٙt ,DxrҎSXI:bu[" " arJw)t27dFDsΝ = AB꫶ w+( =vi!`!{&L6uՖUφZ'YwRem\SrWYgVSq)q"Ӡ!F┃r-yzFGyB)322:u?]BkY){Oa@8n@IDATyۿ:ޱrB2G=YPC/ u*hW>c^ԧH\#_?[VyϷڹվfYw^~GT#7[& >ʨY\W{Ou_]uՏNn۰rSO. 
"+oPyl9?N_&J,`3T崬.]PޅGح Hn#-WN_˞w)S=Seqܤ?o8앯K.YB#/c˸q`BC0 " r"U>2]vm߾=ɏ]@:J$΂^mM+”Ƶ\Tep˴ 842C0 7kDg!nƍPNbE Be,, 船`nqv͝:4|fz[w~ͅ-۰9=fjvjR,QOW3y몓6U^ J[jtg!s7mclz븕\K3"-G0rꩧsF5khڔ5^Z?NGdO93ʈ2]͒2}ez8B9 .WB c%4 )m?~j -C0 +LiӦőwԧOSN9ES.1zKV֚^Z?NGrJM駣,e,]ٛFqgSf/-5.Zz ]2C ⨅p]p hK"+:uJCnUv?ǹ[niT~ tm.87clKD!W^ܷ/%]گQkuC&SoWǾUkKh#'P?u`],GQKU-h='G'V\!`!!СCz p{.>;ȠE҂7=}%#N9ᒕ AORE9hA i\Y k7"2C] n;Ҝ9HgϞl=Be Fpb3(twnŦ5 4Fh^1s!WýߗkxTY$3+թ͜-Ssw{f׺UGc*"ߵiK#usY'3׻ L"cnU*[¹$_4͗V6k6%Knݸ_YFU7kw![ȑgut*mqT G!`޺BB oRvO$ܣG6$Y檺I䞅QB)<(enE,xAyP~:˞XB"iRGԑFġi8Dv+hbG um!`!~Ry  n;-4 Ch#zyPH8ypqN.cyfB7譨$8U~P?mAV4(~: dJ.W)2ʵMkYB`֨Ffk%wF mj-A4-v.S_u/n^'SJ%>!ɭ髞[ws92B4~ԹΏ%Yjg1X\ NY'Qwr!!A vjRUZp `޺ S}Ǹ~H u$qʳ64djM ޴4CItE9y&ޜ28?֭[ᵤr+_-B+ѓv:̞%25 C0 q\EhxO!3e}I|D9iK`C8~6FLP)I,)=X"&C+~:;ڵu4 ~5J*l)- S0+]FLRXu<~ %mH&&$ϩၢ*܎=둅}Ŏ8p2S(_& d{, g6ot-NV(_DР^z}I_{cneF y7po؋oOP?%P|j{0r:X˴]DVLWY%)=DO2V2R?YiOV\iӦd⨍y]&Mۥ!`@GI&E ˙; ԅVPj4McGLb9xJe@[J2C;T3IV.TW^=N:"XDq/!xiD]&ˠr.2]bqG^pQN2@Wo]ZfV]_9W\ub?WJ:Wdǯ4[Re2]c+7HuʤrRkWPRgyϏo՜1nFđw[网~i޺ IXFE^fΜ ŕӻwoxgXFi!N9(n\nXn踿ΎV֖AyPɃsN/({\-YMyD"PJ^.՗;wL+Qu~:o y<*qqr1YRm!PÌ; ]tɀPŌzbG$}2rp 㔵<:CzgӔЙ[A "DY4iɽˠrP`YәʨZ!T^^ld!d?<ڵkx:=w:_w'_gpqDZIdŖABH !1 y,*j;wsuUau^{)YUknx`tlw_;ZO١կ(Үʏ4wwԶ}v6}'nGC#0uk/ZF-Menw}{q`$ cMɓ'dKZG锶-J\%I5W6ϨsW^cUb:Q*UQ_a }˶)i+VXnj乱%}[ @5i݂l!`0o~H&ٹhXXYW?яA J-Ydd&=i'K]+{1c7rxҫ*C~B݆$U\1ɰS8*x@feBfģ 2hݨT{!cכ+6&!^|̇?swbzɋ]朻>>M/iIB,)RIR Dp /ZRc2;Á 9YY'Wd&ViFb,e /bGC0 w%newaݱcǘ|G4t}$tdJYܮ3_S!j@!{5ӂZb\g煸-5l1d 0Wt|MtS( Sd)S]_Cn&:+獗%fO;uC_oXÚ>ܯt7+KO\9w3^χǭg_;3M{_ p̒^`8靧8VVъ߿D!e->C`'6dX6k[; RXdIJ&)oR,[w~>lӧ:qׯg^V@B{ENf`@pxi}.FȒ)Rm4ULR诧qc,c! Jl:2dv{}da4NjۻoM6 C0ṯJf8\ ;tDpD,62Nz%oߤ!8jzsX$MZ|9{>slv0:3 d!X$ɷ+OT䞬N5jFX[dՏqdn1+h_z%&ῌ?XJRȺzv^e,a )$'5R1 Ce`БòC2CNgʕ;w2 Lɐt^UAU•-^t=k,N]'ZW!,P'`yīyü̑dvUvBfȺzɌ6XUR\%EdY1}rOĀ}~wOY'Wvyᘓ ˧~WuϬ kGq2CmDʘ"쯷5_ۚ[-=/?yߊU|[F3YuGW`㇖?v-+Ss)>7TP謽grsZJo?󷻏;Ō;jqnj~lns!@[Fz.\@ΘjGJ[ay$}XKYJATp*1P n)Wx[2[U!^*qŒH dlV>\&Q [ }Q-zR$]v}t]!s\p!&bz{4 Cx#@b 3f>x*-Z^٣ $j`9׾6g/,~?Do?'~cg>~ib!_.$x~|灓mưR 3'օ v 0n߉֞, ;6Ml?9a7] 29[ 4TކZ1FPpġ3\W[Ud>-BQB\-Ǩ-`. B& ;IY?n%PrOT,ä2&s+oG𨸕"pA~!J?>%feqԹ[Ɇ!`t1oG]*a*,{]$dƒ+ zV]G:2leYŌFrBfwj䀸ޒXOhC@?SS )xLmNϰU݋'gXKΝϰfj0FP_$OzMx7!`ٺw׼2,},e0_56mb }7lJ?:rA]M>!8j:*1p9v*b^Ws!c/!tHNEKo~ B?qz6a\1$^zՠWe}*]Kdի1J}ƮU ѻ$kLj) j{;v0>8gΜG}+^JyOg4N C0  ǹd,X@ / .fۭZa-Ne>Gr_>ѨAJ Ulչ<kzFV{79w+zWʱ~l*ٌGn,ǿ'tbL6 bFuu,6?Fl’XXV.W#yǸ4i#,RMUIMyxs-kT*NJ \xc[>?:ˆ;~Qh ,`y_U'V2N:7!`!pmзo0, aeVIJ+{}s uIbn7 ^ď;os/@cX:|rd@%I(*nso"O5?s?s$y.fBEw2h)(^C ,[71GD>/p)`2ʒs=tT}$(zq- n>z*)'5BDƠ9" ӱf "˔Ƈz?(!G&IMZ؇y-Uj&!`׌,ds۵m6zXՂl߾}/3YL%wcz[ 5F(CN:ɨN4U[eOvl/^12z{%_%5j}URxy{ "*?^~x8@Q8K`D02!Y®/kf@"`ٺ4XA|όK3nΝP.dl/s7sҽŪޫWb^*3uƼU']cכ+u!. 
,e'AvuA O?[׿nk2NGC0 C`jrC$@fX4X( b,ܞ:cƅ{֨JxqHH$dok@!7UD61~7ac/ CpvX+bŀe+X ׈/vp\霠_,da&Ų d"i䝫?6|n 2泐<]VՅQh0 d%[*gQ ,&1uk=!`.ֽ'b̛d6ҝyymTz$=[J#dDzS ^Wyu~o0,I1n\$: PѸo5 bCuE,"٢⬃;3a(޽{Ϟ=BN Mtaa+s{8y^eq~=o5 UF'F Oe}Reؙ,E~O$kw51ʫ*HR{^eҧjkA C0 "@M+;2y78Ss%͚5;6n5p{7ޫE2ί筆B uڨ{Ub@#$= .r/+,d}@̧{Z$)z2S5i IٓuBn,Ex#6РO4!pS 0o7E,HC }DY"CǒXḬ%2k]2Mud6*]vTJ{].#Brs2 o# e"9PRZkTW0Fn*O-J0 Cȃt%DKNYُzԩ,̸3h`䝝v޴[U0  Eb:pcҢ"JCȭ[u­q$m\_ Ehnُe 7X] qgNJ` J7x56+aE ܭ[HAs.|nʪ*{#1b[[!EO+0 #=KEy ]G `OS9OeL^{Qz_iT&!`snZ gܓrz!3̰̰t9b,& )v1v5{-ct<Ɩ۷ogn3гMYXtխd7һ>U9wrm^SVgB~f"&y:6TzꉍKNgV!`.]<[1"@_bRWZu`AO?4|o.WSKz2d#{N ž֬~"6va>|֮]|:-;qE**Ou{  JꓚXHh!`\ęIF,0HOͺGr8; ɢCRbwĉWU]]cd Afdǎ, OLcaOް:ninY wRe~!?ZʄTCEFRņ-j@1#`ٺb:ۨ!@1gҥK;FB &dX-fv̳y.5H5 1e~Xje!F2sg pN]3[k<7ۘ2qm1*>q.!` 0l63Ou'_%5I&}*cGxЏ7 C e뒘]]) %e%,$yGL Op#2T1FƮ[-n<Ȱ[sP[.cpYa0MsB]Wvpk(25 C0 1E<b!3d؇ 3fc v $^ҵ h{uu!2C/8d0q0i4Qhaj Iƭ([: %mqj C0 1EHTqb,gC1׌1$Hc2'3>PH]Z$iԨ1B' ,=!`\g,[ws$V^MҊl6vvB4̰ىp]:8, n qJƝvc>.2w0=a@^ΡT!cm]L{y"J$d  C0 A@z|qY a}#Gd rlJ裏 M UzP%·7d!١X#:77!K[LIqʫ ǢGC0 C`m,{16lܸt[rwEJ*T@15v-U_zpQ''ODŽAY ܅ 20טmWUj WJEɀMc!PXѭ!$#3\n7XPy+9dvy3գ]t @.lX磅 kcx`*{5F4a͜Xtf.!eسo춨H iRSc>W! C0 d]ѕ2CEVc;N ;]n c(#dYʢ'l^H2*6?XTƔMe?vҊTc GC0 en Vu!Lr-C0]8D63ttҚҲ*}$}r);%Զ-U(tww7aU/I:Ne,jA?ln٤qR}>i,>Yi C0 E!EzUd0Iu1RX˸JS~"GoU^Jj2Cꫯczd3on:Ȍ \Yv 5D J^2p͐e$` )u RZPcHjUZ:tlu}U[:W ccjo!`@!`ٺb"Oq!/dDvlAd1%Gǒ»{1qiBg; KTͰl)iX^ӽ曤 ,} 3]$T8 2?W2N׏b*$-Mc!`?zGABc̎ klgǴ;3dJBb=e>d!RN:uY9Nb]6WZ_'^ BZ'!{ק!cޫTW1أki!`@q"`ٺ.UAe 3..!2Uc84E  Wڧm~,aVڅ{֬Y!ȸ: خ cX{.30 C0@ U H)vp 2kca2-H10o%Fj,UE!TMV@f vI&rE 1HX~BE+/fi!0Xn4W@mX7a  ;ƲtӦME`9 #dX+;aL+$l7xiw5yI2 }ArӱH ?,E(Ϋ*iyBJ$ds+!c>LjZu-M6 C0bx;_G I 9/x/| 2bjY HE$f7riwkť.v0ò566 weWog5*:VH$d(d'Iꥮ) C0 `.v##!d՘hhرyvּ{naT$R;i$0J 1 B3l64pqƢRQH765kz2NOe| KFz%v7 C0#Gbd )R@2 dX1@v? 3HVR YDVjސr(Gr'[YRWFU[Մ, Y$kU'[=O[4r-h!`@ `ٺ9 #p9b 1-|`;wd e^Z +SԐTp"dgv#uH a?ӑ4ݲ2WwuU1Fq@X^SeHO%Mf<EآNvu^ @j-Zxb=T51rifl!p+# {V"d>*Ȥ'0Fõ 6)dvp [(xE# ;Yh>#[1ɓ0 X aS`oUxi?Dm!`uFup bt^Vz酴% ỉX\\,í\qF s{0+aƠr,`ǔ(S)VJJ2Vo1kUR0ǐqZ8:1!`!C I1Q2PV+p$/DPkp\www 2FD+4XI4788[,zWC)*̠a82$ SC-&xJ 12.P26Mi!PTX>Gd 2kWy暺{ݹ6',Gd0]浱&AΎ3#X VF@RG2Uʐxk\pi`*50 C"<2ٰ] $bbcK*܃?Bf\$Ȗ&̰#FAyd <1J2+yCR#o!3G|:9p oW^ w*^CU"x^*\1soo^K "IM0v7 C(,[W<"BeJ$QE JGkqiH GSzCN ˗m_02ng; 5mY. Ib@;KE ߷owl׮]Kg$V\UH66Q*'5!*)ҫ7B!}*Z !`8n":}rv]|^Iv$ܾ-@!ACYlȮpY+;ărHO+ԿĠR(!38$kI28̀#x|nq}!㘙>zR]%WIِ^z-h!`@!`ٺb";1=Bג`$!0b %[9gm,\1 ϐ{ޠ#I~e0CM-Ö;x۱?l0 C!#+Xd)V pJ]7f Vvȅ0h+ 6$ Ru$eIOǁ]BfF`kLo_')R6+,(*)dmsO)~B , \n4VŲdFe#<v4Jƒ+r|" ,%dYuǙ&[ޫtC^*vuvdK35Uu5*+*KH֕eLf|`YeEM]Uچ򫿂s /W޷^ef!`@"p(-0CERHZ.|)Cp$$mMz@ #ƪX֮9 jHJظoT-2m6tE"̤ سn&N\ȣO$* @IDAT1Azʐ)%wф:40 C SBcdFfNf ԗ* 0Dpp.sÎhy;v݃>x {t'Gc10 3OHOq•@^5it]]-/Wǜ9^}꺆(OWQQ^Z!^ )ɒf2 zB[ٮ澲꺪 dܴN6B-T}g=!`+oa R5<9l5ytU&FK#L( [ekI^4.\C3tiܝ}YE+['Lc+?06;c#煸T!1׃) C0 B r\&"3z]Ȍ+J4c(ݢʘ(9d֮w̰3/d6alBfXX%,}Řyl y5*U_qC~FysHє!`@@?+!Ŵ52bL=c$꘮!{A@c8d$~y"{ 2CcFmW}Ht8=u]xzՓgoTY]FǰkggWggGwWtZRf#淾^jl|,&qӧ]r̩.;RVT;a Rq՛ !`Ō,p-6C@ o5! 
%g4^<ͨ_;,+W /p >쳇2d͚ryB{'0cՉμӶx#*)ҫ7Wҧ2aȏ!cޫWW(zC0 C 9.2#4; ;` 8.,xuOBf7[X r`s=Dz6 3LcdfÆ ˗/-ykH2F1CND}Dyђ;_\ ?L G>tQN=sΎ^{d#_չM6k,}Ʉʪ4}Vc'9z)5M݅yZ$aAzNJۣ!` Gu7Xq G&aGN!8}b)!{)톫#Th.y:ֽ:uGAm^tUV!8M!j*a‹n/{{T'IpK-{ E !`&ɾ#Qd1P810Q 2C2#vxHS8EDxWgX cc$22z1'6LƱǤFݗ6O5{OQ][GRslؿ'Z/uwu~3 fɳ W3wb Cv0[Z^QUP_?q9s._~ܳjJaƦ Sg9rw:/6M) hخ2>+{e!p}lj+`9[(p\"e* SM 1KB yѲ0E2fv0]2w 5ђ4|!Q-^HbXa hcre Ua!`22ttIhyqGXZj;2CE.R"̰d Y!K :ٯ t, k )d3}=mWf{yKj Y*=G_/]je?%I E\;uƾlW_=G.;q䉊޶uwoxt'|>0eFMصܱ)#/wؽF_e!`@!`ٺb"Oj \UPF wvD]lZ|uR8W&ZFbgO:knܸƎAiɲ 3n.z 5&{J ҧ2<:__SqǜqkLb2oJz4ԖwvUUYa֖Cܴq_i?>Ӭ1\][?sƦIwo;sPYUMvih!PX?ʭ@"EYp/"@aZlfG(gP\JgVoK.eǙX&ͬv{hۤFz_֢s'^{2P C0 C@!ҁ:s1orvgǎiBf`8]]PgG^GlC}DfV:{ުU*4~KVv}յ L,ٜo8q??ۗ(7oٌy+sk^T[Yx{W̨(ΖN]ΖUdɓ/lz_?͝;7eoxs;r4m͸*˫XRyJWv7 C(*,[WTÂB $ Ec~.Vzb e䭬%qi\ˀr *y$Z"as?S̙î!\F[x-y*3`I C0nq}dq, e2ã(1ee a;Lo@ft| W.6$8-CE0|ܑ;Z7+d;wt6c2]9I:E5 UWQVhfӣkgWWW]RV4g.w^h9{7%%فl{'>ǯl> !XSx۝VUxޒlո\iiͼc)bwC0 en,V{AkY! @ 0T0$.ׅ C}֒:lƦu =JULa !fc_{kOdƩ1/_◛7G9lt D3JJ dJ.vk+Puҁ'/vo! 瑣زANwe3}͛9r_}c>c:TVU_~WYE׶ Ԍ$34NkƔ!`7)>ӭ$+)؎bM3H1.9.M' rʠ4x`f:2gBᵬ'q8CzF吱WU*>ƙ~a2f!`2O$=ݘX*qS3`@`pGw80 $Pc$}1KG9eu]P ɭ5.DڨʉטŻ'wü+䉓?~{;7.ppE"#ْcthJc;g.w<2m|}wc/+**ۇn4be2}|̱#O3O ;oj,_^QQє=B!40 Chl]~[70X1d(ܔ0]w̒clY-88"J^hʫXaC+Z0&$= F둊4e.nE!krث*qV-rWN%$5bGz`!`nDLd dFwBftP(wwn w Qdb#Eb1zzswwmDN!d+@^:sXl&/y@`_W)[UD^A;ZqeV$rrO_fKX\۝\,/DR(+ʩw0p4H@kpp%o~~W3g/[3wp.fUԎzB )!`@Q `ٺ D!DT RQ;I)MWb)E[㑔w цsn!IKu@8OdzOq@뤁.t|>I9x" &_:/2I&=bSS0 Cȏtd2^ 9;>Mz=@Nd]M4[}7۟W?9sfS݂k{:9R .S!`XNlI[Մ]]]'M ĆhU zDq%3F6Yz.\CxkOkLe,'N]dRix)#_6iҤ3gQtdRp[8z(fx1s1Çc1``CAIx+ߔZ”Yf-_5l\ޭfn50 CH#7dGM< 硔? 3L%)|2 1p+P0S!`6~eսWovdH0"Ŗ'yvPn诜.gEb.a=6ּ\Uu2wyE.١)dm)szJo@xf⧪vZ.](-(F,vOk+n!`]̭Ƒ"?]X&D>H6]ՁkH-P/h12~ГLrR\!f1?c~3lqu%~jUAURkʶWJ"sdA4r: )sm,^%Ywc<[6}h]l>dwoO׾q ˿Ѻ8khZvσ;&6Opһ6&!`'+bQ : %CG6#"m"%t9"GW.QF|)%oƥʐ+&C(x-[|'FAe$H1  l~`*s;1nҥ_fS~ 6o޼3fcO@P.z~gn֭[j'Q\4b& _|W_eO X׬YCA/ 5䫤F`!`:BfȵʺbdڱGI%LI/%mݘ ɸJ!?^!㐾pV޳~Ҵh\\ߴiZ/UKdY* ;7Imc\!f AO Fٻ ɒ8ΖqT,R_eA(6a+{m{emem~$2I!`7?θ9B(@/ g.p$w$Ýzl3*ĞWZ#EYF)C\k緂}-ӱ' Emʔu4^pRK)x̡#GDŽ1̙?_ܑȝI(VlJJX ;=M1򲪊Ɔ)ՕdΟ&W~6m"aw~g~gHɏayEc0 C]@.\$HI"3+t"#$d&ן YvS,_ w$կ~YrT2kQb.6I"+BWǛ]T$2K^FӕTVԵQޮ4Z$;2uV^-qKkk6yxCty1綕NonmTTJd1=!`+/b\ /2, X,JȫZp&x\iC/1EE SRG>hHmܸ+_ KM_Er^H , { Zvj|m5(gȊgƺ _w\4"mc.e,{y΅M>~/}K"??_{Gv1q=!`#G@z"v6XzظƐNb<(.9 Wv@KKfs6R5J#T]nȉ5&qL=VC dڛ_t9jv5n.%O[VΪ#`$$Bi9w9oC4J 9*$X1e]5V:|rWoկfvL.a`Ge:˫^|qI@u.'͘;kcGO0Vj:+1!`@q"`ٺ.tT!‘L##7+lWrvh 8F<,ǿb '$HE XUW!Z 4<f}_dĞD\Ӷo8 ULP!J(sWW3nՂjŋSE[W+:qiy9RZ;^l?x=jgNnoټ'.?}ғw߾zシ舘{/v3]w%Lc9uo_1^իWo6Kzo28qz6)`H҉i C0nB]FAZ:n3ώW< Tp!&sq >eK-nycԋ^ܹ G߄T57~\yձU)*XrJç/m--twVVD Glp%7Y ;Dݢǜ6Q1d}<"P(d+lRZzq׏^a:RsY gn̔F;ر6]vӳvm;וCx )[!`+oaBA IlX|`03 mq;ojuͰC,y#ƈKѐ'O~_f'?s朾Wϼw:{r5#6Л?~LfጉW=t6yRr]Xmh Zڻ[;zCZl޳l/}ǁA:G308|{/=_ܿ=ycͿ6ve%V$v~1|z0 C! ] ʁ!aH!$ȼB+drAf#d(}0Fm63p]o_v۝WSN|f䪆 :,8r&GrؙS<80)PV琌p9H䑻h"N%y#ٲ u㧔T t.\4}}_ 8!b?@&= =ٴ>z9'40"Vx^Y{Ux#v [2jԆm=l`WE ;+{FKtWg'~]Gg-wK+\')3 C(F,[W_ >Q##,$ʅVWrɐ5L.0]p_4沊#b WI}O*טtY.7'VXqOm8LHqu5X|ĩvh.ӴT3q̡?͗\6wڧ?`wOCg`V3 &u뮻'YPo~~jB&20 CȏK>1ODEPnutY1$U@ExTAR\ d:w6OҜ>n9hN[m7Qg͟8skw\n>}đ3'F5oqb۳gN}ߪ(kg[4I1hi9GBHMWlyS몘t}<AN쪭,g^5"0#V♥M$sah=,Cف/<3+W37zoiSM?s`*6V!`tXdD!A@Do0T T'E)q.2@#m3UǏIHQPٴz|߰aG>yeo'3bW`yiniΖ[^(r[W_x\K'G۸h3|AK2g_91!1ч^y0 CG Q@ Qu @E3mmmBop,cUNqu 6ӧO$~b>u}n+\@syIM2붕=]-Vo9?3GqK%r $;~rs+Kjr>9. D8UI͍m=yGIQ.*;·{qK?3fs֛1ﶳg7g{+lSfy}f!`@ p(, C`^KNtᯐ]. Œ 9LJ"Fq ARmv0]a88&\CG4[9_G? &;ֽU)wՕ[<ý0Wfw9*\:l羵Sn=?0GVS3k~S/--]l߾("1!`!PTsI<u0&đ_CO2q22#F8sd ~h]ŀvv\T`T|m3q.'KP)ے0qEΜ9RՄk986K!`܌Xfjt.iq. 
[binary PNG image data omitted: remainder of preceding doc/images file]
barman-2.18/doc/images/barman-architecture-scenario1b.png
[binary PNG image data omitted: barman-architecture-scenario1b.png]
ݻ_8J_v уTLQWnq.͔D:rD 7ѧ6Ϟl sMPX/ޙ3´1R.L-$P0Oy{GpTZa,MvjKPvSV:LP kGjg~ R!P:Zx.!M)d2Jltx.JN"Uv/v^v=L~gg;+~glpim.zFeMmܙS)zu}4`[#3QA9k\}vAF?WЫOo[ߞ^Y+uvA(b&ffs0R/lk2x}r_ūh|'UuET ̵WÛH}pT3Oyt] @֥5 tV/{ c nYm ]2}CVa:T8'\DY7@ˬ'>0w=]a1_}|S2lSݺUL1y "ɏ )m,rO,z2[5bf:վ> 9T(],hnݱTosn\k*m99NyuǷ:S*-+ N J$5=a!Uuj3G9Tl8"nQ2ܧ]\陻-FWYbKpsc_,E}|?MAUg hV)KZ c&"ʪKHᅑ]N"|Wd+=K,IĴ_ffj:Z|#ҩY; $MxUk̶mgASht$5 ?KjkTݭbX]2$[B &nc#- ! _ ˯5K 4&קGt6_5yncUg>62Ei:4T*tdFx4$뫭I:` z33| cO'mu61Nv5I{NR3G yWS cC-guM:ޜw!J Z~BG i];g.d5 RF|Xs([,rE֢J!n-d"'tۮAuK?~~^d~&jM$duiիlמ= :9UOcOgӃ"Z2sҁd?D*Ǿl=AT +@k-#@\2(2<;h{q-?SKlzfvH9T.>ĪN9aWJIHMFڧQݪ[+/{ayZZwVΪwIuMwײ&K'?H<菩L 8߼Jq@:y 8Q쎲s9=zKsu5tOkulӣE]}Uɾ)oپK|oS// STzF4vȤ3hYRH[h5eփ/aUq]'< Xv[z.^vy)I[TܟiO;-ǣ\u̟o;&N{<)=)Xޫ\[F:iٍoj!"#ӷ&h F-*Y"S^Bk6(]WaZߺ{Iu1;i'u::=Yhݺx+?/r-ˡ%# 9OVA0; EƜO)\gͿөvJN2hX2S> ֙]Jzk6s DOTh I"N8d =HbvyA#kQ~unZ>Pw^=V-Ir:r?Vr*;]a]?!uv^rd3oiF>W^B8om{995o*guǮ׌W.TB6Yn}}ǡ:G&;nR¤Z駿NK55ah]9&a-PO"yj~A΄;\JnsEn]r5ªK>yA'&Vs+a#sގ5t ҤB?C=eM'*W*ݵ"+m$7Q]Iq 'MPW?3JPxugvy l] +ZԓlX;kamKUgsMl1m7ȠSVS{Իq .U- WBcL ~^(}4"CWUiZzUuVnWnPgoWu9d-ކM=-딗0|62ۢ -0w+7/XYGm>gHNFsVHSHv3'f>C.QD]۷mzUK/XC [wz  -*aGF4[:%eڒD'KdkEXT )8og9կB[J7C-2dK)j֯صyc;2')~ˎ獶oXɬ*?kjB6T:UÛjˈh֖s71߿=ϏMb^9͐w7n5]hMUR|N@sh\ozhYjw.9ms1t\Y#-N\ĥOY!k0%3hU2(!GNKJiH*[ݚU Q!4RfP[6:% iux8ӟT5ޓ-A?~m02,Ztk6R}gN@#.)%ʟ5C%)sM]ԇGh^f_4_g Kg= !(q={ Afif ϟ8̮, r nUS41KV[T&R~)X̺Uh”:ØloU_S\DV]٭dZ3 +7/0b+oժ\;ߖEnѾ̶Og雉iZ'SO6Oyy WbEdXzm4!a:P"GOh%͝NeʰzKftb+51h]v1ʾzau@A `/姌ȗ1g=}50i>qo5Ox҃ozs|6Tul#n9nxcܮݹNɷmoq3hl0rLY!rǘu+V$ml<Ueߐimx}2V#T+\quUraݼeټV'ɄIszlda)&\;QR&כ>>@l .U`H&ZuyO Jem[wrPȅ6s,,I  JAdEλu7$6¼ѾA28¬[-P8ܒa[j-aȪ2W"&+`;LN4%]*+F}Z)@V #s.>sׯWI 5w䨧.ة#I׌h'tBZ^T+{~_kQ rƯיi|\ F:~RM->5Z >lR\%sȯ@W #hՖ;\dUujk4E'8݅giqN6)ң#΂ȴ9弌E9 r)Y$Z-y eKZo-P"sYURh\d)k+!pR0b+[uEOǕB>[={io?wv[%&nSM|/٠ݗތ| 'd#s]d;w&8,^ޭi?uPGK<ɴGN1_slܮRC&-aJ[ 2j6g٦5yo%c^ra4+äZ(;W8zQ'Xz,ٻA[4ݦ^O1k:&xסpӝuN!P+`Nq qך^FuRbj*4r\?~!(;5*ߵnH`-Thh4իU5P u١p'ǵz6VBϮͪԭZ&a0@ 3rA~rsIW~Q"O_)HU{eؿ^j͝6;nmt_[MV>&ּ{awɘۉֽ>t60fUUO o){Jy*]-|sݺ*,"F٢ґ7>-)U^t7ALgd4 kj-n* bd B}4xtvx9av$"[(ۛT^={ شW4^VnY.mNt~o^/%>Mvl\y5t/ yƵ<#A&dVY[7Ej41m>4hgG^RxY K+ vzsȗjPog!j߹j`q !m' -S$8]UV6X? :oDȠ+/e%? 3~9vI]sOZW4~mU13V%ʛxko[,ֿ,C*&eJ(L1i!sG|9vqBmlh޸ tdZev6 GU$ClQ5H[LeÚ2Ayؑb!ަ1mMYxlCK:4?Ni}_<lNѥYA3uN Elڶ+nVW3/0zUs—PJD\WRuݺn) ׻K<:jyCN^t=5ʠٷZǾ@h `S|N`}5e-s}^>B4 KBf>3aߩiDy"hl*2`БSm^5~M sԲ.w>Nޔ |O*k_M0}&/< J'/66$ \lQSILM\Oth̿>ҥi^,'=8Gh *)mCwtճe5%dNNg>6|>,RA&V 4:W794t/np=7#5u)>o_R/O=}ߟJMiyMiGj-%y<; j݊*͒dn2tP?ߝlݶ{l뵴nxsC;}Mߝq(VĴkx:|ȃ#^pzr4=9kib=YEcקuVtO :&5De砅5lra!wuO]ڍ[wjjПjڳuvH>^*K=M) d :Iܴ6hi6qmBfG0@ Ke ,B  S)Ծa%!S-V0S矹+l,]qX̙&6[;rR듻+Z]HZۮ~el8ШϹJROj}ӛbI+&y)2,f+qdZZk%o~77yIKyً?k5ǵh۠R %eZ?y3'uvͱwlQ|qǶeSiu}UZ ҉{@IDATOhuzεZp;υV(8 Z3(0}ݑUof` ɔVdGF} ;ˏJc;ϕ=*'%oIҾg+Ew&)*#\2i4&&iD*$P 6|˛%(;3{54hT@)֥GOUhPyAldD68w63ոvmm>o7UE%)Q gr[/jP;{/-ڠ?4(K, ZDD8\>˷͕Aj\^ieO}A?eG߽}&nigA[1ZVji-a('3۹h X-,3WkRFgauz܃',lf䨤2ְsmTo[W|b2S+NgAFd\{v3u}0X+_EߟBu&oCCvu޽NQZ@$|; ( Fn"6e:Zt<ƭ̄ɪa!||+btӃxja1S7%=z.-͎\IEQ!5w[Hg8wg-[) 9KQv:}\elΐBR\[Om NO_sm/T4XNO)};=u) qJВ0Ī_"dEŝ2FOqW#;,[LыmTMG[uԲaM)xD+YuǩmwʠGL]1; ܋|>ZKDͣEdUizn:.9{}bR䕔dwN'+(&!mbtz%=I,#AӱQk4IWriC"tM t^ki[@ 3 C@:2>-V;=U) 8}:ULC"gf{f;swX[Q U"mp8mu)5\yEfD[TFdV"m*`4n:ϐ u?}a'CdoV|cEYk)y*}7z޳5V6iԢ]?Lƕ2j4JgІܫɓt&xNdkכ޿>e>&#>wyRF"@ @[G+! йbs%ea4~˕*¥]dnxOYy*>J}D7-ZLGyNnRgVzeiW%a-*SX23UƤt9)}~0!}F[Y# e$vUePg`RWǚ߷q  H(i_w|KXm5݃'3HBuq4Q{֮A"f*JI{Jt3hH`rzI6 ޸oR>EVk_+CE#4 _/5I9 `$PhaMz%uF9/bR9[nk{2ehۣLgvNسtvb}3)N?F̔Չ6EVkiOGlFV ZS|,Nی&޽G&O98kA[i 驡mgfwfU޾ml)JnÉ w: ?Om¥=y}uwٹ3LkCf̓!@ /@[k !+j|9tE$RR5G25:\|^|A6fB4iҽw̕NZז®{RShj-} (Pu*Fr׮5zWbEF[ Zq綯X&uJNo^}S3tب8t Q{r5ސJVtox9'-fz{[ymwzߵΈ;惹rwȸ! 
>[d$$c"W(Y~_G-KÛX[)|4ʭrӨ n=u؂fw+h%phXߥY>j0f]th4]gmXS,+j;՛Ɛ9/ܸ: /f26zӉX˺(?μ;c?*RWm9͸0a^ѷ(0[pvb]feSdw cNSkLEd[!-kߨey$:}0>6|ꊄ@JC&|̝ESkiCGsYU:5[]h0a[g8hM_qG( #z]zd4WI\)!崂}I%fh䈦vwG_2'-:3zֿ˚ V4{+f䫱g.hw SL"uĔ"Ҋ~K`[hз\A4(;RŋuF+n=lnRHRܦ:d?Y˺j蓟MyvTԽӰi3r|Nzf'SjE")@|@pyXqƒ mԩjґխZ^N::[] \QҼܹkLZOy+7ۼù}Vt*.u{,h%}/bu[nOje,۸f5˥oS}؅%v޶#v2bU[4@QmTd2Q'v:QZ@QS FS F7gz-bZ%z!)5P}ؤr_I٠4es׎_*Eʥ*w4ؐE 5ϑU$m vRrl۱{m2]Vr)K/ two|#6QVnܶ9`PظvFؼ&޺cOYzQnWVod^ڴapʠZ>)Ϋ_<~Y @֥(@4޵gO%{}ǮۼSHhq-K6R Fi+-*MLEFx޸uVf|)椑%w4U%&KXmWlqL{6o%ŊV*I_TLNϋ~Ǡ(Ӟ9F{Fq md tӕ->()kkZBTe` `iT'䕦}ec1A#kBA޼v)@Hkj:N*7ΑU>TuAl@!qAII!@ $g?1ҵmӎ+3ag_z3lt"چ\AE|Ngj (YC1!혈 @(-;v.]&/9X{xDe? Ed X@[@@ @@0̣5w>\B@"N+U3>n}{[~|<]yw ')9Wު<7_>k._~4,Nnݺ]yNܙ%n;wSHi}QOz0`9sӧϭI}=ڙNz^~eO0]ؗoom_ڎ͛?S!ٿ/Wwu˓K'E-ҺQʚK+5U*A/ٳ'a֭[' OPo45N?2)sv*ƍKˑwUL߾}O%\!LH />b  =k2j5]prpA!Ν;w ٸqce/‡~蛾^xN2e׮]+演;akt.UQj;vMSzo^jq͜9ӕ<:tpyڗ{=o. `;1m4o HcBe^D + v2M@@ͷ&o9sx㍚ċlМP@[ ۢE O|.0/pcucu믿U̧~UV.O.S#ԊXY [ fϞRթ-[zs{JN[W\% &7( X\9Wҫh;f q裏~Z=|^y啠\4ө?_]I[eƛ7<(dohQIBmLM}tMɪmvv,2^&D9NM_޵kw}s&%[?*Tp8AEp./}4jw:#NIm)>kA NU?$so Ǽ!U ˗/ 3.Ctax\||Ab\{eyi5Hpm|:Qj^]/Zp׺% t+M:$y+JZ9S+S2'.zh"7pM4cnf@zW\f[&M-&g4hPdId.S&mJhS+w#zK5CouVR'4e&e'|?MP2]ӔdP::֩lٲAwSTХ:Q,wmݤIϟ?۷oZ?t sݝ7owuFYj' ާ/;țL2LL:@ڌ9FL@tB7S*8*W7f &i:wM,XpK5L2ڲa5mVa,b -s3we.m~JB$Xfyg- \$Տy#2WtL ȑ#z}!HO LTN \;qO yu)שn(u}.gbۊLzcx_sOe^iJt"uZ^m]R1iwޣG^ZC'{";/"|RՅe``ep<'L#0}2$D+ }Kxq@E 2ę{Æ /2/FλG{{ g5ޒxR5VSQGխ[7_ko0qb%+s9$M4AJ[o RZS_#BmJ.Aez禩 0vRf7Cl |WhUl[:=SOM؊l /X;vW~=e^W^ZaHu9SN󡳒J8qS*uPxQ{԰rUvm1=CU"D={l~|au)\ڹ, 8]HltiBK~I#;{Z<-F ryr-2ϯgyz!+?O͋8=[n}%>dzBoCո*^N'h)>})w)kq[^svPrYgӤ#)] yzi~|RǼ\V+RvZAFSP-{3mK믿nR-(KفJ#0z_F,1ymy ߝ\3Wމ[bv;ei]e1떦]j뤨PN%+ ^يd K:}|'v:|{fw+9/;m"eK2O@Kɴە2F߄v}|t`[xsB$kK/!o"@\+OiK-t '\{~K⻍W2X?_y商t}<䓲ᛩ4pUZ)by#z9x`cR&s%"| Rh6wzSHZ:+}H+JPGy<"wEܣWϥ Oצ$ZVȕ.5ݗ^x Ίr&+QKEZ#/.p&"` c.Ί7K;{u&ؾ(jkEWÇI#/lSh]J0S]FXjyK]tmFr~tӥƽJL޽||UZF]g%" JD{HESO9.ߦL 9a`%J¸}9#\_b}>l 0ani÷j|C{3}7'ڹX2 Zϵoz=}Yx]&M'8aK8eěsy;-.VͰ؂$I?#OJjP2{KqN{7Us[keU3 $Km]-eAl`߿wܡMzG w[2c]jMTu cH.Oq+һ W UY֨EF?lRhA4exZZ>|^QE Z&g.aLv{ݺ2Ex>xX>SLqz|ø1\zZ3eWVO>kw ®J}hj*wY$p2' )/X;ߠD Z:j{wq.K^7)iBY֙˞~+|M|ٞ. aUhۼ^rt6 [^| s8u_|Nf(]S[vO9j6(.5 鵢2diqSֹ3ݙzR7o;1#/YE :kh`)=MKx32"ޖ0 2-kl+_@R .gkg~ZVi|SСAu`.hS2%7)yZ7_<~%Zwݟh[_'ekA8 }|vYpjT5FuvxouB?f`Yi+hPsW|׾YE[{jq[!e+MߒLÄ+\(;ڃO :@׌9rdIN^|E dut]͛c>|R)]LP|>+"߲b+e_m::7픲$ i(c K2)@[2:"f ҝfBз&A۷Y3F2܊r}Iag],_lzN39;ܓ3YRqU8<4XCR($LdGԏ+"(w_= X^VK~Rl cW#S3݊7|`LeY a=eCb^U!Xwuj ap'@\ C\"XʐM-C4s% ^B*t]ROڗUQWxa oZڽS7j3юtDMW _Z^PQkҥ/`×poϽ/O#VZ`^u 4zpGWr[jr\S;8|:xc1&vȝcXXH?`dܣmٕIyA)QL1lV ʇ{Pؑ,{~I*lTGY^X2:2pǜcaY6ue/".2nBb`;uw ‡UlZx$qzy['F 벉;ź3zWK5l3$6@? 8I\}o:|mO9΢rݩkF_ ʵf5{#EAE7V fm;2}Qw#{gC<<*zHY _ ]+1_+%& Qxc,#>%D(:i(ՅP1bMDeŨV?ӥ/Y,^b R 0t餭ニ3}]Umfb؂K`X\ٸأ?FniWԳ%yi;ʴχ}ːSxv|u;rᔟ|p%Ju+_Jxü%#يym' ̭!"RH٢T'y,TE@U/1zpp/7@OMI5k9rHr-C$ҳG6bHS}>V.ESaYYSVI .!. Z[Vu3Q#Ű.V+{IU$bttjbwkQëeҰ'}'ZüZձoKC`C!m]B لw\0baΤ1L< LUuD [)!faȈRa bOX?{"bEZ6β.Kd#hV}#$}lh]NuM H@D9-UQ2]cZ,jx!QfXx2T< d !K]}:;B^U>UՐ$Ҫwup A. -ۂ)ͥ^38HtRGC+.5M嗀>hU{HZGڽS7ZnumTu7jz,aB#Ý2! =Uu*b"KW8̻I?,q>ťuM)T TyVwlP*V eWYŽ5DYMT:!"SmM7ݔ&$Y ֚<=3i"эT}#{F qyez65$v4{)ՒSV6tZjZW.HdjX{ ֱoKC`өk:8պIt]yX:2}q\Xᮌ7xSIScԅS]2yVnaj+#5v3ew842P)rXZҗjxhzQ>cwy1pt^m;TYd锇4%\!RE:c$[N n֊u )(} rܟzVC0}T`[lEnK]e%Lk]EZGڽ#7Pj4xls%J+b^[zd-Nr6jK (N1J 1lM{娪դ brJYYTŊվFG0m\!vޔ! 
ӪwwvБ@ :3{eEY(gY4Ƭ2&nӷK cq~Y5a4Y eM;2fK?zk0ka*qeŔ;N^WxtTպpʾXe Ҋ4d ֫X;ZyYiQۻjqg%){թIml9>z94>BG)9x6 @DdF|> KĔ]Xd24֣l}ӵc8?RMl8J5\ Wˬpז+ֱNz06tڡzi # CFAYιx|Ng՞Ve O,tݥl"5K)R|wCL+V(|vqǕX4Snfe0eeFT>fj41q&DwpSmaYZ—"1V$)R.-bxP3:#*4/6pZh"r Xe@\y(mؓ.hm i2m3LmHfo||yKrI4{#Vu獘bwHUTm/$˻eֱ6VQBȒ]Qx-?%Z,<}̳\!K)SL3ΩOU mY8ȴR!Q;VI|޲1|+GZGڽS7j]ݘ >5+U7V{썥M|$f'e+7vH[EjqX^>-@Ź!ױ^jnL{QTG|Ț(ϓLj $Lg͔V{hG؝:] >e`8Sˮh1+!yƮ>1?T~qv5M};Ց^Dֽ}`ՒZW?݅,i\Y!ky_&3պ>3bgZirw O%L"?X8r ,L?FX}6"bHu&CLdzi5}ܯ-EG㦛yi/Fzˢ\rIz2|+ j5DxQʪ` Z'o>O3Ѳ xXJ7{vN`DP24uWqnٱatŊի\o1Rk;AKnՒOU$ޕzK)M@Z{r6 M0ga[a]6{+:`,|`!z5pEU`C$򙬟] T5챣TEq-btn,YDjVM'&QT)40O٬bQ|;m2fzOdѳ,XRCFGY/.ߢؼ3bq e+`FZ}UԠ)+[tl<:|8,3-ÔlӫD+-W/ٵ1ZEeCvmP, vҨ^DHwYR~IJ*vuOL"T]3Gόr]APY5j 1|`celJo}R\|Rof蚄)!8[śK.nǙm]V[]{:=AU{k7 ّR#ע2AxfC>:ƬxҰn=ΑjWWuLWƒMHCW3zK\npo(v#HX: 7 eY$:^Bne)eSyb.$__J-i`lg/^ZJi%~گW]E"{VTUEHՍ:bXG%yRtf[#m y( b\ݷc]:{sEvMȮ<Ԩ4#&çu`VK9ʶw泋O|(IyU;d{ ոnʳU .ѳOXy啫8YD"+wf^W"Rk%*j>PNPHZT&=ϧ<˯|ʚªinW q[ձg*~)+L@ۺ>3bgT3礓N'?Y+{񞃸Ջ(!$mQL<r_ |fNNmԐNUZ}*I- \ [s;S3PYR=XFG*M<]+N[VyCeM74gwy'gr5tF]t SR#ֺf 1Vpd(MXg/) ή>=56P\ʪaK,B#@Z۽#7*R\ܹԄWNިIFbirKnRH|k"?Li:\NK0 6qPYi7 :8dױ^j)X1K}T''f96 S@r\ պ_?{*S|Zwqay/ʁR$9֥ U; N%Yi!?zrtpT0ɊѩJǯE :Ս&8xa^cKC`C'Z/s=w5'd5>{?˪Q>яF091/Px܌t~_ 8>&(=0#](|;J2۟UKZ{ \P">`7"ǼbPl?և|'6ܒjV) 7Xw~%UeBlA 'ybۉ'X6=!=ЪXe4igH3"'E $SnHQlY5%dUiŶI`9:8mJ+.AD *V `mu;r"R&x駦艕R~ q[$$_B4VZ\gMJ͈/;d$Qk5>Ī6M:0P*ؔ|YV>fIuS$L;qݪ ۗqG4\S)\SrHmpBD.JԺ4j.4X7Vy6() ̬ tZԫTuᅴVk&6̫Wx{ <$6)'պ~4z !֔bCǯaojTV?ʸ<@g^{g<0Dyo9-R1W#&񩰴8 :2>=Qz\;aiэLlT?vT=Яjۗsx;蠃(٦<b@,(U_}CFU&c˃mۇ4(y76>';C`dթARe3Ye0Tdçi0ɴ` U9sЦmf OUi}klDHICjĴ.|SJ/S(\;3GPx yCVX=ij!6j]հX%s<]ǪahHT|TMr\gypB}+>f`ՕyxLU{(<1K? 0OeችyjfVNPzuZ S^-cu \1ij?3GüZ1:̈́e.4w>Jf3ΘiH0$Ī&Gꩆ!)2I"تՐMsk5ְ`*/ <\|8:aRgj$JׄՏGO"U:[unex Z۫u0jr^z RkQëe VW|}bD *#ixl*@ ^Z `jdjiJ.n'BH)V64dD󤸩qٷGGVnXQCun]:+=>wGo#>흂Q~w4|>UW$g;,U8@ghpe+gJhzWG]iPIJW^l2}!KJhD$63cCWm7XbcSJCG^jUrYՆ#nCYbWV*}|Š<ܧZstwI2T':֫Fs/MZ~?D\(i#:Ox> uo%Wa훦Yc59Z2JZJֵڦϨMnDYԶEe}&JFJl(Xo(q-jxLzex2s7ɫWulD&'պ~4zp}g׈>RsU],.>^>Ccx^KXa=5JF(C+':zΨ>d[oua(-QXUO'n-T+Z,ìro5 zߢ̔Լ1+|xVk_T"zr%x=@i-bgC uj4eAeH |W$XduF4U`Xj&'uWc 6mWT|urbc0Uظ帎;#R iV: qZhRYa|k̳aJ,jN*VYl,wj4 עW˴::nO2K2u7̫Wul66iuK'g ,X j$cr&و#c?J7_ץRd zӑ&{1sّN?Yx[=gpK10Џy쩷;C[ԫRUiYLykR̍RkiaY`'Z@WY^>rm2–jlIbpmsֵojQ'@nR[-aϴjU&sVl˲i\=\́!/b&0wi'_Wiմ2dXT&RmʲPsS kƬq0nJMBeT^{m2YW?3˳v\*s⽝5U~Ŝr)5 /{[jú*lu{M5L}z5 |\7YjccLխ`Y߭zᑲǏ1ZYD'Vw۵:5R%o85)g5L6˶u|nwüZ^$C`ǖh\ _nyU?.,ŭ2 q!ʗQ2m>)kU̟WV^\C♃ ;$Nǎ , ?8l zȈ&H]LY~Y1\NApĴ)z] V>gD`K;:'o#|+1&}.R"vvpAd,e< N|LX eb_;Eф Gev`!9:t 0`fH^Rism1"fjV&ͫ;!Rl3FqBwulpuPd҅7&C`3qгL@n$  H`&~w |#92Os˅7ƴx̡=- H@`3)ZGgN g%  H@!rp ejZzӝ0ˤ:$  |)3!p ,$  HcX_ ُ ||fgy&˘yzصE(ju$  !M8h[75E$  t@uw5c7Df?Y ̛ >_%Pu8M][ & H@`p%Z76Ŗ$  t@ Xy; VK,$ 6hY쌀3a3 J@&-zMoӟwiF4xl=+e@<$ 6tYH@.! H@ 0#s9W?яfQ iJ2H@@`)3 ;]$0 ?sO*Ьκ+n,*L/%}$  H`&|>ޓ$  H@, /0 3- -&hȑ#*Z' $  H`B$̈́juv H@$  H@$  t ׭떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! 
H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  $  H@$  H@$-T뺥%,$  H@$  H@T$  H@$  H@uC$  H@$  H@u H@$  H@$  t պni ! H@$  H@$ :$  H@$  H@j]吀$  H@$  H@j}@$  H@$  H@B@[ZrH@$  H@$  H@>  H@$  H@$ n!Z--a9$  H@$  H@$Zg$  H@$  H@@P떖$  H@$  H@PH@$  H@$  H[uKKX H@$  H@$  L) H@N7޾gmi{zv|~#>ʃ^[ 9L> ҆kG=F4{yg|y&|, ya3O܂Rz؄mpχ.;s$a$  H@&D{b-$  L>~g^?sU_&xGWvۄ)w>RHji\pv$>';~ݯNk=V* I-Nog^z"5=u]Oo ϊ r7KnN`ԋGaکqw~/I: 6OӐ$  H@m&&*?1׍|1ߍFGyfoBo_>'dŪ7WpI*^omb씇'9挻B:wL  H@$\J@ ;9-}a]yӃW7rORObN6@*ʤ:\s4M {}}t 4)K@$  CU$ I_.{鹗mA@3{'_9^z*pN6Y[5y)ơna,?ôcznι &N=wv[懲CY1?cǾ{ꋾTSbD[, g1)Ȫ$  H@M|mj$  t5v9rr)&_lC86X~ز3!G_G_$֥iga6]i4Xpd{TWff~%fgŖwoƈ<2vj:G](x>+'^^xn0&;B,֏ܶK77(:_w]xL譂/Nany}qhnEgu9?n|mqRn˝yțcGo~՗7iFkG8S]v?o.=o/͖{3~_5>{#/ʛ1̴&By_{w=P.;xf~"7?AWwZ{={(Dfpǵu݅M3CMYzu{̋n{^gV\hmt.cucg]폼-̫.6CTDaK/ihq6XdiL!L`~snz# }On[~mR.:.|0B 8:Q/sU7>Mz/v>\x5B^~y~v(f!~F!"I4O̜߿_i_^/Ui=CF>EFD%ՏƂ]|(c+av=[h?n? o9:L=T~f}:~uvkkox} y'ƙb n[6od(RDbHluwEbko3cvKjZ15VSG^&!17ry'OVc*Nyݯ~Kz]? 9 9}DC<08JW7?x+B*a~n|0=]ax~뻅0CciPR!0Q[C05˴[Uг?" ˙T=5WXhf a])c˾Y Å?,N8+q )6Ir>F{bV́ht.#Aa@0}8YXG~|Ol({m2;"2]GCtZۯ98xŹ9A c NM=ᇹQ ӇQ $Ȱ}6~>,O.;1-W? URbs6myQ Z7΂{ /xm#ޢHꖀ$  H@huM%  H`P Eë,ccfq24bStu/lLP`(Da#8U>TSlw?>noVcN1Bʋ3cYjY}]32eL['fwZk ֽMW=,B8^xg\; ~-:|v_7_.X;zۍz"[$  H` d$ !&0a4CQy_3cn)þlNs(n`,>L'+ _|xt<8jĶ6 +l-.5ߌQl2 >O=?N͖{wخ;F)*; %;J=}$kfnYpS3 +7Kld8Y}m>0~2Ugi2uɬR.=Nɘ`Xbӽbg{mn^ƍn~6~ϓf/n+iD$  H@@@n $  L̢+ yZV\x*`X)E^~});;~~zݥyn z躛Քָu8[\[扦?lari!E!*(v ?7<seb8z\Dfi6Ii;b'Asv{3tS)=፷re~j잿lڕ)iڍӤz"[$  H`uJc$ ~`6^hSE R)05 ǔSLgŰBbT(Anl-4ٲw<:~.)^{,K:.ʞ^o99Qv)]}̜Н$Dw~(z,q;_w !wO`K>8%oscY}}dSgiT1.F1LLFH )@Nդ2fEn6pт7͔SVm7PO H@$ .$0 g$  H@A ՘q#f!@t`>RU~q]v`a?_Pܥ'_fVX Kպٸλ/f1،M,xx_~5(e/>Uץwúcy莋m8jߢ?X 4&rC/|fٌsp~:1XGZq # H@$%T뺤!,$  [O:p{ADG~[݅w*rx}~篷,<ϸnF+̍mQ,v~mJߨ2^j+~y,;$\p˓?;s\YV?~Yιoa#Zѥa󇩦Gcc~ǟ\m0ha{Ce[RpxG?뺑!}/춫Ϸ3O_F-6x0box:0#]aYnO]߯|4JuXM> )za/F[c}fm݂sL4뎻)0I6sL'\sC]W-=n3Ϲ7PIiD$  H@]N@I@t ʛq'E{hEfC GH{ʯ&,Ţփ6]f1C{1{j_<{BHB Rk,1;*A, E]aYoo߼wK/<|ax'0nG](X!]pqee87dJoDpcC[C6ey;s/~WVU\PBl{#٭B,0a#rҴ^BG(}{ڕ.XS/lD d=J[:/ L96}Ͻl IIӌ%PuRO~W8! H@$/e$ N@I@,PO¢uSM9/[#Iԟ.yw>RD+7+܍X~{t.]-j@ZeYcJum+Lb=zղLSI`Ȏbs7[g9*{Yr`c|wZ?4wR}jpvfbiBÆC,y )vI c=fb1h(M=?w8Z#2;ĒЋTZzAki8'&?=n^mͻqC$  H uYB H@-6u}90l&dN7}w6Z;b.fk.9~f"B?+=#_q9ʐ_~9yꢳ~eN=4عGl]WHw{,ۗvXonu<غwxHplptK\-Zm,켳U]A~jGʢ 6V̚N59aX}-wxV$  HuC+X H@ l5~2dh79S.~(T̗P@ [E)DN(巜$  H@Z=|r[0 H@D`N}=φYj΅}c'Xb^wP#?S$  H@2u%  H@'p臗쎧_ycop-OfyBdJ@$  H@ gNn'9÷m鐅PO3θKk 8xskj$  H@G@ۺckHu~xrLo>պ>4g? d}香b٧_rޙfEl4_ f&,?3aJ@:K@ /|\p駟>={|H'8Cf˔%  H@2΄̀x( H@#p]wr-!M6$8R]23UW]FcNktMGPK9.-Œ$  H@)պa$ &W&ӅR? /pp?ch:/a]C]B`ꩧ^kBagyK f1$  H@@@nRhe( H`|  %x뭷X.[/|awgI|m7zҿ 6:{z`TkrCRUVhR5Ue/v|I!>p~W$̛o$d41b)Hma$  H@:BԑtMD&qa+ `*vF8w܁!SO=f2o&:KO6d! 
I l^q뭷^q=܈#6l,:H_W8ʏUKZh% }D+~ = wYd_| .`15HieYfVBRǿMdw?g-0p vO?vCܙfi[i[nz衇('&yYp-`};cȝ?E]Rj2R/Tg=&iíڊB,G.~c9Zj)8gFMH&|ێVk̀:6l4&`RV<# kBiӿT061- Gey晇Vx㍩QB;>#p-6lXpS`/nҤvexClf8}cV ƿt;5 Crp`\T-~g,BXc5h40%\.tQhnjA4&iR~:@h/78RP$  HT:$  H` rRAH:! ͂%H{G>$7t1/:`8CC1ANiVYe Gb_jC>{8T m=QAm!J%F )zva3Z!xHZA#L*]xᅘDQC>O hHiN(a^4\̋Bt._|1U>G d#Z8,FB_z6uc|bPbyb(Ewy'BaSYկ**=Q~F.0oyR]L .A-Ry+$BPE˦m[dXQւTD!y~?K8 MTKE('ަIt:M\$  H@@ h[q&( H@,h !0tNL._At:le c+|&q11U#O0Fc>,:BRďئ710xbFMMa&VQѐٚRK.+cDŽbvNدŐXx9礨d}m1zbpG}'2g#Rc%$gb",P<с:Bb:V4CM#( F!PG4 R2wdIE-+atG,PjEaL>6nN!zR6GjG}cc N .hhBpyaZ\Dw _$ e>4AT-N;<$q,5lÍˀt5\$o?ωEGE(c^^NbL;* Z!uQ1|Oh߲iӈ5+>}3+3m?x3Ͷ P5L3F/xJ$  H@m($ IS|F~2͢DZuqL2Y SJ<=Jf1zA”)90bB,vE[l^ZB-tT v!eYd3Sb`&]`s.%c! VLTQGc=0'7I!b*hc:$  H@:N@HMP><̿CkŰnbsCfkbIEΰoԓ#MvenRHCc*°l88d(NE}'ޫA˫% TUf^JbPN] 4_!nY0<)+NE9D18dsبoVn-Msl =A G.q,c%EH6 ӌ% gaꐀ$  H@uA4%  Lxj+jֱ?6j>(`.i,ّj+(/ b)և")KcjABe]~h=b1pp;s5װ^ կ~5iŋTEe'Ho Dd=] ,b8`W^6Ec-ܞzꩻTo[TQ)[-Bl#+ Q "CP`6O3JWģ$  H@:E@S$MG`x 3Q4tJ\͸c1NĔuY'àIaU׉N$}4@3&5?XQ-"NE[UKZ (sΉIO tVd4t4Jdv!nPۦxhR3 .]4 lpqe|x_1/βsȱ䐝[Ìo]r%(L.ݕ%z4i^9G!h&\Y/$  H@@g Dgy$  !+FQ>U6l.AqS& 0Ї,T@@ɤ:a%Q]TJv'Xr% *!."~N{ʱ`3"Eܱ馛FVӡ[qYLJ=bĈ:Fm&5vz a깨{LdfuL X1d3a9iZ q i%  H@j5e H@.thO!ºl|J+Ta"g?tKtѱ2|[L^wuSF[삭x`,!i=RE#}s qѧfb1 ;ņn},@ :a("}6%?N{tUU P(aXj1ҁE4h!I8'OS:@!օ00fZM6d*S$45 @{7]٣`BusD.)Vo gj͓1Mx;T`7)a$  H@@@Ќ" H@=gc[:>Du9!Z@{ByN;8atTI-PLNl$  H@"ui2$P'ƛ z[b#}ݗJu,1 M7v}x%  H@@@Ќ" H@ -.a)[o%E3P l.:tn۩,& VK B0 / J Jgs15 H@$ *諧$  o|#h۝=ߓi==t) 5XP/Y~l05lnvv0yI@$ @@Ξ  H@K 99M2LdۨD~F)DХWKi薀$  H@L $  H@$  H@$Вj]K4$  H@$  H@ Pdf' H@$  H@$ TZ$  H@$  H@j 7; H@$  H@$  $Z'$  H@$  H@$0TI@$  H@$  H%պh/ܫ O^E1$  H@$  H@G@nؚac! O.I@$  H@$Ђj] 0zK`$0S/[lE0$<&Zd H@$  H@I@nlWk5k. ,++. H@$  H@Bu](I}'j-ѣG#{fƔ$  H@$  HT:M$0>ORVVu[n%g Buy_$  H@$ TƳ%GL1%vXzE:|CxJ$  H@$  H@@`n(e:H=^j384찤>|87|3ZqHw v$0I8SyXZhĈppM?h:/>ABJ@$0P N;"uTqi92pB$  X;|wyg+}P$  H@B΄Bf-";xG)JG {$M+%4{AE%ƈ]AUޤ Xaܳ<Ͻ[3gvvc%(-C0 C"p_T͞8 XB}:t[Mѣ/^k׮mv#!Ʒ~K>ӦM5kG1֣GtASL?tlРA66lPNK%8nFp6%C0 ‚@2e3a_zݻwUhk׎sM~͛7s… u%aÆrJ.]s%oɳgfNrX+RJE-lR {G3s~3gj m&_{5)w+Utiɥkpr /" o,! 
Kc۶m۷'O`9ھrjv0 C(jװ!PxRo[n3g>;-W__=,:)Gu=gL'63=!`@!EC 1R5j>묳vzE9hҤ3<SO=UBhG'W_W|-Z.x֭[qil=Nc LRwwm6< vg!7FlC=wf!z.!`!,6P"@رgwڕ%Ktݕ7eO/ $G!`|.]ʎWo"ܣw+m7&<#k^Gu:sXv$׿~3O?];{΋gDcȘ1c4}4 !`!P0o]G6l 0lذիW't .խupݛZIᧈ','p2OH!`@F` X)QrdjJ=#Zfꪫ&L)#/^vezn 'e8L'Ϧ֮]ki C0 y ;f<(Ԧђ".]4Eqh#Kk!`jzovotRM֭K:O.'1"n۴iSvmÇڗ\rɟgOQsԈ˗OTi45C0 C#`޺lFO_~\7L;'_ObŊ_~9)s|p!/Evi!`W⠧{wQkC@'''*;#\a}pi#/oF}I# n,S[oo"Ǝu.)GGOxI'ɤ0< e֭Ӝpi45C0 C wf8PxOX=믿>".N'ʿmAo;SdIH8e='2yꇲ!`!cS&XeUF26Hq3gz(_pZ!t!uXW^M0 BYąisEǟ>[w'}ݺuZw%tv  ^z|' ,8 gQפ`;&C翩nݺ)E͔j`!`1p"x8 ʕ+sx0+‹##m'N=TiK8\(+\hl C02I ̖-[E/˲ڵk׬Y`O9i .цؑ[ڤ8g̘"PF𔌷zRJf]L,є]tYre܈ 4@' C0 "y-s(uQ-[݊ЬY3~zʅ:>?[(fk4 C(tt=8͛{rY$S^gG5¨ZYq>lsa2;VzI&~SLΗDo0 C(X&lVZ|͚5H𾱇odxjtη !`@Zjf[vZ$zC$r0T\uXz(&3~uLYK]lܥ4~ /ʷAK #qJSZKQra5 C0 C!`uE :e4ko§MֺukOw$vi!`>@RJy $7N*QýVXutLI- .},7|mڴQpӦM"W_=qDwP;<.ϟIraL0 C0 ;[WߠG,_b?'gʴj֫pG]p5.saFL"?@VG.0 C0 +8f.R]N ֭o׮xQ>3٢wY-K.իW\6[d2'tR߾}{s1 7oλ" !`DছnbLزeVj[4'\íˮMwG tzI_~yt7 y뮻$e;&⬰}vܯ0d3KKURqFɞ0 C"`r۶m_lV{}W8k̳uHlݠAp؍=ZҾ}{-W{VniΜ9P-%K*tt&$GuӪU+ar!E {4C0 l!^W޽J87Uag?;evڥKSt5{t'ܴiS]<^G}4rO?k-s'M/9t_(w=Ы]P@ؼy3y rS_*IS̆+_||z-3gb=!`@AC Y*h-?pVAk鐶p)SD!lA#K`.>}y9vQm!`9CL2d//)ԭ["}ٞ;ƺe:5v6l ]^|!CeƯ~ֲeKwߕl{rIEA #i6ghynF8L@P1%I )!`[Wނ!c/_=38#@i~޼r 8l铢rǺ0l=Add:(4*T/ WjAjx nP[aOAQu9s&alɥMٗf!PV, Kd%{Yq')b[(1M5Ŕ}ǕR%!c[TN^th2:N@CTZUA"ܹs>OM_h;ŧ [)X=#gdO>;qvVꫯr_HjW|0'5 C(Xݺjlbi!|OG=,-[SdKwƯ4ixw}6ĔJCpStT#,NUD"!+좿r`ܺ!`:8"bqa޽{B̝{:jYh>A*g&zvcAVfMNh-)m)%T#B%C=VZ8\"n;,ya@>Ⱓ`3a%D%Fs0 C0AuQuw„ 4 f dƥ$7k֌F%K֭[j`2~)%*W\nJ]!`@fU'38‹.S`X?;#LK_K-tF8ƍQAnTaq5wt|Dud?2_l> !` >[~=dz_H¸l@9b[.9:̿SNx\%:Pm.=)!ӂAKFxhm!wXݺ,Bh~pB9;nĈrE_J?szтj-B4 bq#1ތ+PԢ OwsJ I10 C(dyHwt` dna^ 3Yf͘1cIih>MN>!C 3Hj&1 C02y2Yo2tI=묳C%?@ҥ;jnɚ.!uDݜ9sؼysBdTiq£9Kɰ9s& )IxX-{јmܸ1xE9%&<2 C02@8LF&SЌQ=n5 n|Su-Z i~@8ۺu+>gCc˻X C02efR3qn CvQjPFQ~>`Qj Ag{."/h &nGU )íRG<W &\uqm& wi&ᾄL  sG=$zv 0 C(&d1 |ʼn6f(u dx:6$蚼谂@cp]vXXz5`Bfv bn/dw׳gOƲ8 b&4 C=[{ BDuӑ߂3ax- #:K>q3hTu4Hƽ !` /6lo6@fk$9SNiذaB[Јr`)3K:&٠2rH ԅl=^s5(-5 C 0o]lyl+%8Θj˹WxԨ.L!oȑ#94mgϔFG.bam!`ר O۰a"=88Hpt"Jf} q  0`8e@m2 C0ryru1 ɈniӦ\*upJDSDޥ]܅ pnÆ^aGI`S91KC0 C(@f/_S2p1nqK0]92jM(Ґ[^SE.L"'ON 3gZ"5k<LVyc٥!`!]n]v3}C@ QpuQk*uv!G=ee[s-`YLDSSR%1!`!Db낰( \p3}r8.Dhwn݆?vz,k!`@A@ '<859' 2C+0|v TD --01d!"4@@IDATm@f`Yu2Cj&1 C0DuiejGJ.-׿5}tRzuGݻ7 \ּgД4-OٻvDZ^4KENv͞9$-Z߿V>h%|a!` 0*ܣ>J: bp4G+WzUV-6 Q8N{ҳ]> zKB&ra/ SVjՕ0 C0`'*5!`S煓$z-1AUjС~:m>yYn8[< .Qs 9s/S.&4 C0 ‚1{?>>W^y2Cۄ 3ժU#e˖#_hGFZI32ǍaBd&gp<:d\} SKC0 C(P@f]xSO=E0݊+VZLfmꩽˊ[QR%D A!zKKt[[#W2eh*=8 ӶmO?]ӴKC0 C@xl@@MZ*N0jKH0nr(KGhjVEp7!*3 2g根ψÎʳ9s&I;Yvm18Dt&&1 C0 |Fe"QoבgH&3Ds()|H Õ@Wiө-c6l'glBA=$VIQݛ={6̭[Bc 3x$1BlMɔ C0[W|޵=iAGyl2kB"qu5i҄ Eɳ AaV ,G)#FT8qa!\p_|1!Pd_X0 C0,,=xJ]2a8 30`d9!*'MtDAҗFP'*#rs3xjdrc!3l7z2aÆCadF k!`(aH8AlΜ9|c=F>;J,aBnSwޝ:u*_h@ٻv譨$A[pVH*F༃=> &0`ߠ5p-e3!`!`norkzY"?u֐Ȁ窣P -Oٻv譨$A[(H3yca#6j ܏[JdP$%ʠ C0'Vxw{t yS>U&LJH:tFZx`($l;Mw]}7eSWR`( 삹$ nƌl_/XP;*RS\Y!`!.RG>|83u d2gUVȁO3H3<!RvFdr{ 3g(DƑ S,\}NpՇG{d4 C(Vu+a /s=7vXGg ;cqNAd^A!]0f  ,'sѣ( Vkvje&cwر{7pK~]k!`@^#iܹvۘ1ccݏ[LX3 V`\ 4jIŵk׾N 3a4N=ԫ'KQ!`@Au]ۓ Psb#K9[=[86J=:[QIL8뺧cDkjNΆ<m M6qE.Î-zgMB 8jVa!`y+kF<ݒ%K8`8QX!0x] ۍ0PRG-И <'@j C06Vh_{ n2sL?0˺u6jԈ1N}e?:݊^&]O)Ktg'x)pV8=qs԰[.i.lc{9}YժUḖKDۄ!`@. ^!3qhs"JrE< 3xE!3Bix$%2 N$ ѵJe^ تd&7!`@GƊ+,/FeW>Tu 57]X,͚5J=kIXZӵE.Ƶf9\g%e|vqD]i!Ⳬ\r(J&1 C0 8Xrι%c)$(6 l~O!ɬ@#HkdZ3yAK1Gfw7o6,G>#35_C0 [^=C?ݬY2evU];xy͖<[AaT$]wHjb)ۥ!`!`c a}(偿 2I9q"녂ՂBz99N:W( |Fڜ>Ü{}ԩۥ!`@QBiR_|EJԑ@=ul6i҄M6mڐJc\s.)3Mv9-b m G+쟂ͤ-Æ- O zQي2!`!Dŋs~oI;&LC #A] n&vqd=8&Z9qҔ(Kˠ!dWoLRR1#(2~I C0~]Qz6{C "ו#d [@xtE {LH#u*3ZzqvrY8.$K.T!VqǎFi.|`'N\`nP}Lf1LG!`@1G2ñms=ȕ#[nvNYkJ[ ёy <A6Y9aP(,ZBN18nP7xK8tdOa4 C!i,bfc(`] .on~:& "O . 
j$H+ ,rٸi=3l2`G3f+gygcTv\!`\u|7L]"s;SN×kX0 C` 2C)N}WKJۿLq'IH$ޥU^Aaf !s'U%]:dzv%ԅ[.JpH`mC0 խ+ў#@-cǎgh"ح!UVe:j0CsD]uKB[MvFw"7ǯ 0g`ƸWkh.'\bxF`tb!`%Iq1cdh$;B]@'>ZJ/t>VϥqmJs`>. K΀TRM!`@,RaB.9s&0*!w"9^zQ gA7_PHǨ<*q08b0Ng9:8͠T>~ҥK]Aw޼cفcdJ0 C0*M'}W 3If @:6 gT0N9N-#Qv_ϔwts> d_؆Ȧ_ -@)!b!`g?@my;#m}8F& "O}dۅnu ʃBDQ@frЈ < r |2a]q;|a@FuNJѣGCȄe3a6QaO>-Z K\EvNBP.Bim:aNP.Bi$XVT-* rKr+ڈ8 ^i&ٳ+v)SKJ2]zժU#$:I C0 "ʕ+_~2C~ Hn޼9wdYknkҎSʵPŚtt[6!3$p@fx5q>czB#dnݺFf<0 Bխ+t&|` 2?s):Δ`aܴ.]`TmOFOQ"IHPSxrvML0T~B3N:E`w-[ ĸ\^Ӌmj)hBC0 C"b!t~2:P򌬉Hġ(TNNJ>,҈@"֢X]D䠝{ | N2C|d&}2!`+&Y -A}' /}:a8j 塪 ,,땦<&CGͺdx~F,gwڼSd7Sv پ^d pbCACs&1 C0 ,+g > kQqvd0s>L:us<29c':9h6yLBdvB^%gLg*!3$|j4OZ͂-qWo:{5&K&9<3 C S.SH"3K >_lDRb]{HIKtݻwK4 𤁲,A! Z.uC,xAyPvr<ΈVN6='mKClJ!4G1;NX㧋1|™2eʑG_^%zvi!`4-q⋣FbK.Q!w 'J^dQYy{ݥX0harJy9qg6Y6]!:td^%77 ku%Bu|I'tqDZ2#y%j$ X(OM>9꼕6l*ڽTT+W|SwkӍVlU\F5*iP#ZDS6yg~x2J[EJhX|e[wLUb.(HTb_b5kt> {. U)88B+VMaW4KE?(D-*BP $]R*5SZNi6΂ B, 4Ge.)Gj8d9 oAB>bƏ!-86 C0 5r(':etdm۶̰ad][r) A! AyP2Ҏʵ$#z2bP T]}} ڵkvzI1; [N?i$ߏ2N&kX(B$_ tꂗ?\K|^;uJyN}7nw4Q>7 ʧ7g|tˏmΗVo[{Vng?~}}k&'I)#O-v <QuZ8#`޺}X"TUVAsY}%uM9ZEf 0)065mRf6E_! AzRheYbM7FD[kkz([Z){b5]a+V3Xt)Av!.DԈ@ %C3X6}Mh!`'XΞ}Y\u(N/dXj֬ɢ^uZXzÂwRp"D!(¨M'#ZYq=yxԈx-=4-;5gDwzq}%B|'[f ͼ}m\CfCe4hoΝ;Sdu(+:R '{ɫ&ypZrZ:qwjuX~dݎpрu[|%iL7ou/ϤRچ@a1aCjKQ>Dչ.e.iI$`i.q=5ȴ8f8=m^":8.r>\VᴍF KJ1 C0!`![XpPꁨp\ul=^}zJ_cT{`A$8}ݗuS_FY}7gL&Y%#1w=LȞ1^)_ g'9"D@%J cW%XGfnʄ3eKxm#FONu#S¶ +/۰sԌkϫe]u-ٺA]_}`otݎ럜5=b8yr^j6 D0ydj| ~:lxy8`[nF5RZPH_-$<<( ERrbL)-钎<2w|x}ѣAтD|=8 'pr$8&4 C0bY{{̑JFgB+c925"ĎG"Sʃ Z)7='OǬXfG;Wa+[ӦM9r$.9<}w G;gAb%pW0ܝ[&zn; ]a~n"n}y'cnذDিQB'6ef7jy7:4.?Y9v\‣xpsJ8]1C˯n،'|w9{}wi 8[ɋ,rl;s޻`v܉0ᡶ]t9DlP+fX2UJd["!ݵ\I#ASnDFoE%nДrN&m8#YA4iB%CD%Yp8N>|OM!`Xx1{N)%٠A#W]p= y(:(~ЈwZ"BgMʁeg$7eItIGY,k}tȃgW :u*d&A '858fX.EMҹs&CYu+]طQq)onF.ynVJ*U,[ʛ*\݈_|]2 㠆S-]ro+7kdH| vIڋWaƲUZիw/L\^v-8:eyOgh:OM]v뗘">nPZGu>C>w~Ƚ%ƒ3czݙ\)]᫺*yc; O<[= l C@0o@a \gDIm޼L‚Ǯ#6++ DmFS s8E(K#2D?AGzL)l;e7YqIh.%8 ">\r5Ǎ#{SN||k!`~:N8K[M*u>-Z%QAGչiZ]V:ҋFoIΔZKSZ֚RZ-]k82Î2NBfJD7 !3 k0ฏmt/U{o-yA'`_X xK Spc|׸5[GzrQEo_ eJd˭ȥ\:  aӟυ|њGLY}R:uPy&/l{8!U]{a_Tq6zy4ud?\ί)~d6S%jҥ]U'Niq~g+ۼc7}k!PL`v,[*@ 9s^S&n iذ!Xj:N25ZS[с~%f=e="`lYƈ˙e18`|CUT{5hAL{m!`),U,Xt/̒@f [c 3l=}>zg %YYtl֌}i珲 07193cYe,zM$ Bi<2MlZF,d&-|ܻ,l,Sb:wvct6=򍻎㘏=^ս1q~MҮ: IJ0"A!SJn,ܢ #ic9g zK>ZWsqlJK \;'RnrF!`!6}O?4d#X axdN982C貞r=Ճ)G'#c[)YN)OiYf5vB7Vs@'zKKd8gMni#8 N0[-G\>Bf; Ϥ _o+mLyRёKw~6 OUWםԼc*7D1 {?F[܁٫e?]?8{K=c-=a/\se\:eXoUϿX9n5Y~pZG:帿#o;T ߺ4BY^[cK$]5ޟϭ ԢnErfn_iK6V>nJ k}79z☎4MnykP\BY($/9s֭Kڈf0HPAyP\rJ!}Ŏ($v2Cˆ/TfÙ*?Xryj'M^xT'E;f 6^nnJ|-[F a!`,p!d ;K_mi7}M&޺#W }Q}%qՍs~7n8l}A,*p_??MݹG{GLYuaF[FNu}i 7 }А.9|O9hd5ž"(+}]#gOG%˗/Ǎm6EA,.ZW:|ໞb/7+4 C02d,Xs}'wĽ'8 #ڵk㚡DYќ\uyXn>܊JGj k^zD9xWw3IGY,2*BZ4 v$#2QY8-Nu #b2ѣ37IGߝXZ{WYu~Ngϖ#ԩ}iO~/ZRjﱰqv-6vY%{)| C? Rҿ9%Vnmh>X*ȳ4[Qr~gV}O\G,60Z8#`޺caÆp6a3d s9dvIPQ r3캸A N[ ㌈\+ѣ"D_yPash"tUV%o߾-[OdT`|Lb!`da2< Yz*I d]v<7giA]<<}6\-emDAa'LJKL[ ^8~m]3cT.!bkYoGL齉  |rT>Jueͽo|jƔ˺9eI 7}~_oV8$Uh^ Eo˸7@W+wq.^LE?gWlӪboM㫯;/9(AkVj6.I8J#`3KӦM{&%n,[ ϥDGJqഓ'M/[E]vAj9;}@5REQ-l,C0 C~{ԨQ#@o5xj( aE*K#BSN9:['IQOrBmA[Zل]/1g9AN#3bP&áݻwpSe>ZhiӻaSp5?.lG/]A}5N[t&L 2C$ ʖs6m 3Z. "p9m _q66kݰ+8vː6PWq]G#nX u?xh=dqxHqF逶5Fw~W]9>\{nXWZξpp'6ٻ'wNOWxD"kM{m1sMu܍s8( K dP 4IAuEU裏#aGMи.{,U.QSWZ5$̥(DQ wK4ĔnH%fQȈrT-ޜr*l>>P[vqOB)FY%[!`@:L7}t)@>mky[i'{I!LW/O|B 6($v䟓}|ϟuZr^eO|D jWtN[Y /vwҝ㞿(|ѻ&1}ǻȵ5 B¦x[7sL\uya1@e؂ AUNik?"3n0fAN ự~zx-{FAlA >%0'qM0 C.2,RseߑSTVMvձN2v!3*Tp<5ttRXtI f|ŒHLwæ#"d*u|)屗I3͒%C:UxPqz}u~[Jbp|ar2%o<ŰLN\{be~t+jskJVixC_9D@F,d oD.OVo]z4jQ"C PJ1΁c1ѩYy[Zz< :5? 
h&x٠#%g]͵J3@玁z8`0͖5BgiYTxʚȃBmkX@AڲkeϠ\:#feOYL顱 jH6숲6)h qڠpɃ^v-?U"YYY=zU"Z0 C"`CBi֭[Lf(Jd椓NbEYso$\Ot6D?e6$,Bt#j6.ьJ9xiNiDO/NYFPk8 ISzC.Zt$QȌ9FQHMɡ ;BuWm_a'UZիԸV^9EᄄM\nr:DJ<ۼnJet-w-l -up*ʴ=g>÷|o|ZfXҾtm?G޹M޾r+i}y:5ZaM_wߣ߰z\wm~Ӑ?j-^^9LW'g\r_u>Q;&1@u5(|`yǧLٯ&ӹ㴬N:ᬡIUBR*5]䴃FB[bY$ڬSwgykS"cP)<άLo! ӦM#6[~ С Lyc!`l4BfpҽkIXӱYò׬UR+kM$h[" 㔵\,h!<*AIWQ3"rNρ5 nqєݻwTR:dƳ̥} "׵~rK,>;֚RL钒KV'~õH>ڜ<ͿϜ&+/MN ew܏2eحi8-\|0o]yEI,YBUgy.l9|Bl>}(A-^|pTBPڲLG J;hD,]D|$@IDAT(֤)rmYάw)6]#nPz2N9\?Xu\͛i&.u҅6d ^ݬgZԬa!`eر>ː䘼^(QHꮦsڲd:toYM!49DoE%ڔ׎SN) XqF-I׼ys XM#K7hi15C 8fܠ4fvdj;SYLAjU.0|dv3W]v++=8܂P).Sn'N$l?خ]-Z8g(Ki8#n7K4vp4KU>[ 00B,=C=;5 F&O&2 C0VZbŊqAfLf(L!8q`9YlY:2z<2+儝Y'#4׀hܸ0v`;2r9#wd C >S0Y7%kwOOQwm.[WWq-̃F\u㎅ 8, j 6i iv15C ADqDyO^yn=1N8s.RlҮbmCp!`޺lf5j믿>r4oN>dJQUu4eݨ\KPȽrЂi\OCZ6[NH\w,? 8#"ϖZ,h!r| (zRGIrDHgѳJ]k!`Btŋ?#mȐ!Teuw\ CJ^r,lMC,8Ŏ[A! "O :Π6-Ў3"rmJ *c . d-pP{7~yS5 B67r'[lV+pV[&lyAtn w}.]fv۷/J{PՅReNY3 UBMZ9n,5{#k;~T:4{ ʃSNt|%t՗ @ OWU]\DYr=)|+l7ӱ6 5lBe!P0o]{eEy 9v o`Fn$ #MHljMPAך(50GFAAP& ]_HC Qrg:~JUr])N׾}{^g$etQIJ#`!``!̐ 9Y~!B\\dїe+nY6R.Éh4r;-ϲXpr{\WhBPkvQ)S~,RCo&rҬ!fFpA. C0 +hoXχ=F2H}Ulj$$ 4P۟$*ut$h&0#bD >AyP)s)}5ʸV EHe}P?N!(s@#lAh"BJ3IOG0BNOc'x+(Ti!`` 裏Bf 6TK&3_ժUK`y+ԫdP2ݨ\KPHiY */ӖVXK$|H,Qł _d1 3 J]2іi돌Rm!P0o]A~;hnB`N e1<C<UaG1f X4h :"D?*גA$і)L--LP6];NYAEt\Ci5ݠ hҤ ύ5/mJv}d= eqw5 C09,X&L=z4< 3jB ɏR_ _.uȺ,@RMwK\KZ(x┵A֚)-keodZYnMɍw7܍ ͣp+B0Y3;5!px~ipI]+V>,TJ.s̙6mڂ `$͕=g8m۶u {E{i"&Қ Lx:(<@ri.۶m4q YPQ\MY0 CHe$I&3,gTY<ds8()y .̇^J<<]Ff8Q1}r.A&':+QǾ#o?Yϼz6 dDoPyL=!`D?)hʹ6G|wZ['X#0o]l$!fݺu?IIpaF)袋p[o4!]<88<* BśC&m-tbM7FBzɵAi)\ϙ6|zC+(A+ɰb9B ʃBچ!`CtWb 6k,%w2C~@&,+/t;ehem9yYwKe2KT5^pD~zWM|8EDQַ!`o]1kxC/(nA͡(!}؈NBvΚ /.DWAsٙt  \A E.E4XpEER-riGA0~M%"ܥKBkyC ʃBϠ]!`eL-|N D$#fJ92R" i, sqW?J88)qnZ.`Mg/^~oo%U_}9Yk&VG*k= D zOZjHY_o_ߤGs.,B{?p iSuZT3 ؈ӿȿ51 FIۿ=LhxU#f}Zk9'vӇ Wڵt֧{5'v?o~d7NQVXAx q 60rJ&$Y&Y]?t+AtqǨ5f0q*W)-et Kۈ84Hneֵ롾 ]TS q!5{4 C0Wڼy3 B a{w)4 ]#rqrq"׶Bf]Bq **BՏs+ ݎPVˮ+t2ҹdHAb!Ӯl|f+)52Bq^! ]V6h:W/l\wʂ$FDG_a߿$@-$ׇЊ9^: '~nҦN}w|& /Aϭ?pN·AzLWN|v-Oۿptn.})CgO? 
)c?Cu샾n`aݚ5k1HqBMԍ#U!+]#=LYWrrMSZ+}UN$䳫L+ײ_k%3Y]h~޸Nj9'/MYjed_!`D4#iP1d/1?ȺnTV5Ot J HeWZɰݕ8wlhh E&=ztQQ[]uMBo[.JԈI=-m𳗴K }[RSĘ޵rEf w(Ͻᇷ Ŀ>|cՠs?ƞxT5nNOtY#޵b?[muZu,Pp{`YFO;|UCui >lmJC|Uu_r_,B@-):Pnjdvl6EvQ=[ x+[Q&DƷ|pTW>t r/;+;CѺ< 7m]k)NTsw۷^?ODu ͑F *-:5!ں=vm˶o_|vK(g(0W({+mKWٵ{ʽ8=r׷de4˲ܬ}yٲe~eU]r.Ԑ+*JD9NBFnE+!` 07nd8#f2 3d1#7̌g%Ide)򨇼 ]b<-z^W^ *DY5)CʣW+L鸁2j|VH,+** 3!3hѢK[O綮 y'׊ZH_SXa @6!:${ i즌 vtx+F$T'us$ZR9aBB7 H54]]?ABu9;B f/?F'sc בJ Y'ՇKG~{F^e&H]Vjɬ޼lC[VrE(Z$#/O+^5g#!} `ѺVO_ HR{ _Sp*Yl%y (θݷ^W<*]ʮ{je t\* iP>e䔴y-_ c\ߢ6:L݊nkU!`!$\4`,lDªw &f͚ud&4< K[+{!es)kWىSVv$p9W9#(3 ;2,#2CCf\; )x^ak-qu<8t6a9.8\o!75y؎-k8Eyr79ݰ!/VV}tO:,Й<֤)~YU}8}UӊjeEӭm2dԄp+q: |MtS( K$GtޱۆR )gw5~ MZ!pk>0w]' W}#e[{GGnNɰHʋe_%I Mclw'/C3kɦBop@,Xo.T7(Ȃ^}xo}j-ka3wݚ\4֕[#wۺv;v,#b 2ĵk.6+bv$(!^!*!*$MաhAM Z Tz 9zLG.$Jb%*G* 7@%Bf}BT9*IwZ0 CPӱ*@s!35c$o>4$NXtdJޱIY[uC c*ך[9\ɖCoCh9e>"|MBuX  3|h |&j9YdnH9R[h LwIf<"d!Xj$ʷA-N=ڜJT'Z]ef)C-Fv)_FO@eWBuJ!m*x!ߺUVh!j?*2!`EIGˊN=;R 1Cfvj,&CN6N2FU}Uʷ}еZWޓ-FEC0MyW- ٶmk$zdd T QW/q͆{G%!SGo0Z$*9V@xd}iI$f +S9Bˮ {0syiU]XѫGfg_=6Ҷ{>Vw>oicX3"GIlsDU ϶_mJZGžc5?x7vtt'*8VO/"{Eu~sѮl%`%KH;w.^{I8K4*AQ5iEnZvC&(A-){^ ┻} G5*5W#Y]X:~P {d$!4WBȸ*{^VQ &1 C$;, !42R;A[ XpC;ЕjR }W3*'Mk+ qʻuUNz:EBf}d_qXfXOG..Ev-v+Sv+88[f\<+}uY^z²"??K]{`쑜WO_Re7|,̙p1G7~ r+|Ľss~OBp֎k>zoPвwz# ްxc=C(;mg+/k1iDɂ)CՓ%-ST!Aޏ7T6 AǮEȇs݄=D$ b!>Xj'G [b; U0(L*Ek d'CwGn~0Lf)Mžv.yW Y0 C`!4:B`brD%2Ā8 =} `^js!K0[f:ҰyYv1#!TǎW!3;bu֢޶m$7.7JHO=9axs兿z*}I?}0e4˛?yB~͸߿5Pk?yp7Mm[7qD +=7Netl)/>-pj#;rڬo%!#0V7.n!j{5y8ہ{gV3a0V'8Q/y("D%T|eEv="`ѺW?>w<$`I6Ʋ2kӦMrڵLPЗjUmC6].*{Z 'Vu(KZpx(rk!e+ 8ebG@X/aRq??|#0sIM+Lh%N+UB0 C` arr,\~=c,%;4OfytaT9ByJ\Pٵ#U'w u@ hW+w-kY-rPM q!3\l̬Yla08ydv[_CMc\7rPME q^M2@??/X%]v3 *֟ޠH= -¡ǫ]դu18Vұ?Tp!V/W,c屡?Rߖ{Gŭ7J5w:_ߢ5} >} vNuukkEfzz~^W?\#|W )~w+o,Z׿oHji]… M&O@pO\AN(s)т+)\%URv_BWUB}ʮkґD[J) j{۷D)h.db$d0d*ZK0/??ӊRMۣ!`@G)kQ23v؇~}# =: !ZD`52ܱg̼zH, pq:jʫR%$=M)Ľ][ټ^={rٳ 2#n tmeW?SCwQ&fFpЫV׀,_W;ꓟ='fؔ7t C9pnӆ{i}S+vUV7tHI^BY#Yq!"˛f]S+]}<M 59.5,+sϽ4VЈTΝ8xزY]H}kwߞ-dS/j-^26d#s2?8_ZdžS{:%Af Op^U@4?iꧯF'X|ǁ p$aK\,c{,;ȕƌ(r0/MMzZpi A0hqrz\#^! \,Wb8)t;~YhSNeyŊwZN.x '6k܄!`!JH :ApRI۸q#c+KJw.-QQWqrîׂ E"۾Lfp߽yG̰)S9%rE,*BW MPMh!phU ?b&s>ɓ'y@sA=]O4BUt+nx>z G%{+>eHjtȲnspJCվBvmTdj8# UBu !`%#8 (-2rkl˱Z[od: BčݨŽJŸyjA%ݚEA)`}qF9H(F͛7e#a;2|3fsy̸0^WQ{_/;^}^y^cd+4YJş+ٜeW]E@$|f^uv;vr+Ev!["]\2n o┽݈SUHA[>*VrWZsbt͒΃t^vsM`'TGV حk_nBzVrtcx% az֬YKTkqvܶ.ܫ{!`KO!3\'Nt#Q9.˩p^˓̼ ##W\Y|f!I} Ir@@ ձ]Bؖц.Ы{evoVu=bj\Zmosa 9ᄇ!`~hۙAK `۶mN楉qBI u!xp5>]xSV6׭e&Br5ۭeՄ@VLDs^zjND% r%UʽB-xBɣfU9*^;&4 C0c`ժU߅<Ӑ #; *m2ÏvMWÖWH8Zs q*溵UФVQUy*b:y8Q!`9! GןBUTz{Tlʣ(gTk ;ſD.~Y$z f b񇿟,Mu_ 2 oWִ!`*iU̸!Ѓ@^99RfG٭)&K>(P;e0*T :*{=F&W=W[}啫jKk;2#FXd y汪1A+y}WQyT\q++!`=+3 _ ѥ5kְ#dMڨwXW;^W#jmB {^!rB-SOds LP!3>bȽW^y„vFʽ#A~,J$p'ΈWqv 7:aƘqZx1Qڹs /l4L 9mFGqGh+BrByT# Q](0e,2We"]R2z^Vr0jS%kE+!`C6#de2/r?4vǏ玎;{u+ f)'ynh!ڄquTW aWdd',셥b& z^Vr0jS%kŁYoc!(Q`%?v܀^kC#`Ѻ~hnbL9("tleR^x-Dx+RKE#kG yS>R}ÝMH-KR%Hs{Sj̺M\VBڣ!`!0KbeP|'\`5j!3%Zp_H횽|kkV=b# cyQXn,Z2:Q =ҮkPʽdm 46_mw?gŏSaŔ.&Vv8!_Xn~qa~̸XƤ4{I^z%W#Y ;JHE|rMwVWW^ [e&"{gl[N.cȁBjӕH9SLgڢW_W^Mh!`8p=! 6@fXaakL@ “N:ܣk* [\p)wn:V!'Mʊ+Xj@O]Z3ҔTЮ67` h? #Od  'v/MI ؿEȇdxcS ,سgP!Gy{5m ~hj# 1{qʮ[ˮ2el "UUUf+؅tZYt)Dĸm+LUB5{ ƽʣKh!`\YIG6Bfzdfc=|$3ɤ? 
[binary PNG image data omitted: remainder of an embedded documentation image from the barman-2.18 archive (doc/images); its tar entry header appears earlier in the listing]
barman-2.18/doc/images/barman-architecture-scenario2b.png
[binary PNG image data omitted: barman-architecture-scenario2b.png (Barman architecture diagram, scenario 2b); tar entry header fields retained in the original archive]
5VrHQK;gBE:E:9jUUO9Mx?>[i7^b{^2qPe se :urg.H(u6ړYӺSu>uށկ IR:O}9[w_~?W셫۫.ؼc3tf`H-`\:'GĒu;ѺDT;*{ >vQyʡ譽` Z4+"ii ,"hk_pj!&{oӻj{kHEcg;@[UO.%jƹBuu*e{oJVn= RGTp5kߩ(]SUsl<3ѓcf,W6|5޹_#e|TC++E=XvַYƣQ"C9C>_o8Qыe;wl;*snsn]Z=f콑_BˏRհ@~ِ2Ezkj0+]T{;tdJ$šLB~5 L]kb^uBqc-sթcN<"'] ӛ=_s>"wl`wSrZ> V{vS\OɅLtۡ7j"dЫG%2+ܲ3P^k- ٯVl-5a/l;>Js SekNZ&B>>C tjٗ=jAJ4~/Q |iUUVXgBVM$zP YHթ C~7P߰/Z5/WwdI/çIb˶|9w$D3L$ `io= ʔ.M8kNs:;i̬u|:'crѺ\yX٩<*.p[gmuͺ6M9tFtkkD NU(7B?r)Tg妖o 1E4Ň]:v9CY<3ʯV)H_i<3ŝPD3jo6jtoykbK>j m:E24kCz։skNz UU*|:(fj!h3ZI噡EM猚V1z]:72YK7|`=*!Յ!yg]49"@n??@N18¯y]s1ֺ_ҚvLhHV q?{-Qr2([MX ji >5 !}|޼5n!eN_V.w03*x)vfK^:pܲ;j|NByЅANI)Us'}D<3zF*q2|LJvg-ߪwdzTn EJ5dmj4`T**V \yuePַ'$(ij D[FoX/ߥq.M:61XN3)Z\u u{5Tw9 Ct?ay/fP?mѶ~yq ȧ_orZ*v1DqRzJW^j`FPkjQcGUE?i{j">SwҘY,H Yʠ^: Jvf-TN5[)۸Fi'/Մ/jVo?qyޑuKAʳWYS9=_[16_չ.R(vJc_@wk-#AZZ@eAOaKg%ʗ*|{f:5f4M[};w[{iFgT*PX/OaLK]Z)gb[4֩D_lrTmX|lů'\nUrDŽ<>n8=Tg'&h:W/zwT]z]GtMc*| b iz4ѭG;NŠ>sE;jVOeQܓ0䋃vԻSZWOݡ4KYCÍ*U,ֵ](UHnGudGM&7/V˟(ܱ3k lM;&/Էo`ӕd71Kcf3D܃bDS(^Kv)ZVQEviUa=q'.h3ykr TtR&4$̖zO_$YĕnmnxMhĆRqj6D  N@ )kWǎ :4?L՛vV=F^pOקfب=5stsWԎ;xG~t?)z :f*(9z¿o3SSկ{/4)Ljnȳ{JEoU9,Y( Po5fof) eiML:c@Нm^+jsn]W(|mK*hy۹+V@F6Cu /-TVǼE )>Z$Ƌ**̪ALWN(5;лL;4ӆ.))w6U,["=me`)DPh] EB  Q{m:ߌ:w|뜕[ٱtݎw`O0}Zw̆c)UO!Ѫ M~/6P ?e"ԽKjYVK{Pg}nl$Hw'$3}ީSB3tPآSVHӛwD8|/*P۲vَ+ڮzצY{tܻu˙CEX3Ѻ'a^tp5U'4T Ь&i nN|?ޚ\CS?F<.ݶtGm̍d kCɎrg؞^],[sZo~\br h됩L]5rZF_CW2>HT%1dY 4 YSArTr~:Y4F2wFUCi :gfOcTm7vn%:G0!"CCA{ip!UݟѺ žM_ؽ3Wu*;7 XCL^Ia>iYjޙ3a)>uGS@M34Zb}S\3m9ѺKn_4{ʫ\݃_4Vߜg~>n3i$mJ%[)42%QAP3^KD̬Y!N.^gcS\"uR߯sPJ1 }5ֶPAbV6.VS=w_٥-?!oS7ԟ }/4$9}Kmum)T@U5'ʼn'a.zCٹIn?"wJ1; 5~zCEOWͿfVgz~m5 M|9dj'E;~v)=ZTRƽ(9Y֗ -;q9OwqP;?zS +6٣y%K?hi*O|1b3YC#>]Ĺ'frѺ\v@P|zǬç+=Z{V$DC 6(CEYۺpqdGU޹i/_!s37Jb.7gӚ猲6D;3SߌnhqT?ŭ[gf:jiP9  Iosk$zchސ=e.6#{.ks?_V 'Į׾<b:s*tTk[0/g6v}ִ~Sdލ-c8s~a{Nc͙rۈkz(:ZkiB#~`'7Z?.umZѾ -b)W @%TKzUM#+C>j{]5m$XtQUl=Ϳeć_~\aRMu~t>}4?#8 l8g߮%:?gfnZ-7Ǧ騅rB;A 4Jir#`BwiNDu,nXQmV8k9V> lr/Jtkdh!4a^^<~뽴DԫPYnGx#sǾ- `DJvرd*̗SGwSݛU2 Ua>*Ό~+ޜ(̎*ԬX"n2 @n0=a}f##g1墳_FmsW it2YMnUNz475:h&tg9M(Tשqo9Ze8C/x[WY~=z3ۤg*x*Ǝ*3lߺ%]J9F̴*'N7~X]PQsVlK1g:jQPX0Z>=K :Ow2|R'uW5U>|<5Bޝsַ?w}BM[q?sQlzm7,(3*na&șQNvqo'Ss6լ9EBa1My(ʐTigzCTkㅔ-Pe^'j-6*Tmi~;_`6k:ʼn*u/4*V4jft~ڕL>즅u*sY֡zef,o}T̳5c+..xٳc }++kB7};ְ>2yPh]yqժJ uӮzq:l hT?1/+$}vc]fq}袣Nֻ]vO1ip(j/X Ld`r:k?o>{΢G֯ןrZ-!d ڢ[JJ̯;=8]䘯ͻt}/ӎci܏Qε^vl:+ʹMط6׼4;:ۺsϏM=g/OE=:װߘFe=tgg33} g]s<7S]/{/O4^O<}66w:j~ GBa֢0$XZ+DNTmL[ytn{g[C vb몞- E!kW5;Ɛ<ȃ?z5]䳙nP5ϲV&9 ,EJrWSv\NE9Vwjcs4u;ܳfg|:樳 {1؜bzYR`Z!oDqJӦ^9u?VBu~YxgZj:1 o*8u5UXS \k __uLPsכOkҲNej#jr igu=+ײ/ o7ȞTFUEu=Oz71A?nu]zS PTDeŽ@IDAT9ڡzg^wR#Éj-N]yF䅛zv ''ϖ RsOu Ժ[3JsO{ OLʤʠۉ+Oh`tsfzb*ޡ\ pahvPutU`%tSyRunITNfzhiekG.nRtBʯCTqs}rVMv]GCёqs>GIehcdP%2GIc#C6_/X}ش~3V?p9&e!Zz?yKUF \D|/Wx{¼ zcF{zì6>jk/}flBRӰK*!Q$7'gfrZjg:SR T,o[+( ղJuxkYҪ[а9-R:w[0Z *|/0ZԹI1-*CZKul0ehm\.^vv՟r*yp.SꪢѥQ+b\VTN=EreG翙[+syq8k.}ݣ3wKgȡtB'jRӺe]zFE\>SE4L#@ Z1;SpTNQ U0H+X zQqh9n}7z"~/+Ԧ^o~T-!5z S$~0O>͙uq77nz뭐ϨtEL&*jeCVSlUcԊim- yu9i g%_ Ehp3w 18W39%05>ݳYn7 ִJO\J-5:6&B8u쓗U5oJt U+_ܐ'6\f{u~^o>o,B![3!@PLڱszI}}SNhv[IMDujF"@.I@=,}%uʵQ*{ش}xKWu^fnO|1Wsz݊I>`Wo9-m`oUGױqL!moOr<} ǔqM=MV(k)NDLq[3kܺZKO/^>x̔Cx B;j)DB1 P&ZWLKrq&itm WS2n9o<ݱQu@})Jei3;xWW8׶WΚiH]?;sԗ9tU;joqNZ0#?^[B.M*ױMkITQIsȞLj3j^BrC60!>sR΅"=MhҾЦzZS=~dr[T7o:z#*籏\ںl+YӯǛS~eб `c>#+=j ŭԋwemܬҒ ̋.?=r9'ʌx[6sE>5F?kROu+h+{NwiY !!@\ QLeWvmjbqW'ꄆV@ ut{A:w"6؂NEC-*{D<|xLCg8 կOcl6TƻX&fXl;OkCQz7[m ܷs~sXz~' ڷwZSV?`_4C9yƺqeH(iy]FyO]5wŶ+_M)FVbZYOnSN5uX%ŏ7wiHxݎ;ڛ5n:nuz]VL-hKtMrk6ZIpV*٠jUK5Z*|C8R-a?U3ZoAR-=C umDgTv'a-+6R`]{,T " j}mlWmw|5oҘ-96_+*Rbc.N!*:H/s(# QԤOkwl~keP^-mP>?IPJWajܳMl-Y#[qzG7 BSY.xI $+&v=vnu]V|1 ,վr=}zBo7䋃~Mt@'/ܤݶ{R˪2% i+etb^ԧw?nQ3֎F?;M5ӷe~(cÊU^KI%x57ΑÏiHZh]t@Н6Mȁy>#[琷}Dnuzyfb93-Lyۮ}qPfqU ׶+_:匛{+J 
,V0], Մ?*~KCARȶܧX"+Y4v)iӱZeJV㉲ 麡V:~qմ ȃie^,ΟaBE$1ir@Q"Ld,=J/ys㨹M>'Cb̰L d) Jٹ) H{Ɛ/)䇯+ן SgSa +:?[#w2~w(S@@@ W]GMk]u>쬏y+~9~{~lkjrlлghwQ:5@ "tS @@@ ܻ_7|uksWl=v1dlY̙M#)u:@@ydչ;X#9VlŸ'ӗ[1 9H9`QT@@%jQ[Gd _rɪw`M/5aRG7d}d@ P.2   Aຓݍ-;M^ۉ+N[$T /;uq]IDZh]C@@*p~M"{׭Y֕'4Ho Ѻ   @߾shSG]PteH> "@.[(   pDV#uLʩ[غ;,Z(@ {e"{:  i|vgo&xsȂaIB>tqKb묂 vui'%C@ :j& @ȟ?ߩkoo]UA-*#++Y :t(".q۞^B)^$@ȹ_qתVlܹjӮ[v7% +Y|"KnU\ErRr@:@@@@UQ&Bfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc    pX    @DBfc     ( [nVZzu۷W\f͚իWQF֭K(]e[~c7u (G&޳7E͊ p- Ǚ ^$#СCygo!r>~8paʖ-{嗟|SEr˔)Sȗ/W_}UX1l޸qʩ; MܹJ*~9X_츊^vegqFTsg޽;n -K*$q.]j/FÆ q^Μ8}7^r}^{53f08eʔiժU[liϙ @گsϢE%=O>䓃 r,zw]\scz~N9G>w}޳>Z7~g扑#Gvԩ}<(on4duqϩ{8'|yf 8on޼y$Hќ9sySڵk֬Y#<{ܳgc_)x #\Q!w+fO9C3}~oɒ%_~eҥ=c F -iV$4 }~o_czur_F2ĝ^ JX&M7o#7ݖi1*~{+Jxe9y5gH xD909k(-! MG@z3cǎ[p߮wJHy$nql&MsP+;qDCt[XU;=ϟٳgW>fYx{ת*ٕxܹPJ8j^gcG{f͚g;J.WO26m1'O[_nݺ"(07I 4  :m%QcY3RrB(~'p* ~[H,;7E3QlBQshssRl2'@ ْs:t; J~><#/StDTk5RWM4k57cg\Z%:(e&Ry33He>U^=vX+~L$wOL+>3a] # ;69 iug]Cq>9n. *U~W ݏRAro=wOFNb=Gh . uY @{] w_CԻn |hg:ui؜;ZW~ͱbՋcG 櫦ne]E[K%/GS_C]On]6?cgG<~/kcW/uGTQHW$_/-瞵~KI{'_L_ dnu$} n| HVL_awq-黦ۤ )SƝ>9ut8бT׺/1wWRk"`jnݺ]wnA4_G=ڗ{=GŪèVcR|vK.TH5pYwmM;ivFS4@cQ*QUJ%8~xw/WW_}mK pTr'5}]>XN*-Ҹl{n!:3Dsy+U';)V>IC:ЎitŸꪫ>jZDa>{_~+I,ݽ{#6Bd/&A&~sܹ)2_|0 O$Zeԩ]٫Gn=uRA+ɩ?l0eb]tүɩڻwo;Weܹgki]T\*Ġbw2}+uܹA+&kAza#\>]7okqg>Ct*kOvI(uӦ<=Cil7|{yuK曪nOtp|tj|R{2M[MĽ&タh+993zq(np?(oN]#V}Ut$ٳg͚53cu͛cޏky6&Uż2L+٢b$q=>K=MxΖߗ\WF8|{ru ~=oV~P7J>nO(3@v .yH@UT;Ƒ3gO^hrG^,fTQKH"z1Ǹ3]Pc@~~tc}g㏎6ZEó~u3 nѯ[gyF}F}j׿wr k׮^}s̷U~ }WѴ糓'ek8Lt/4:lLgtO?IïTҿI\TB9^)ӫ =v:vP_]x{@'ۤXQ$ZKwA^ʚcMhC=GPXGuGϹR:`=uE}sçj <Jz# tͳgvz6{/Մ[B/ҭQp?T֑,ar'IZXyt)>CegjZU!Ǒa%f;bz FbuLUt[l8y,Vöj;6gDi99^hP:u=CuQDm_~P}gȳ3olCCg/ߴ"ix1駟\ʯ9wGpD<󷲊%/;CqP5mr~'GFQTwΝJq׈8}j+32FǑGIadT#3)\V~< (,_Q/> 1%֕`@˲],HF, {cħ:M^NB"=韤PPf^ΜU}ﭶ{ lb^ cfR-5)ej[dF12csfԓuQweGZc@eԗdG-}Y&,0>ecGx8Lq^X=Fjh6 Z7&Y_a+txscns=9Se\ 4'm>ϗJ538rax)C&UC 6:Xf,,,k; X$a"W>#32@L-sħo5J<b1 5KC 9`UENպ2o|TbԳZ}>5>FԁKbT]a 5RXi};tOQWewB@9U,R6&%^,PL\gv\5 S {~-ٛI`ml&F|9/CvjNL2Mvje'Bh7gLyT7yΟ|zX_6q(j_Rqbb.eeT'ON?d. V^& 5|EٲeBS)ԙ(Q,n OYwN:; ]q1: #+sHG6Z M$u誟![}fYȸdajdNc>=;--{xnU{6cwN8a/O`PsbVnT&-sjM747_^};ZFdU]( ؅(̍q^ARm&l6rxqj~G?Ѭ$Ddoz"a_&X*J밝wޙı<QcHU8< B3)Mt!^2.x"(wQ<`!@]"ztFDm5RG)fY^C`1=-u8[".qǘ$wT{ȕB]WE65$GUacCh.[^_)ghFZfJ^cfgHl:'wr^uV^p>#b;UT,=yҕB=o]#?,=]y>֥Ziݡ|s~n.H <^;~ecDͭZGO#um1b 4V7tC÷P~?>eNR-D⍑RX{g9 ұ*.͟A|Y ڀ`W> ٫dlWJ㰌A.R#3)`Ƣ$g()>̨u[onG1eƔL/CF1$*'Me؃?MV,&4I'a6&&.yK[ס2RP&T eb뗧&Ou0f뢓VבKҞ/o_Pe%..X֤Gee6zkZϯ1{3H^Ī'q]\lu n |.\9{曗jo)^՗4{rrWfu ns'z4'B9[jTg[sMi(3 \8,T>wwU1zp0Z+JM![U朲cXZ<%-\֥{ħᛸej 3SD0"   lVu3)Ka]B+oV>cteh[^ݢ'ɕb E>uzbm^Jf25}$0TFyuC7x]m"XÍ;gb[v ϒutGG$1-zF`gb+պ(b_1)J)\.W?g,"f'i8˛*+ d#U}= $:]cE"Z6 T@lJL.&:tiXPXye¤*b*r _R.OBg$,60_<8V-m2j$=X6!R](0=gϪ6d ѫUΒ"䐩ĵ0fo&|.mJhY: ʟ{26bmO:'9V; mjd~bT!X&}U}ɮ}4q106l0pVϾiNvv!I3M 2%Y AYyvB Cѿh ^,zjBRqx>KaݐJ_IbU V8Mu<+@++#g W0ƇOj|.9()l0aɅY4dѓK>yɫ!I^ݢUK+z1vHL>0Iu!oFX }5KC 2պQnv`T K1s3}9D2V+'ξLSc3{lR ߂X.;!#lT?Շo8% P|x1@D6IafeORՂ1#+:QұYgG{ [<tRk>K<38% U qxZ}ݗ&:b(պl>sVdz0v.x=T?r*e*'zb#Py-NVj]_6~*s`C6B51{3)ٖSf5d{O8%U{9ɴZEٯHgΪ,3Oޱː`(U}IZܥBciv~a5Zk<*D1>IlVEeY!A&VJ#mŤRx*<ԙjsB܁SYaj EUC8m]/#ft2.x2,y2g}RϪZGj+S:iF]ǬW;Lyoo)E^Sϗ*oTtਾmmV\ȥZ0v$*f*! 
04̛`-3aP>.>DzGNiB"ɕ$N?&U3Q.XՁQ3yaVSt^}ffT'( ^9+ Ɣ/6+n@lTpB Y/VGe1^tձ~9^ODwoij&fHGY/`u5VNJ2ɅqhV#,hJy=a莎3O.YB0J%&$f,L7GSix CU/vj]ht6(Fd`K-J[lR 6e2~}ɱ{UGU-JCr,?6`M>5.ۧ9YWZC5dҍ0d ](OvP!Ua}gb- S$ͺDΌg FVבL]Bg,i<IĊ:$"Y"x 1Ya_u܎JUNiF]KzYRC^1| eH<[{~K!ߢ*L͡ϗ2S]]0=8p'匫P5fَ"0w->̶Z`|cKo2kX@IDATZ?TҕaVxzXw*f6_(&0;Z&+ ¸6ν Da*g!hKB (.)/)'`'cx C 7g8c` _敍uO$WJ^mZ!{Qi{C)|y VOm2ӱp3]Y0,G d}<~`m6* #KJg+R sR. R(Yq&TIٰF^=ohBUW]>L<]9+cJ=3COCeol#R@nJ=7Vt2~+d>nEQ颥a#8Ldifu-V:ћ/vVҴyrd61SJ"pKIS~˭Jt!vHHi M['M?ݘzÿCN= h4yK)u(m b/ CLXA/]1'Q_y#ʒmuXcʐm>T^4$=CN|=R ECސTuҐĵ0fo&ܖ {`i֪S.:*:v{iRSȞtN⺨S+oX}LrWT={׻wߝ axn裏=|`!gJ ܤKog':@v*gjvKiH!߆&˞e!辰l wWٵjX|6 ^UP2Erd0b}/'aw)jMzfd85K>dڰyo)EOK+&kT~_V݌5v"yuK tM@ktFA,VZ\l;/'z(aS Wկ1٪aSZ5d8v!V\6m:ʘT v(Y'+a敭EGyiViFzˢA^+GJY]8̳K{*6bYmQ,RrY^o`1*ô!dFNҫY|>E/=ڐ7TTϒgFZ7lZ 0fo !|Epْm:IgiIm:RuLa]!>f?dj#q- ih6m^j,`i!25neq+DnַW?mPb\RRa}I]v* \V6AF_җ? oQ竷4fӓco_c@ב*1>VoZxܥIu56SϦ_baJSlgj1XT731`Uo u(pQ΄<$ V]uU–QXO.;u)ZnX/~B@o)=E6SA{4jളUژwy3UoY%0TFy5%P}F3մw_>)ӠbMg9RwI'jb5f+Ge\zbϪ40òl2)6~`%GxVe׃88>JKkH-GBc׾-`4ώ;s'zuĒ_Ala\aذLG]֋(g{KÎ:IgU ZZ撤avQT 7D,q F4H*ac]YFV6> ,@IHAvqW }m}ɱdvʲ ms¶0DϸK.ۍjrĿOs,V?2ArͶwǓ/Ku4.n3&E!B/KpS(-7 縑W)$Tz{z/)CVt2p(TX˞)m]V[~e`N6 US(f qyynD.$RRL1-+n~zAZӊVßgG'yDӼ"1c-5?iVSFgurl*T}vl5O소 ^ܶ"KQiXG:%+B^ U3HlCG 1Ef,,S8V^yLVG-7H#vf÷4JysFݫK>dݼtz?.^zxK\^h[vV㐃2b'pÏՖEl9WcxT&!|nI 'x{};N6dğbb!{l&gL:ӳ‘i5VvC6`el 6>1ܢ9;AC,>=ż\qC:OiMv #K%ӸsS`9#G/kak}H7+?.{iӗa2r`r:ו/| V@/&|g0D`du_ j]aT;մ~_P!X~x^tE(x;?_ym}C@a|-?c *q0@kW:2(F:bh2pU{{ʓbPck"!!(Bݳ'> ^xKY,$Շv/1!4[6(Yzɛ^:B+NŞ'pBDiTYehSr7뮻P,pȞWdyȊr6D5IBG1d6VurM)0U$AD]:K80oYIQʐlYۗ1M%jvQ T<,fD<[S o-,ç9y5?A;~G,mQ~ 2U3Wehg zfeG y].U@OwYt5\3ɽ4jy$[/[u1LpT͉_!F֥^ \%%ߦ壜^RzxV!}De/$B+#bI`VD<׿5 vh|Urςy($Z7ͫ)8̂  oT:;|N)2. CPƈ;w?L8B#(]JG>TOJX0dB&C(ޜc,N¿:k%0]e;'oЇ(ʩ21MS-qi8feԿ%`$~iҍ!ꫯ^OrWUm:I'3f-5i++gdUM-]\vqG5IB5Ӭؐ{XCK53}j:W <=oi>Ymj6/ ߰!pGyp̦KX.˱nRӜUo0!ӟFVxbL2(ў0&Fk[Ҟo zr5VcIt7ᠾ)}8FYSFm/G?mZJ٧/C,:atQ,p|Hao!pG𽺥ݰjٕ[ j5<ӫFsl@ZV) 4պ&l'TmҊ6ËTǸs\| `R)0U!wWk㐪|e}.g gZW/ia|_oˆ+ێҺwIz?) 
r[ke1KK˫Z+s S0fo& *G|hc]QMˤҳ ~XƧZ-:[ٽ:!Vujx/%dU2Y4xgV ik>gVUa]Iޙ'YOn<4-~NRzu$ yvt16y"S~|xhɫ{c˿ט\$Оv–0 eVu1/>񰽃1M3eҪ n yw"V~\m#VXiy’j|XK=qWUk֪`!(gV`8sX52 AycVaujV)rAO}SCDgfVLrj6_,jW3/b٢WPzN2UymcHyj=ͤ:[*kHG2V c?H g{9.W_z/YI^bƼW%X]iڇIr-fV i2jrEBm`Y(;x>V},p?%V D4c*:iYƈ7nSUUt҅̔F½;luX9>|Y*V%ߤP.硦ÿ=I/M܎.Է;!K0ёW LZ΄ͽ,XJK,& 7]zאUGcTb>'q0Eck-V7v9 G**( a !# }_.-lKkԀ JUb, .++}Zi@iôw A,$7 OO6-ד"0VCkn,|U,"`|b MN<%ߤPj[uÿݼ ͹K5ʭiŶ,[f{ٙg%~|ޜˈkw VːH`4 LQ\3%0/ 4!#㮍'=oC)ڤ= .Fvuo)< nCb4RCc!yʖC6~e54D4^V_~Yܤ%(0d/ &MAJc ɝf8Ǘ=?3, ôYNrOňk;CXOuE1[B辑`^ mc͇E4c6)b/OsnEU)Ӽ_z0a!/T3|%LZ=aO-6CE!x"K)iqw `*>YUW( HyOf >R0l #[ؑ/ezmmŐ$ ;I$  H@'pbd$0ggy)r}lͶ+n,*SxT,$  H@$О+r=daZ-P@`;W_}ugioZdEVj pZ5 H@$  H@$ >#u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3u}`W$  H@$  H` pZ5 H@$  H@$ >#Zg fq%  H@$  H@j7U$  H@$  H@3SYy-$  H / q88TN&?~[[{2cW_}m/4:y\νTj)V[|uXԇN<@*ʤ:ns{&^q0uf^vYv\{W/ (Yǝ~3j|j9fXs9l%9Y<1m?|M w>Iv孏rӋWɹ_!}d |Cʿrs7{L7 =<'f~jV]lwH:['l@O2AiӮ;Qkn|%Mڑ=(8lqz}+rx]O[b'{W^=.#'yq9gXb߷W^}grɍCԨ.4 zR30qSkp;/w͏p(Dfpzzf| 5Yw[f.1ϻ~٧_q>v++]s-4˪Ny2Q0\t'E3O? -NxFoaq1l)<S;L/.efvNQuH@$  2պQnv&wW+-:kJuώ4% ޑѽ>C8;>X??ma\1/Οoq|[=9kcE9>i(2( u߸8n G膇ι#)cԺ_~5`eWot1Llz(z>S.@g\}N ٣^ nM.˥R]@uv`{{8@p>ϱ▉i=޷g}=Gȋo8Ô7׉bm"2I#z%wG8_uqϐ{}SF%åwǂJ{WN{~/7Fg<{OHW(&3o/ Zw^+ؼw"-n H@$ $0gfmK&9_";5GTq.Դ6?6|ϏR] 9~ۂTz.(oxfUK=mxwrӵw>LX{loYA qkj ww }n|}ӵ㥗_+R]ԏvϘ r/Jui|fϥ.gpR]<{gxyQp T1?Mk?'~vu*>_g'ZeSC::ijH#ꖀ$  H@u,$  H``J{ {/zleO^g5&SC7gS {mV?x2u )?.[Sgo;" '1Wm/i1V+SM1xM {@ B0LCcno8OTO|z'bSl2K}bc6/0ݙ]&Gdda8_n*d1de>`HxXtɣO!: }vv}lʈe|3eVX}M'pr ^@,C¼N /s>4H 9:)^C!*g\p7M:O;Tw71!7SYj]c?^vO8>ZZ ['v)w^{ =8MW=A8^t:V0>m 2vb i7NSEQ$  H@(Pf! H@TSNyEu ϒuC8Ρlۯ/0KZiϖ(- dC0rG3[˸K-0S8*D>Mгp%^|kWВGuźGq>6A7vyϷƍng'4lL ܩn4n H@$ Q Z7 BF&fxe:V\t*`X)EӞ~~)+:ՔS~K#}O7il!m|*Գ 08$i⅗&CW.e,2O4cv5}װ\Z~cygJ%3ϿUŗ\8:{8^7#9fMRmڎX6IМi=3M?Mb^x)\i|ζ]B6(8M#iD$  H@@a(,$  H@$Vl>48K+:R`+j;-fcqPZg^IzE䳴.,l{g{bv熟>+؅sDK8vkW/Q3~? 
=c}7|fB po_–'Wiډ%hs1߬Xw?`2)sqaiGcc~߽]m0haj㷤3.+} YKӲۭr >|{7Y|&kaڎvg??zvQj}$I1 [S/ u 9ӟӬ{̃&Yt/чa0eސs{犫e&3X*zHtGGBl9[@IDAT'utԍӈ%  H@8պ1@OKg^;.)͒ ҄̇ X14R.,!ܳL}V@TƩξ;"#s{I, (Ju'~h8z'B@$·&̫ѣy7QtH@$  }uc,$ %֦nS.Sۄspޗ6{ʸ.7Gn֒g&"$#2 ?a;-]^_?!\<]u>r~r=4ؙor;WHw{,ۡ;.{aDO,-w|}Hpl悗}mtK\~V۬6C2=p%!bl?rQ 1WĜ̖ $;k+<ߌ"2 6 zг|g3Ԏmfkmp٭5fJ°"^Z4$  H@X Z7Z2H@ⰿ5ya2pϿ#T̗yVQ R~) H@$ 1K`OcL$Gf}/P<ݥZt7]uDk˺#ZkjĿc $  H@@F@.$  Haً- g,VcSC H@$  H`2$LɰѭdG>zɮVxdmopÉ{#L&?cL$0r9,BsX) ,j]͸[g6bm禟vagc]$h A_|׾1X0_>W-$  HTz$0 5Xc6LF`W"h7QCճ(=u]C1Kw_l.Mj5U@[HuGuT $  'cz(jd K@&R$  H@$  H@!Z7FbH@$  H@$  H ΄H@xgN=W_}uꩧe]f2{n{^xW6|?0i H_yb/ITJ@$  jp _ӟ-X瞟gO>dE:R?|^xf!={Mo袋H?<ӎ\^, H@$p&lC H@o_WHn6 T׳Lh ?dA:3S838cbI@$ %Z7 k$  Lj~e2](ł .袋Xfmװ.!1Bo|k s饗>c`C$  LT&V&k&᯿/k7s>,iWzҿ 6z{:Jaxk5Ia!)wUӼƷH5\l{n/#ξ>lyl&uSM5PoMR3$  H@@OT^zH@dN ,zt]vYS7df)1ylY;Dg)"/r-+K.Gp b,:Hc8ʏUKEYd% }Dÿ7tNH79&'8sXL !=q'7馛Rv뮻[o=<7fp-oYopoiRu+!63 +_:YgE|A2UIsQ L!ю:+}c5פ\p[EFsS Bp7IB{1,5%  H@zK@U>蠃G d#Z8O;4oHyw3cx|F6Ժ-M?9='O?z- %GQG%.BdXi}$E$BZ@ i1N8'; <ͨQBn֊됀$  H@9΄9, H`%@+ tq`Iui`$hR£D."T \1Tqq> nT(եg1*D > ҳ+-jE. TB~UT 8Ju10A( Ju1Y$'4x؝ZVH*Mi[dXQւTD!y~u)u gQEAC2dI4 ^E&[$  H9R$ 7D  DsO4'yvf:'m^xa/ST ty6AL2Y g1kuWDAFI8ɘW`1!)xbBlЛx<1C &r 0yI+X(hlML )w%\y啱cBbdGaj'nbH, <\sQT=1x@`AH31U1GM.Sr</#U) Wɰo9})vX>u0 `|dNo\ҩgf1sk(RSNS(RHtеW2`."n:|T{T]+6谰"D/Dǔz\DrB𤱶rK ID41nHJ)C$  H@ u#A4%  LRk!^r+=!)(GLRKUc:&35%L e}AO_c\g]&EXH{D~m o|9$P~ìa.F.3 ,GI njEI+$ä<[D֛o/ ޱ\BH4^Z.2]ԐnQBA%DWX5+ QʿH{?|ьպ6^0eɓO>9K!>evk?F >?]a͡rT B91~hti4܉@+ H@$0rTF)K@L `n+>#Z?qo}k#P?FCpH^RTX O4ZdDd`Bu/dJܑ'bJE`8P@1AcظC8IbHYu);)t4"c8DJW  aٮŒ1'@&d_$ e>4ATvxF' Rx-5lÍˀt5\$os 訨~x֋V)z] i']y!bT8.u H@$0rTF)K@L ,ZR,lٞBclcziO9jmS+QLTND,"!<әH,Ly,GbUAOa>䕨aID<1X8iCT6S-Nj]E:t*,懤I-rمL-Vb.sޚ%d*պpE`|id#Lo#cZG.MC09d" ӌq2G1! H@$0&~qMS&sUU!fQC$:,$[Z<< p_*ՅYl8rgg Np>ĕp0.OE뼚)3mZ^G9d-:k6H.Y{^,=2pH1{f[(}Bl#V?hfQ`K&X ,G)%!%;GФ0e%b9(?]K+RK_A\܎#̂q1lFpj°[ $VV#zvͼ->.!b15KfiF).kOTX 6wR_:FZ`qJuw]iL1Y1Y$  H@='Zs&( H@o`vߡߵbXqJvD9IfkbIEΰoԓ#MvmRHCc*°l88d(NE}'*F}3N/^QJg( ]\#10 pȬ["(OʊShynQ =.!6jlvmGӳFitPѻK8˘`lI z4cIpų:$  H@FjHP5M H@;ZNJZCuϾ bg. =FQv 6ťkuQ48t^xaL-Tk՟SO=brB,t?8c5\t.2ދ?OFw`6:yQO YT&`\46n@Dh!cKLM|,i,].kS9fR/w&-l ikvS!qE giօd!lf(0}db+ S$  H@"Z+# H@ gm( :%.`f8'bʺ뮛yaФWI'~h.$AJfTyH6blVqz,_ܪXZ(G!TgqFL*xbp[J 3y!P"q6Cb,ŽtQf?> YYt#D g9cBrέa7^T.Gbmv@4/}vַ0iḃyFFĚ?repʿ$  H@%.ij$0+AD.TnmWbvJ,HP]e$8$J487X.]0NKF)GUtCK]"/DD?aDr,ƌjf37wltVe Aco!tM6٤Mjd5+sQ8) N Jͤ:Ά IjQ+Ľ +떀$  H@9u#֔%  LЉ=]߆tA+R|p%BXT+_gu[oTDcQf&n1`4X<#-FbcOTA-ˆ#B\^{9Uؙm.6xw1CBHM >SAc!iby`C"2diրài̽t `+ %Z0&GHҩ"1N!~- "4Ppu! 6f6{ Mr>ĻG10{x-h?}ᰣ4NZe=%  H@zN`yI$0Fz_9䐣:jbL;3<3ԔmA֚ǏX3Ok4LZ`rd* '/ޘ/$5ĜN8!DAw}'Y m=4qP^Ԕ,㢎 c9& b"fDan)x$K\ Lw{|_}H YASV0FSBbQ(Uq~=[('MYM9&K` H6ov&򕯄~zBKO0$  ׭F "ZJ& ,\c+I`>Zi)- 23cH?_W[lV]uU"3NZ @1X8`PniK:I # H@$պ.E&϶eu| :t4C swoيvMwN%nhIu8nLtq o]>!@!r۷g8V) ! B@! yuy, }@͚5;vhO}=Q'B@! B@ o] 0 B@ҥׯqǥCMUUSB@! B@!P4hW=UEࢋ.?~|Jqա~J.B@! B" o]|)9]ti۶mn۶mAB@! B@! @.ӈʞo.b#nEh|5B@! B@!uYX%QD 8숤eꐠB@! B@!P(Y&9!A8뭷ÎHC=~kDGfvB@+~>r ze/ EcԨQ!6mZ(fI ! Ey+G8;xxxv˖-ꫯ^¡%,j! @vKwi{q[fܹ:|p{B@! e#Z'xbjլus]uE*!B#{b_! (8[Wpޅf"2@ҥ=sE'AAB@!o zر&Mb8v4B@uuh2B c$'&$dH! PydR3EF'O>}:G1֣G'N3gؠA}X~}:u/%Jฉ5k kJB@!PX(Sw&+k׮_~_r%:zM~7ryu%aÆrJ׮]s%o„ 3f`NrX+RJE-n=U[nݴi\ m&fӇ 'm$R ! A}2b XFʑPGu%qmjܸq~:RM=a8|m[Qe]F'y ^dpԚ5k1}4 JM! @G@޺4 #@]_|5v=i\Ii.]{qh#j! g殿~_otRM֭K:On.?xСA͠wۆ \m6dWB/ e9dj˧i*}4 JM! @G@޺4l#ѧDkOP].oSSb/ӹk8KqS_ݶȺB@!Kׯ瞣>:jmdD{[eM651zSLy(w/is]w݅e^zMdѣ.% O>}v2i87uٶm4'f&B w2" B3T)|?D)btZxJ,)Ld[&_B@!cS&XejԨA2kx4orF5SO=v5i*9X7tC<=eh:*xD@rS.R zo'B@ P#Vtɋ9O?4{.5oܕ^xL6ͻu5xG{V.MH +T\ sNG7: {?g}۝k׺~^@gۦAP1 //:/Zϝ;N#?Jhp<ӈ&] a287խ[N8!蠙RM B@! 
~P{<=NqCr=5L+"mg{zꩶmejp?…f+G$~yf˖-ˢ̕eϏ{kǎӧO'uٲe4eʔyٻцcokm!8uTx *ضVṬp<>3K4e׮]WX7b Љ+B@"uEG8,Y݊ТE ~zʅ:>׿[(fI ! @C{9lғI2%x/~1[RӱcǨfPU0+·Yy8Lf'||駟>q|N$B@! <ʄ-x= >תU˓^ 7[ly!Y0^Wo P! 2@Ճ6޺5kXM:uD,CeUor7oޜ)kva&O_z%2^q0i#.,,y ! ==PKW'ٹR 6r~-=Smw5B@WʣfEaӦMz/ӗ ]u]~嗔tڜu%h-&h0 yqxlkB@! 2?Dw矧4AB8B@!PPl]{q6@;vc'On۶{y] ! >@RJy $v3f{DC DZ|Yɔt%Rt6_$K7rڵQpÆ VN?ݶm9s8I8L)ɅB@Žb ҥK)V3~{B'1vޭnH:Ws޽{7?H2⷇h#B@l!I1 %/索*s2c׾ϭW^yU;aa?կWވ#p&FW?쳮2m-*m0gpI,7#38pQl5B@+8B3 \r[N k׮uߡC#|gEoZ-ZtQGfLX~OݻGwq ={6λ"z!  7pK5˄-[l>}ajEX[kae&λcyy:=z$_~yt? y_jӟϕlgä4Xn݊F׷o_ |cKKUߥ⌒]!  ,ʄ-F!n߾kY971oc9vFruHxر#Ö+WnVni̙P-… *4t&$GuӦMaxE =B@d zyW=ǹ k^z9evڥKvv&Mbڵ+jCw}s77{͛7w ñ`ël?6lk, 8ilj'%ʏB{w}zu6 7n$ףcB#&TŗdRJ~_7_w}'8EY/B@Y*hDժUPluA4h`{M2Җ r ?EB 1-hRx^t.a~wws9;ܝx( ! @(S ;:A u5r1cgsĬ.{YF]./AeoVun6.>e3oȑ 64[ϣ*ǵ9C;0w3a*r 9.!?c˖K P>cPbO?h;N5!/3!bzd! (( -Oկ~K.ԪKr .lAp|Ԁ$bnmj zq/_|7֣Wx̅B` puYWܦN,誣 kEwYgqjD3'[oMS5p\s}чv 6 s,r*z #2A"ɭ6dCfpE_0u%18&z"! (̅Ajlǡ톩G3k_2t*UNjʂw <^xᅤVqϑ3a~@sye / B@ Lѷ`w-1eF:wGqDP2v׺ ,@̒mJ5إc[僽 + K8H瞋T>I B o]Ax C(_<{gqF4$Ç7o܎MIQ0`ÆDFl^VF &@|ڧz ԖCSpGT5§M1y[riq9ԄB!@XVM J`8,N|Sԟ {-Pb,j)8RWJ[*Vnmvu&:Dխ4H5kV穹0ӆu,f3KН8;r {79/$5+>IB"uhbi!|OG=,-YSdKwƯ5kv.WR(S* MQ8{Dϯ2  AW\qEFA1诵W! (tpBE ۽{;tt2|9}UXM ҏ1C!NXlʙ2%8%!5xw&Ѕ]|H~zMiH .c^ᜐ#oLahB@Bui1^s *Oۻj{ソ<ɏ(^LMEB g3 #EsLbuĉ&̟WfSvIGdS}I҈̤tB (GveVȜd;[N#(݊gWv˖-=IA$$5yK#r0UG"T!!tO B. ]uUڵ# . c ! sBf8MF0f2HyDpJBf82+Qփlmr0ٵiFd&1ɅB .ﰕ92OV=J:qlM2^CۆZBH5"]=蠃jժ$NQv䫯 >,; 6c}v[BZ"DLB! B  Yv-UwǍGb@fLJr-zlܸ1QrnݺըQ&l2K0"5y̐ɿ\rɒݔλB@d:QV /:vHaak_~Ѫ؞N_Bm0Za7frSJ-~֏[sk׮̙C]!AdȞ).PGM(SWBB@!P0uAfg { -ƺuw_R8PŃ̐i%G~"3.\j ! @!uy, 2R>}JsСCijTf&P[FHlhz,'?c͸Za~_pJ I1I"Bx"S#ݕӁ!3RGCx]p\>>}ԩS9mg'4hdITM! E@޺)kL1]E:,PPt髯o[f>%Δ9s&7nLHa*75=Nx4t#ӦM#!"2  e/M4!(Ą'-! A S&#)hFb(|HdѶmVZXg\qyfR 3)7 }0ݛwXRB (6>EiӦn`ܹmgAMEydzKҨj Ag{.V^pM܎RS[-!x:':><LԸحL6LP} D怃1{H1(ԗP! @1A SgTp+N>J+@IDATFx]!@ɪ$&;3mСVZkb ۷òK/y䑌8 b ! @Pl]1(#偿!%fZFz-2; |fIѨrR8H JcǎfteMoCD47I.B! xO>om6@fk$9SNiذaB[Јr`)3K:&٠2rH ԅl=^s5(Lqyy E!l8o>˗/ǃ8cJ-ޚ^RF/S $38Du|Ʋr/ф?B.[JWAm! Bx"@uz)uׯQ NDLo!&A2cR^ 9.2ȽM-! 9@@޺.B!6{;v!K1CoP] ! EҥK_|Eʒ@f &-a _>CfQVR۰%$$\ uE&L@f0'D9N;裏f&VycR! @vPݺ"&}!P ( 8ڵkG:j̣v2#Ƈ(B@" o]vO/lذaC a:ͪ.ݻwg _q1>yTߕ1fϙjz8ݺu,vt>Fpf%3;Rjg\6\<(8q" /Nȍ7sv-NLm! B `NXf dO?a{C# @fӠAv OQ2':@ŏ2c|Tq:t 1_ƒB@y\4  :RE^xt8H7{{ͅ) w=slɣH 좓;k֬ &xz̜]ήx7Pi{:B@!  8A ^裏2OdzYOH ww7[2<:|vSb<03oB@!E@޺(&|FKʣk!P[$c:aZdXP$jKH|PLO]zC ]vm,W ؙ/<e ^ ko޼y^zK! c[(6i;n8-!u¸Ϣ,-[īE;[y 8z7<,Dʜ?#;:8(gڴi$դwnenY!xjf$9|wאN¡8ϗNO! B pCf>Uw9W5=qWжm[ dsR -Oٻv [QI2 QfFl 3HKA} Bx"u 5ƍ'Mc osT1c"qPiKSD!a#>47C`!1[=e~% .KиNܹs "5%`hc!B@X G2ՙ2b 3զMt%R6Fdr{ 3g(DƑ S̛7}N0Շ>ȺB@b@Ww@+ _{^xaц%3 U7`S;;`.AyPTF3q9[cԨQd552;wܽ{뮻%?]5B@FӬYn>1̄>ùov̥Añf͚~2tL|SO=ꫯ*!yϻ֓ P}WyNL3qd 1G<U]pKg F'Bc9z+*t-+q5]FΆ<m 6p^[P>qԬUPC!  X F<… 9`8QX!0x] ۍ0PRG-И~{@<RB@+X@pq+.ӦMX # [nƍW[CQu^GXxVF'%:q}g'x)pV8=qs԰[.i.lc{9_~e% -B@\"@.'Nm.QՅ_p147) 6  tP[$A]yHPg9M#N9q%K;q4[ٔ>38 au\! @EWŋoFOn1GSO=4|IlɃAa܈yg6nqrৃ1d⥝`#ؾ}{^ ;)E! ([WH_]0|g ×G)apWN ^D]ԅ$mлF̖d&1.aK X 9]]B@%Iq>&C#yB>&:iӦ UpxI8\wM;@d3`/D>B{ 3UVEnq.B@!PPl]|)RaB.6m0*!w";:*l8B:FQw+( # q I駟.Z 1 {[ 1n0NI ! E &瓾$3DѳJKpPAyP'ϖ(/gʻ4:9S2/AvlCjBffd.Lk\C?0)qo'N̷_W]u߇dMB# o]GaEjN<i s&P(D:vH+]xXSu ʃBDQE3x+(̈W7t= IJD;R7v e/믿K.5PҥB@yAfp})Wd̡ 1"w+7}!hd N 9!3DM:u֬Y9Ró`/!3O&,>+2ӹsgO5xGv~2[nȽAYB _Й-4iF"τb #zjժQ]\*kкVfP ]knq0#FP#Ұl#8a#:r*8/תUmO~~̘1h#q;iw[IItK.F-BH"@dЊ+^{?2C~ OJ{l˖-[ (YBRKfܻV9h|36W^׮]Ij>g+/7?O*[Ȍ{F.Wնr2b9SF,m,s\uʔqB`# o>\z`EmLix4Iq߂ڪ.'w b5mۀvAT.YΥY3/2eP*1T҈blq?/qjk: $B@!P4`L\;Ȍ|v.V9nis̱ټh&e`擳Qfز~hR%hP\jeދ->SLYq;oQ\5+kPT:|֯}{/l|Hu+_Nxn% 5+Ԧ~@)C}7وf3M0^4|Y2x)9_)3cܝWvc$(45XLfҖ#akۼP^[o /_wWfMhQC! 

    r!wy簲^~Ԅ@@̐@u3Co??.z'׋/&*Bg}[֮M"܄^j'ޫmt? pU; rZbwᆵ{놩$Ķ348ꨣ# ɰ\U)OoB B B B`2nbu?qP( 6Fx6q(V(?OzUtuVo~i SbKU?Щ5E/:s9[_TMilbOvVmB@@@4e*W'=y/TNvU@{96jMꫯfs'Þgj뜱loBa֠2l2vVujart-sSnBR@@LC ޺iULδ 5 Z׾VN{uYm{)WsB5twNE_Wwn3̶I77G WK[xU!Gnv;bԧ>*gjx]vq0]w<@@@ecjb;gGTЇ> w~ rձ`w>پcdd–N:"z{JlJo]n5x=wM:å !xf*f! OI ai>KdZ=2/o GC/jN>l?NS '?y~&0l):hbVx+ć{JRuܹz׻oS!ȰG>,0Miɛ|(Hbp{oYaQb[7Coݙ~5,oL<|r@4o4p*&B6 USO˭VYedU;I F?D}|$ӧ?ÆO|ݪհhgozӛCN7׵e_pꩧr׿m0Cjuy'#lpX㮑WOv0 &k;.2!!ny3a-;GUʼn4ICW=o+\d٬;*ۨwq< j,NWUurA-і_=- z?m>sYwHn]JQ\w$f~mDǺqmoC>}C !!!/0€Z2L$u7/ t+GwqǮٍ6hjd!,Ls0w}5.il2x@=dp=9|9xբk[u2:QRFrGhN$垀  Vx{"׍$GE~ ʌ+cɫ[ :tW!X֩^eyaշ sVp$nu50Іscwz.>P44UW]u؃K_Rd3&ҠUovPS~us%0o|l!I`UT2l(K>8#FhJW،xDXΑ ӳ@i*s]QR {nd!R&,<jwfv86mNk&9t˄@ r,^ΞqD\R2{& |+_QdIзumIUsTa }k;k>X6.n! T˒3^6e[>)_[ jՐЇ>ԝXᣎ:뮫Ju >k#8'Zx/{ˬ͎+,y..*ăN#] !;x_0_ ʑnÓ䥞q1. rNek??S08:8Ⲫsz$Jcݹ 2==SjRw]3^z}y$[xCT"'u{Y۩GC Xydy @K_ FȒ-ܲ?P!xzEm{8oPhhD맔BGtU]O3_^g$Q#'L|__YOڔqߥlՉ(<Ca.+esl/8Ԛ ;wRµ!3|No!(P=ig֊0RYruv޵ϱ6&jy,Fj;C+XxKѴCWZfu[;xY3P׸6` nh]C@~sU0|Gs1z-Jo!lޱho$^[ $0GNfJ!!0&_? 4cElmv>K!;}Z˿[ofǥ(t(V*ozCX‘mю mQ%"6tSsJ 6}Ҋ'x+vuW !³}p[Gw\{>3,-ui(!0i⭛_$Yqa޺i } {7g"9Hr`m:AO򕯔!:ܩye@,[-[=B`"vwl'@i٢Ι[m(E%Ғj8W4=X6h]zطN{_nYcg!!!Drl =X$\kl :aF8%:u)ȍNe]v7k^bgy桇K;Ix2@u+((N8SNkzD:[)?r-ԉt/i^5zx^x!; ,[0^ӕ cݣW Vdg}{B$Yk.v};C  -?S&+j U¡xa1vq|opsTWS06ɫw}f6ˍ]O~Ҳ:Ht.*kx!ozӛdHxqCHDNʪ$v )wv Y [29>.9H:u~3JN̩GF C &@udV!fm~Dc9>!';թ EzWOu/؛9=y_,1vvv 1g[p~Dt$\l+t("{H}%{3;:O gVЩ}aWJE ޺džH67x>19#|y睥hb{JսcaVj\#o9.6xcb61]z饔C*֕G7@@@'KH꫃G)ՒbPj_cb,^}s O~zի^Z?~Y!x떿,3!Lԩgy/j-–-K2ӶG~pX5%w8gLƚ=cefY=|͝h怶a۔C B B B`#@|;ԧ>ʞ :׋:8#m)Jl0sΞ#$$qKmC$tڿT !ː@e?C@!{XF'z'ڨ?@ ָ{srQ\ mtayh9l 뮻ȸ ,Qeq9#-;蠃$z+K*W@@@,_+~Нp rJ0 Dj'-/FΫdUU_o(WS5?q%V}Od筳XJyN˞H X[3t.zwYl(mh3ct v9u}w͈}/Βx!r 73cF:1VYoq<@@@$ gWSMR.jB.'y7W*xzidND&l"kOaZ?ztfOᗾsmf7F!!0⭛O+B` ط(';8~x%O['AGɕAQ?4y-c ∥G]e22/X^g?OD5^2_]~a)@@@H[0~ !Ԣmjyd{$PUWC5Unaz8Xamfg=C}]>8YaHy y o<@!Aug?Y{;kU&QIW_"5Ǎ9ơO} yJNͰ\m +K8VeTW#ͻ4Д 뮻:!hJW8K&Q6KFFN!B B B`X(g:dy''K$첋W zMܔ2LZu/V_]oogzv2+݁tbg>=K贙!0?⭛%B` [vst@W5-](K \Y"]?w}Wq<~%NoVK+4t@5P}o[.x\_c5<u+;Pw~GϩQ7DC B` [70U2 T/r#MRjW/NY y-Jս㱭3n*rjzJoeVw5a'`л|vyDK/u25/m!!!!Lá^gyTR_mfɈ*ԭƵS}KTY5gY֫,3u1V#+8JYR[2Oq~܎>vC!t!0)墋.sAEU-@⧓ Υ~Dbwo82~W=+Xz ͍ -ok:ZPbKb#:QeDV[&#c-$`kj /S_WYeyv.3W^P׫*"8f8~W#uz",{wws峳Tf3q ].!0@f9Q֩vy#DtUX8kY-J}$[jrh\: -Ltw= Pmwv0Ȱ[l'>h {@@@5ZŖVcEz`7ލ7ޘ sjfJ-%ޖWeOSS#ψeUNoYpKb7:LIOɾ/iaۚR!!0YGC 斀 PN?޹]5hN.:2ʻ<麅Knh0omnx}pm5Uv^ ;]_G)rމ:5yK^=iÉMS/<ͬz#ZG'Jo$B B &vBoes1vm/HlOeݙt.<m9l}*t}[Xُ0zfvڮ$6GPگtb2̞¯{hVnqSO ޺0=@oRHR_/o5Q,7[rouWަz-m׏e;W8eoݪ m}S&lfw9 ,']WW>y<@nV0% 1tYOwgX^WI}YR`%+ePk\LvD~=a+P YfUޏ԰`|:묳[aw]wnD}kVs4X&z6F}LrrH}3t{;xf(4/5^+P -uX2PYWunհO-}Igv&%ԧXt蹇@nvy'7'O>nf(NZxscIwx+N7W)g?y껟_4<u_[NI2Q lz7ԇ@@@XSN9EHaH,yI}[niyHU3Ƙ{+nޅ~E M jr-}Ϫ>gwoF ^|[^99vu6ڥ+B B B f)h2좋.je?ծ׵^hWCd2XidظNuݧJ׈%#g>z)!fG߰nQb>[IN:e5b!!0⭛E*B`C rqǝy#zk\*I%BU:ldWVazٰ۪gCK0eSZjEMJ n62MؑI!!!`%}cy:Ցլ0^ZIKʼn+=lS'K.X@VG9%2/2!a [DD1o|s7=ؓN:Zᝑj#+V)pfæMjbVñܯhɾ}rȥ;--3 ^24R!!!EJɧ6 ZJo?bLhig(fѲeCe7:n/zb2VYe_:"; S o\PM! vxȕ`UW]fr]J"lDyUeܖռUfݶ-0K2^%LZOg_IzwM6 ?uƭ Nj@@@৳vڬS6l3yu6 E\-{,TV##}ae1\b5K$6GtN0;ì^ {@xl X:kRrvyg\e DVfpnW%ڰv+5вͦgY:_@E}7~68eϰ&W@@@<r)Oy.H}u*"Wە0yR[μR+êCL !Cf|O}Sj+blM_T@nئPr:,JQH51geTP*;*>Jm˸vW϶Tae>{CKiv/alv22꼇)@@@,-_^ǫ^ki/ᙋ+!WEvJ9zaڣLU6+h!CͧڲY\=tٸI} ~n"^u '<o@&xff 9I{lcZ9rKB,9B RWzRlae9ԅ>-kv)7$ Da'ֈW34/{ϼ觟}G>qyl:B B VlY??oZm=}V|cVpG :ⷲNޫeR&Z$Gm[ 0 ۇP}$G?⧣|-XN}5 @nN H|׻e3^RW+6I[ jʉR mW3۲ݔ{z;W5z?[vC$nv+ \L'Ws>aݫ~XZNrSο$ʻ:ilw[]B@@3m3}}_՟ݸVby<g! 
kh iO{V6.U鮡h-4|82H^UM׷e6ʞFYke6^ dWL!B @us6݆@,<У:)\u w]LոdJʪBWv}xl7gЕe8anyOۚJ}#w!~N;Ւ: ~Q7WX#6w SNR[x:'cY O*C B ?=??c_n]4ɉ'(b*vH4 e[Z 3;fD~W]_uo 3e׫^XΤ聜qsSKsJpKb鳯Q`&E"t!0#/>qc4P DڱPK"\=^[o^F,G*+w>ݫmgb/rm\hRIZR\8 fћʓW,UWŝ?k߶GNf!!X{/t77n? ك^}H_=ػ{?w{T2 B%ÑazW\RC%#_}~g?ٖXS]u!ϩNFjT &o\N!F@DF۬Zpڸ4S[-^uvIYuCKڸWtMNUs3:nL\(նweW?b6Qw8-?xKA@@q±~_e>~ǿx5dn2d'o?7U,_/~0w53/ʤtf -m*ySReHeye}XVfU 8LO5UuHW <`  o|P!M^!sN=::iUě"nE+376 n yxrЋNCJ~NO9;vˋlMhv =MzL'S "'{k&~?ϼ!; Yꂉnd4qڕ===aɤDZW_g6l!-x<'I9GAy24,OrK obkH}A옂!aŊH}.!n!c< ֕U)y@"+Y۶m1ٳgx 6?A"d);ݔX֌׃ٳE:n?'4LgNc8ObŮ!bKig7J9A@(bdȮ {ɂ_MpB;~ygpODӮ<uHPd޽{hqyƸ% D+uwwƌիW22n3wRCeDzhr$A@A@!2Cq"u ۷oGI;0E rXJxoV&+"Ø-!&s>Pȹuca7!]FUzXx6),N)Z q)Qez{zuu%J  #'890\lآ-9[=}߾s^[48f Jɴ  Z!sv:9;0 1y'CðjP "3*arS+66u# ``b]8' Rg@5Չ rGނ ֕U)E!C[m/رc֭֭]86gdd u12 ]JD,(I_x8 o"Z.4R_qr.@;,y$)i\iմMb>gQݧR߶ +.[sA@CGwͰ`[GF咙HƤ@V7J 0D_~7(MYYl`vi,*Jj+mpɒ<: %C+8b:| {< $[XPO-@h] 0 -aGP'@:::|rG UfJ֠7W i}U%d >;|0\`V©@|8GϢjH@y=Zcn&*n^}-^# &DBu`[NX\S,> 鿦@^ ^[SAuka{MMMT{)VTAoI<;fAV/*aE륗^^\UUV-Y*!M#@h] 0i~B (`G(h3o<xМPi NX*j* JԡB |.Ys81\>+W,@_ROS쇏Ӆ{ՅmWA@(5ơxͷ|<ȋ0s6#$:Z*Q0$Κ5 4 u &%`;'O&˸6V $lHaK9EuJUBSEA`h4)A`'Cb,x!r ñM6A@쐑+єT2YԱ.៕ЅK$@aQ1;0ZpDbbB7zPIpei.]_?ͷNk/m|A@N:omm5Ӿ><{] q<i,{2L!N!EpW_ݲe 8|YUa. 4/P @`;P; x`VoCe>c% ӎDRq('$0B&ELq͚58ņk---pʺQ|!n}E2D 1@^Cb0n j*e>%o>K{V-kjuzi  l6JTU\Y;rtk+V;grpr/o~馛g<~a3 {7o^v-0lOJab$taF~CnRH_b! J_tE`@-:(HfMYPm (2 F@uFX [B ,aW!#Et|3ћd'$c̋MbiB"XWWg󥾪!yf >2w89݄,coM[\.ֲ.{TY  P]`{ V_`q:J l6.GéRO7eD61(fDDpJPMa$b\XI` 6p앂qM|;<\%z \ {AE@uӋxiF t 1svNvLlOGxz^P>%ٰ%!02IG:*H-cy9:VJUg@6U y*Z:)kA@x!P]2^OMS9}]=EMa0GxG0$VrX2Kp'^z饾>p @IDAT•+Wb1t_m4oeAQRgo" 0HnW P*@AiYn!;0HB"kx,4|9QB)\86-QGS7  SCuϼ6vjG uѢE8dh.x: X5)SŒ 0]ȫغu+ꐠ@UـirU|G! Ѻ+nA` aQ3vC6\@kdq6vn@o4 [;q*4$\ 8R;~jJQA@A@S8Q|;z_l= cA;Fgcp% $0-)722/Z DWA9 T% AA@N ;58, `)ZDаъY^+x^AoTޅ7~f3R_Q&U đ:$"niu)a)xM8FaHEfB\r fC|`bˆdXX NSY  Ѻ?JAx?CxbD@(1gYb|Ke$*炞d( <+_7n܈bȽJ3\4;vBTʅ𤆁BAA@I!R"bDMYfM_|g}\pS6T*xIx<{nxÍ8OWa>a4;{&zh\a|mKA@JDJ CoP1J ipsٴi&8gx!y\RB(x߁zy@udX̂Te6`AM`  'x?PΈ ȁ9;a7oCBax@o4 CE< 0Dph:jB XMCC%[R# ѺR#,A`bX_/buܹ(9X>θː0Li<E۹`xJ!;0(Κ1˹5LHdA@A@0 fGCa@qd^|HȥaOǡ a"އ:Cszi%NM- ӅD I#SAIG #Q$(avQ]q:p^x^r%4)fGhRD"aמ_0'0. Te6c% (V@;dtKSA@Q2A>cPDtޠ$'8 wg!1կ~$`bapB{x' aÆ#Gb\HW5]5Ul[0BGR# ѺR#,A /DP*pR'&uSyCUi (Lj١cqP{p u:te=Mʹ`NFճLdiI=6VP. AA@) @Lۖ۶mYg|]&N*31!HN C=;$0xFڵkaV__ַLZ< 2*ua,4XW[N RB@uӅ \* !(Ro>VD{YXD@  ʈZUA o<`/$4@mPLɂW,N&2)ә??ؘ =Ne_:'12- #-4vCCCa *D*Q'GÐcza]}GyW.RD񈆁 A0 ƌbٲe\dBe`UX&!Jhil/2Hƹ@LZ6Nh)iŸSFA@JbIg炀 "R><8g@QWbgc>av8\qc,{1FF6 b/8L%KT2 xԊJɩns:Nw嵹P]*HD¡X8MV:b$a]j/)- dP[b1$:GQ^d׍.҃V!<k!4 QfX۷'^@%ѭ0%,CH%bŜ㈠av}.4̒dd<#P,cZH}5a6l @@u@U| $`^hv\(vwqNZA˜.i/Bb_ye b4nM@Du< +YT"nʤ\mkj2>`(W_zVsOgOW0%i7xun{ORpZOCO\ůJ,A@R pW:xVao00AeG"t0*++l"IpG )aj$@2vDÜB4,H$Hs0nuuh<ó9To$~8 @Ih]I炀 0A 7"N؁)f!^gR*e)3ed 1;$ƮYnqs6lͽnLu2 B'QK*VYQ2knYUm]{ۇC|: ]MehoUMCK[ۼ g/^ ;|p``8֯g zA,,lv ]A@AGy0"ac):6 08 ;.0>-܂#u0\:Ya鄯8s^eMp`߽{wюT6իW/;眹g-ox$u+<ܻg$0RVW`Lˠe{z"us5W/?yKVtڷ'YV>:K)# ѺiT $PbsՁCv uBV,edRmd8g ,A4)a=qDP.L͜=?e<ȣ{ߋoﱘ6ŋeK4.itc]L:~cϽ;^w]d 7tuלb}wvnk6+UgoAA@D2RA#>ѺQO Ia|r8)L`QIIJаtt}Z~Ϯ>nJ%^)i\ajOL&z)xz)1;>/p14 NQAį üĻ}]$ƺƒடߵRɘb,o=Пٗ)s;v{R^%#8pCgҦtp^L- ֝e A@ȋh@xvp ыd$225pO^<܄) 2F*U ̨m8yw}Gv$jq:\IfNSѤb7Tב{~qh0uD";`s"xvH/oA@A@xXׁ]0 _ *lyC5=sxT&<*lBQdV{Z$G?4vL#f1Ufæɒ4a?:{~~f(|]AiG@u8I !h]FjmB %bvz3>" U.RG=VS?{̅PLq:-TG&.Ì& 9LC?@}Cí}h Gv-H  SAI `\4 ܌\a4  a;AEOݤ4fbծb<~y`%ƞy  Pjl] P\: Ķ@-h"pD( ;/uA$B MSmQ^FF+*N#ēXEhaLaIu>˻^tI;1GhGҲ-@h] P,*QcVCxáCA/"CFz? 
^ !:$<~DW^9Nw4#JF"@45FG=pܕ|z0-uy sH|fujŭU:CѤ볚ms807z-oQz,A@Ad!Cri4`\LrX0/ gtt1;p0az19tot*ᗯ!JFG@$mhJ$S4豚Z8 Hw^CEC̅qfi;a2NՔnvt(Jh5-f4$:{;w-[zv7YUxx,^ނ ֕Xq+SA AzR7H Br`T zE0k=t:*MCF;#}^B$d@h2ؐa`bc8֩栗¼hrf4\}#^\rmb׷mNg6JkiɬѰd2'p0ųj]/[LY3fK:ʖȸm U|[! Ѻ+nA( u|)#lՂ "&dE(#y1cAu2v-W״؝9HG@:ubPW$J mBWRƴraO SW\=, =N Ƣ]H`hQsYO Ɛkǎw .704o  prLa6p*a!<ޠa iE mmm-0zɰS9 `(ҫӪ,kɩ6;hXllT_O7.G#=`T\,824jsAX0]6 6k8WeLM309,apZ-EADHD[A@f?o„@ /<`oFD(fH,YX+jY(@L¸,]-\]!JOeveM%шk#Nbx]6zUޚ !qysԳa;)$@M&4ovA@A@ >b X:"xs0iaxÍ\Z)ein{+0`4,cr:Q\恇ǃ3ze>si[+s5[l%͈ j?- %D@2aKe?F\, `a&ਝ:΢*p"DiEZnHv:6n{f՗!Ў.-2L"Nc7'F#35^wЂ;<0EPrh|%  'N 0"Z ;IC@ 4*ojb|2/ `~YZ= KuwDfTj4҅=[-AVa2md5{!mv8OxDbt$xTAHsA@53B͵H ȹc8vtx83B_@^L*[Ax#MDz &:#M~w[]Ym:.GC`!^ ($pvgoǑƕ_4QM݌EpКj;e^ T; y=1\3v8sL&|ތچd=ݝdpL   `]I0ĹUiyI\AhX|ٳ؋V-2#aZM p#X*%PN+6 cG[kgTB2" qFZbUn8Iv@Q6VA`hݴC*A`8rA]$O|MYP XI隓J7l{+~zS؈hɬtdI-C« .̘bhd 6.\_2Ș]K^veHbwZ,Y$A@A@, 6Lea.s=P/WWev*Yf̬P<kh\uׯG&Y\ ՔnA4VawPt8== h(ێ d"`3/l>dulaX‰/Ue|A@v$v! P,*xþ:5]aEn!ʬdy2O?tiD괓t8`nMQ.Eˇ=,Yh;4 E1-dTp军o]C#g`{xcA@A`^A<'SvA^.Rg& @"TOW*kfMvi@LVD'[iiP44fLU u6M0NX @h] Po2zxQ+lPrBJUAmCv`~m2;i84)H,&E.KCwVc6*<*7CX ik/P0p`ϮxwٴC{هɢ%oA@A@&3~tzn6(K]o6$ԥ*y)ohۜ~_WEo7%RP}0VmkF)v+W_uM7!gb׶i=8['Gނ ֕^q. @+׈s{)lP]4\dYTbÝvl[?+$"xve 1DK֗]sN,m1<` TN)wW; [?[:uZMp[nA@A`R0э]5B6YVU6[Pd\$ ᐩ \&,@Vo 3k/m˔(NR)i BpX*ɷ:Zn!x@\qf}͸,+ԑm5msk&-GĤԴ˖hA@AdQ * s+;~T],XZUvȑJ3oO <~C*#e4,cS˯XG  LԂzmxpIU%˪V[c ߺ暫,vo$>ղfаW^tN57V\vZwV[qlUeM^} }o- [/{{8jۉ- $ZwJ`IA ?_YMsTj/ Q5$CJ٘ؐ7.l7} s+b({z~K[<.K& l_ep4W]+s.ppd ;`(YgAH^  $1a^9T{iacV†$Hbwϯm~@eu} D'z֫mp8X8fF-+ۣ /7v} W_Jv[?yxr`-`m aYBJH5 @La+A`b쇓/XH^U2 `(J4x[ޮMO>|46~se?ڴuǞ~#ȱ@{}yccZCUY9j5l7;nygϗ^Zhπ(ل\3^^ZKĐGA@iBL@^}o63GT3z eUcF/[luM\p?O߸9aP4~02^[[P" Իmv]շ|VVUY̙k{* W6}i7RdHEA ѺSL!fv>Ra,pE W|yW}=u 'c;90Ƌ8h<qy V_}um].~: ]QãE#bSWke[!"   PC!@CPڰ|l`8J]j[PG< fξڷ\we[gH4uBat>]u7̘p8F{7{@̰= \LAZYd W[4A@$Z7-0A@" =XB,@7[7jl*IFƪg8wko]WpsOgǾ=>7FN_>{ּygZqo~iWsqlyY0^Q   P<XxC%`8c0.TN&50O0|xU>vH8ls++Z[g.XxFk,0To 4<,VS[=Y`o,Ts,8tpM[lP䯩Nj%2Xhtdh`xpoh(ndIePEOkb(N5[Bi   0Y@-]2}2q,^}Ӥ< Ű2(6gk{i+hihlU]^bORP8z&fhVN[b \+]7քnA`zh)A`!Lǫd(fE:Q \*uX:x@nqZv;ሜSYÂ2^ go9|Nl,>AA@"O.GB;Eפ Ua?C?Y0s}ꒉPefu8lLÒBҫbhtD]e6Y- jvN7U'%y  PR$ZWRxŹ x ½/lB&i.Ufo\AWTTmL:t-4'S4tLEVYt`J,/- I"`.ԅ 44 (YY̖Dֳzkm("p%` OK]oF׽班xj?!542 -zyg$Sl2%RZ[Yq)^LrUΚ?-n̜/jeZ{"mg5]ѰTve^ѝX9gjGF Ӌ@aF?܋%6(܋U~r>ֳzk 2b4LǷKaFƪ6 K% %@@u%U\ d cM>bģ r{a9GS'YR[(咥:yK$%$YVH)7Xc_ICV_YpCHttw-eW_tuɞο-{Ɋ7h.K_aKO}GH]΅3tG|Y27ݺ3ۧ Ejor7OWKHT @YS140%=4 dfJulQVaRi8$se["`)wq.@'!MܥzPe2P5,Ф.dY5)eY ~T%A#ϛӛPp<̛ pA$HFGxT-].T9tT" o\TYGTNuIT],:%7 06ӹ%*QG" Ѻ+A`<錈 FC{p4SN>$;FO+hQǒyiiݷֽN)MA=n0Ge f5Y,N[,s]0L;KA3@LCC7T%d]u&b6,J2NihIJ@HgSEAH&lI炀 01쇳'0^g@M2P gKIPFU٘T5lÂj*UYuBCA 5@:"ր@zN6aGPKUNofD:&z4+&SUeᅝdo5Y?򗾷Fݟfvu+Olxy¯lW}#֊2șT?d{}_r`V]IM?f::Ȃ"|0u% $>WP£wRUcCPz#@h]ς 0`<~ʵSɐJْ 35y sY,Ldu+!2۰`ث*u2}}yyG>򑚚 >ޔH!0Z8w_|5ًkSlM_Vmu3_>B-^@Vz?/6|SuW{ɹgKsX>Y{R@BP+ jN{sU}Fef=>J1s\KWotu@IDAT}ElTٛ޲qOW5t׃-;[nb.ig̮row[kڬ _&e/p@ٺ358i勫?^rO~#7D.ױMg6V7]z'mV} ;{' lZz4Bn}u][V#Cѱ_{kPTnt&yݿ'?=~_RL@(7~#.{UvRnVkg?DGgq%N?(X7fvo9dJs[aY]L[G 濰W$ +2V/NՕ7.l[Qlov~ NMmyoQc=^D#렦d@TC|5T]RC]jskÙ8|ѧ_h]h#DXt|C p [`83YP؀{1<~p+R,fGճϥ \:J =@A@JDJx !z o *ٞjX ,XUr[9WVl&uS+ #74ˉOu< fEdI|HTMτ"GKO~΅ʯ$5D )̂O{sGb/t6ŗ'i϶VUIJG i(HQH@ͨRӡ [*bƳ]M^4Vd_j5Ԙ"Sl^G&H[v^Zeي$auNaO} BU#ٳrIͫuSYt8!g^ȍk$#Ɗ\uK# %/)ϫ:M.\]*US3(u #@h]ς 01a>JTApe=2WV5MXjX ʪy >t s)GsZǚvfE`ak{պ4'dM(LcD{[B$썅/C_߀W}#?| Ѻ>HPN=MG,&=BoEFjVA:zGO(yѺ'#xW{Fsqdwij&RI,\&v6NEjhC? 
AձhrJTI>?koZ\!k]5mPw0C^D)45R*6B ~Bl߻1 *`/1[ TdԆP gȘfAN@ݸBNdjٿj؎H0zwu;Gu:D0N֍EP%,q%H[/EA7u왬)c֍>KO[p*nu;XF(0&Oʱ|ӋLv\gG@I`ϪX>^2$"0 CR* yTiU.U2 l dQ0 RճA@(+S&!azg E膣I<@Э4gA56TL͒]y xh Wos,Ly.^Ɇ40;|t~~{ZXxGPN)S`kSw?j&q/m,Fp9sny7*;p6r6(ۨ´*}O1y e$c0(t8"zЪ .+)D;DrV njpO+_f"3UXG&PzkXn5Msn++sUog(Ƈvq)GD-1 .AOg|G ЏU!W:>| -'z%E.``>I8%o保r&e;\,Kl=S R(v˂ɺKSiD@uI#2!ug8CP62,s؀Sq:NJf@ N:Q 7}4];b?ZUOg2HI%TE^!+/@TH!?2 FRj`_St-䦠"py=pM(a!崬+?^vM- -mu~Uo J7_ٗValfWgY c2sSI+l3z:8Y9}?u#Y> oϥt$}__s}m6Oc7}nM ZDrtJ1ԉT}ir+-FgMuA& PR$ZWRxŹ LʇTk&dh@T4X ul6ZI$Fx@oo X,HRd6C Yc&>Ht<OeeeMM tY$ ;dopY$ /a9B?Dѧ7@ȯ S߇Jk"t/hg׋ ANml0jعΘ]K'n>AZ?_á?;(;򟣯?@Χ*g;g1Yɿ%GL4~SS,'goo_>.~_ud`}_Iu] 8YY$VcEAou!D{Zvv"ɍnM+K@N =|Y-yה5,Dq>|@ 0j 4 5Rı 4?gsB`i+غ()~Ǯ 5j?/ |nȉ~8>+Йhv̜naG}Ԁo%؏ "q+RF*t~毿MJ8ʖ$LmUUl}?_;[Q2|ճ~TDGnj3ınN o|{YwȜG#JRG)Oνdu8_sqO("]b(859ĵ6r?pu.MGٶkciS6dn=|\w$LʞEN1DcT4CU:yxsOoҨ u}=<=WLMWRGARmc;vBd#/tn|~u-;)/M\I ;&{"/͟R]K[3XgIڰ욋q.#/C= x8 O ׏`Aǎ{嗷n݊RP# h8Ap;X*"n>[ haNA ؝%b ޽8^7]}#\" 0`W  P<;E.("dĮ!^;k?DHDbhb&</R^XcXsH%H"F$9'뜜u{޽zg[joT[&qvr-\%t=D׸ĺRK-\n%c!1Sb5Y6"eD_} &($aGlr~^{뭷^fe7``u.4߾4C?a-)[VAdf>9nzȠxws7iN1o|~ y̗QnNWT`CBx3y!}퐓\kg鑗y ižoJ|p׃U\x!]un/(x׾ WWl[EFm6?[JJqiV WXEn܀||yX݂+zϑ}$^:WyYwS^$^1o[PG g00SxDCvƻ iŊ[&v;z4DB.rrV0HDXH?8uCѶ(cdXHZgʯ aس>˄'\Ce9qƱQF,+ +׍+@[L$A46~xUW]uebf,[vnI}W]uݸlKlW_\sMy1Jl]{9Ji;^S!@L~t#`}FЌGQLm6O^LUGރ PEGEϸw?F3&羫:6U7'_7\k7͗m/dz' *3w~w‘xVrKz3Ïyr̸|~4;[ǭos_V?LUݔ}{yCߜpĩu{ >2s\!M[VZ&$f<|"l&(L |n#6U,?,B=#&|aeyy <˹Ifkj7{rw*kzD<?RU#FЗ@0~K.Dlȑ# B> 1d 5VP bX.yѹ$# {g^~eotfڝP<uٺe@DHDD]Dc/C'1T-FE.;h]+oDT9 ,abz+Zk>$]6bEjI^2˛vsԌrl-m! 8Z1W[nmن!i/ky駇N0?X1_Red{׌v==i9}$ma]L弰g_IʀѲ.;s/e,Eu oRɺٴXeZQwoʽ#b!e;R(ХV=]Xopw!OZ*1oKx:ź?IM,h- 0{smyW/N '6i7^?tK\m@h?IGX+;{_ߣՕ[\¸<[7`*e˻ڜ^s7Ց#eW 6KH=y?_=qgvH\CkF]_No5m|ϼ4qZkDԃ5odEЍSm ](KNon3-:Y!]؄Rv!Da]z)zZb%H:= { 4 $X@O$%y|EeZEBG v*°&8BY˱$+Js̘1gy& +DcGlAtuNP@k<$qiGH=sU$s͗GZߥr(cce;&lBy7v5^Rr*Fe^&b| *'~3tLzx#ު+֒KBʥS:cJqK~\ҰwYZUB0u Of7ߜ0Vߍ !V'B4%U<Bi^)k[3)9[{o=fm\Ce>;yP@N0[nP!OQ,DLBȸ*2&~DZRn[Pz*m2/ n:a$.rұVS|Y Bjd/[Ħy4%3sL3InvliSXNh#DHإJ]{#^Y+ͮvsu77Rac[ OJnf7w{Vڀt- #4'IKjH-'EhN\:e'QtJpO9ڮ֪.d|+>l"C@a)pѕ@x%GGEl3iLjO\%Ф=h8d=uK.KV  4qtr. ͊g0l%xYo~|5?g`ڹ0&s;VA1#_}9aodQY䈸c*R: tn/Og|43AKEK& WEøԘSIT8<Ω>DBZR]H@C DP Axy&Ƅ!tz0X-)vͱD!*^1h1Xڋ!ʦ#slQFrwز (@ tR7p{(?ӈ )3t1" !Bx˲9|DFWފQT,IB*Mb2exxf!KxD}r ̻nZ^5*Tfk[¦R(P-*ėƿ6З.t/6pFr׿S~weEK<꣘⤥ߞ2C/Nw{ 77|5pG/6駳oZG鬏uo 7'jæiB87Wv{@)v]}?6~>jKMARm@ P:E֪>XƒT!/r`,!ƃ͈XN,⥈ZƃUxvA!ry065=X;化Pu:T@ < 1+$yTb<"(L#2!#ZFwYFR[3;uFxG͊6f* ii({Oefn|qᇯia^%X̪kAf}q#׏Io1[ ?SP/]+XV@:Ql]'b)A(Ga:a&~72H~)P^xȸFw)Ԉ|Vn%X2m'i'|2/=kD94d'2[طga+?&Us (U=秘Yc.6@{URK "!NWLFiNaFr:P,ro /+*axS# (Cff(@# Ńۍ10{?mȸ2q:D`تAsVCqirKF<䓱mݶٖC=ua/'#=zh3rfjE`}8A}mg?yoZA.ΪK Sh(EtDDtQGNΎK5t# &\njB+) ]=Y2oePa5G+=viǔxgQ!_m9ZKj.]1/>9zt7n3QQ-uZ%VLmH/S#O>4hmz1n8[љ6n`؏~^{Pv@/|́nH Q&p9ϦE<B90(A嫳F5vLxǿ Dk10cS`<&$P@ y Ԩ~E %#g SGl  \rIfQPYT‡~?Dj.uT=R{ovɷT64̙߂[(!Kwnٵ;mO<7a6۬Ջfj݊ 6-ioc^;3sڵ!vѮPHclji i%NTɓ*h@i<"(=|g̘Q0 RiӦcb|gH0?_Dk|H zŗ:LD}:_sA4tsk&V#nVI"egUO N<@uY.!07t~{~i zi=rv7Ilcd+abi̪ƕXȦƌÜk\r # rc)XD>kZ{^" k![l)Lw' <0nVK:pp>\]w%eLaʔ)T G! gKM>D 4Y0~d%袋B5YG&)-! ?zdvˀ&Rɍ)M>`1pZT-.L 4ٺF=vA3#<%ά=q!\a]h)tKfyCy:sƹkLfǍ8y ,v(PQ1_Nzo5{*~ULz/jt_XuȘpjYBSǴ: 8̃߇C=| osIQ$Nle2n25R9$to?}yb;9[Ӿ8%+@8}{.]VzG^Ry! _jȦҝ=ͶV.՟}/W`k{0Z;C򏵢rV% Խ?,\7ӟӟ_8d_zwx!)r,! ck| d:?s! 
-,Xmf*nEeP hǫ@O`hҩO?3O2?iqFM95H"$)XΠg/Y=ṯX% *P5Y,m1e[Ҏ:(CQf8*¢>=bb,qX_NHzr~NZw-LB)/$`Bre,䙄#)pԝ,aS1<ޘ)0uȅ fk_:t(ǞZ<IVxJD20 .򿷴S0;0̆1B[vһhbN8_s9O]& /WH0ä0=Ivt|&# c~~ T@2֕6Pl07F.Gcr+l.]`ʙ'r)F1=圗ry/8'&)]ԌJEB ($Zznq:2h) O\o:KPF(&#;^|yǫɾxI*.iщ%=/`l(l9e"DU(2?}9%njx>,Eu%3R* 'ziv~v0fʫEzIwHձ$ϳumڎS`Fps:!Iyry`Ư0Wa_]b3~ëӒT#ˍx (@0[(,(@Ё9L#ܧv1癫!N Z)Ku,\Lhȭ -Rq&gG(sئ (Кso1I~-e"W/_WzdDUJeꨟ)|k_|jJMͶ.Po%em$4 \?T?pm:nBb]@c N7:2c&>E7x#WRA͂aIO=q!d B*7Q+f*@|#⋟tI ~FoF <)#G2 I7֢:2Ĕ\e`,g t3͑֊Bz={}z_[_rZV~uHpX@IBx/S޿c-Jkﮫ3UQzI*TYm*0ǗWT(;=Vlʗ ̥@–+92{@~Ojf;ѝ4=_"aÇjI;,.𤸫0 ԙٺ:@=jX:/94kb|AfvmƘpccwP:iu´ hpDeZ콸o}]Utq$<.nJ:1q~Xq e$\:֒Us_Χ竾-l෬$m7$IDATN`-0bd]Z¿tfD-â/䒴Å-ε-tٿP;g°1c/<9&'ܖ[nX.)ޢԹ }Z hgTp]Q'uiz, ԃ;#Y+6oᆇztgM #> ybaZŠ$⵸Z_|CHHw8Kľۅ[! ,Ms6lZHX$e[qK40ַb!toTB7$~8@zd6fHNMfZP+9>cH 7>,Ia]؂(І@ e2]w 91epwz q#`GQBz mR@Dl]|5&@o{]v~wGFbر `r|DFkGX$s:ߵZ_>xSN9֏()[aZ6pC:2=!eFll_FY_/U.ucKR9.0?+h5&aG?Oi>~ӝƩbҫt/vZXgiߺDa_X~o?tBK|rw}\XWX0,+6/kmS.W@Jl]C}$)4?H̝qrKr&]HSpG=0cN1b*P3/P HKQf*@hQG[h77Ypw饗r[OrUEVX[謗)[-ahX:p#=؃ 79f$Zms+UK~ *5, #;q sL $dKe (ІcO8QFŜJ_x뭷Ac[~Ry] 4| r Ԩ3a120B: :_as%lj=;i@,`*P3/P cEzviIl͢եfgEwTK.nJ+SN+zVuaUV +qI'k̟Sq]k}ֵrZ`I.\[xwy,0E:*s#`nn\WH(^'Gf8+J8{뭷Z2(qsvգy.-s7 ԲR ҍGJQc~8IXH'M74OW;CZ)_[`p78mBWHerFoq^?ŴP|pbx`:aN䅴OձrQ3=*y;O;b/d3cZkEyXNޡϮcZ 4,|Ϳo;8zFs|XmdNƳ/`q᲎ 4釲CVT异N56O?aXǹ+et3$taQ1p#~ _|ų:.LtQA[QFB~ZKs(`v . uGDLEW), :8S>0+*l)_fI22ML!2S?&NooXmZ;Zg!/RF*_iYTK픍ٙ?]f6a0.0bTbl*1L:WaO:$~ SXSQe ST-- y9ՌB,(oD͆}mm؏22jw @3<7 뮻RGTGe>7ߜ*tРfkb#3ʩPQ ]wwxZ1a9. EΣ ]V fk'~ j~׊?Hnc9mx6lXkh[`eaNaڑ.\e!#z駹I D)L0mIU@p$l>=Q3:FqD:q(PGOkgֆ;S~*T{1&z t+G6F5O>I8~ +ЩL@;:K " cNa10ѐHugP'(8 c7c.!1pk!, (GϭՕľܾ)}ͻsge]FE-w{kA-j\8;v,EM)2j Ġ)o L$s!#"Gȅ_ wBӾx̴L"#.$(ʤ̻l=Mnyl6캻T~~a#P S?"-|7?7!V VZi%v<"4b!Qaؽː8WU_~yޢ{XaŨ(laⱥ^D_2kB^o >Hʴc4$Jʽhh*e°x"1BG6K]z1IkF㷵>e{[׽]:_k;w}wұh2u\%d${9K`tGq/4?J Ubb8*ٷ;|q7=S .d.Zk-t 1by  ( tY;ӹKW;'0%a;:m馫1?|m5qDw]c A#tuLZe#d$4dƖr gG;Gɀ@a}ه+\}׈/$K7\ f9+^)A!Wk{͌%Zen-RG (u*@<_K)Lލa]܏h\' " #."ab?\^ "Ul!\s%Ky"1nmDhF#PdVb0".ZgFmlB)a)@ &*@%V[]q^x!C*aGH1QGEHwɦ^ @]Dܑ#HȖ㑶c\e!(.L\1"3PNXP@P@@A7|󫯾P0R!.̄qHl`<yvۭ:%`y 6>nɄzA"Gxa^}Q9 2QXL#PJiP@"`|6C 8cDq]n@?JY#-b5FaVC9px#XGU ( 4&?S]zܿudp t\X=5ڔ3h4LWGl]| -ETnz)vaFwwpU.nv a: ( (Pɸ}{|0=HF[2c+\ݶ (Pfjc (1rvܽ׾|pDǶZ ( (caXq4k*@c kݣV@KTP@P@zð7w (P[Vsm ( ( ( (u,`?\MP@P@P@PfsP@P@P@P@Xl] ( ( ( (@ * ( ( ( Աٺ:p=4P@P@P@P@0[WcU@P@P@P@cuuzh ( ( ( (5&`>0 ( ( ( (PfP@P@P@P@jLl]}`6WP@P@P@P) ( ( ( Ԙٺl ( ( ( (@ CS@P@P@P@1u5\P@P@P@P@:0[W ( ( ( (Pcfj ( ( ( (u,`?\MP@P@P@PfsP@P@P@P@Xl] ( ( ( (@ * ( ( ( Աٺ:p=4P@P@P@P@0[WcU@P@P@P@cuuzh ( ( ( (5&`>0 ( ( ( (PfP@P@P@P@jLl]}`6WP@P@P@P) ( ( ( Ԙٺl ( ( ( (@ CS@P@P@P@1u5\P@P@P@P@:0[W ( ( ( (Pcfj ( ( ( (u,`?\MP@P@P@PzX{m&0q{Zm{h8 w (#zb8|@ fJ@u 4 z! 
Ԅ@|mk6R?cL=0pף^uY`Wf͚U)PGD񵭣cPP@Z0vO˶6S P~i{p ( ( ( ( (PLLs ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PLl]1'k) ( ( ( (Pٺ݃ ( ( ( (s ( ( ( ( +=( ( ( ( (PL<fIENDB`barman-2.18/doc/barman-cloud-backup-delete.10000644000621200062120000001246514172556763017006 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-BACKUP\-DELETE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-backup\-delete \- Delete backups stored in the Cloud .SH SYNOPSIS .PP barman\-cloud\-backup\-delete [\f[I]OPTIONS\f[]] \f[I]SOURCE_URL\f[] \f[I]SERVER_NAME\f[] .SH DESCRIPTION .PP This script can be used to delete backups previously made with the \f[C]barman\-cloud\-backup\f[] command. Currently AWS S3 and Azure Blob Storage are supported. .PP The target backups can be specified either using the backup ID (as returned by barman\-cloud\-backup\-list) or by retention policy. Retention policies are the same as those for Barman server and work as described in the Barman manual: all backups not required to meet the specified policy will be deleted. .PP When a backup is succesfully deleted any unused WALs associated with that backup are removed. WALs are only considered unused if: .IP "1." 3 There are no older backups than the deleted backup \f[I]or\f[] all older backups are archival backups. .IP "2." 3 The WALs pre\-date the begin_wal value of the oldest remaining backup. .IP "3." 3 The WALs are not required by any archival backups present in cloud storage. .PP Note: The deletion of each backup involves three separate delete requests to the cloud provider (once for the backup files, once for the backup.info file and once for any associated WALs). If you have a significant number of backups accumulated in cloud storage then deleting by retention policy could result in a large number of delete requests. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B SOURCE_URL URL of the cloud source, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). 
.RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \-b \f[I]BACKUP_ID\f[], \[en]backup\-id \f[I]BACKUP_ID\f[] a valid Backup ID for a backup in cloud storage which is to be deleted .RS .RE .TP .B \-r \f[I]RETENTION_POLICY\f[], \[en]retention\-policy \f[I]RETENTION_POLICY\f[] used instead of \[en]backup\-id, a retention policy for selecting the backups to be deleted, e.g. \[lq]REDUNDANCY 3\[rq] or \[lq]RECOVERY WINDOW OF 2 WEEKS\[rq] .RS .RE .TP .B \[en]dry\-run run without actually deleting any objects while printing information about the objects which would be deleted to stdout .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The delete operation was not successful .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . 
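As an illustration of the command documented above, the following invocations use placeholder values (`BUCKET_NAME`, a server named `pg` and a sample backup ID), not values defined by this manual page:

```
# Verify connectivity to the cloud destination without deleting anything
barman-cloud-backup-delete --test s3://BUCKET_NAME/path/to/folder pg

# Delete a single backup by its ID (placeholder ID shown)
barman-cloud-backup-delete --backup-id 20220121T093000 s3://BUCKET_NAME/path/to/folder pg
```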
barman-2.18/doc/barman-cloud-backup-keep.10000644000621200062120000001150014172556763016455 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-BACKUP\-KEEP" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-backup\-keep \- Flag backups which should be kept forever .SH SYNOPSIS .PP barman\-cloud\-backup\-keep [\f[I]OPTIONS\f[]] \f[I]SOURCE_URL\f[] \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] .SH DESCRIPTION .PP This script can be used to flag backups previously made with \f[C]barman\-cloud\-backup\f[] as archival backups. Archival backups are kept forever regardless of any retention policies applied. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B SOURCE_URL URL of the cloud source, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .TP .B BACKUP_ID a valid Backup ID for a backup in cloud storage .RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \[en]target \f[I]RECOVERY_TARGET\f[] Specify the recovery target for the archival backup. Possible values for \f[I]RECOVERY_TARGET\f[] are: .RS .IP \[bu] 2 \f[I]full\f[]: The backup can always be used to recover to the latest point in time. To achieve this, Barman will retain all WALs needed to ensure consistency of the backup and all subsequent WALs. .IP \[bu] 2 \f[I]standalone\f[]: The backup can only be used to recover the server to its state at the time the backup was taken. Barman will only retain the WALs needed to ensure consistency of the backup. .RE .TP .B \-s, \[en]status Report the archival status of the backup. This will either be the recovery target of \f[I]full\f[] or \f[I]standalone\f[] for archival backups or \f[I]nokeep\f[] for backups which have not been flagged as archival. .RS .RE .TP .B \-r, \[en]release Release the keep flag from this backup. This will remove its archival status and make it available for deletion, either directly or by retention policy. .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. 
.RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The keep command was not successful .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman-cloud-wal-restore.10000644000621200062120000001001314172556763016530 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-WAL\-RESTORE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-wal\-restore \- Restore PostgreSQL WAL files from the Cloud using \f[C]restore_command\f[] .SH SYNOPSIS .PP barman\-cloud\-wal\-restore [\f[I]OPTIONS\f[]] \f[I]SOURCE_URL\f[] \f[I]SERVER_NAME\f[] \f[I]WAL_NAME\f[] \f[I]WAL_PATH\f[] .SH DESCRIPTION .PP This script can be used as a \f[C]restore_command\f[] to download WAL files previously archived with \f[C]barman\-cloud\-wal\-archive\f[] command. Currently AWS S3 and Azure Blob Storage are supported. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B SOURCE_URL URL of the cloud source, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .TP .B WAL_NAME the name of the WAL file, equivalent of `%f' keyword (according to `restore_command'). .RS .RE .TP .B WAL_PATH the value of the `%p' keyword (according to `restore_command'). 
.RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The requested WAL could not be found .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman-cloud-backup-keep.1.md0000644000621200062120000001043414172556763017061 0ustar 00000000000000% BARMAN-CLOUD-BACKUP-KEEP(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-backup-keep - Flag backups which should be kept forever # SYNOPSIS barman-cloud-backup-keep [*OPTIONS*] *SOURCE_URL* *SERVER_NAME* *BACKUP_ID* # DESCRIPTION This script can be used to flag backups previously made with `barman-cloud-backup` as archival backups. Archival backups are kept forever regardless of any retention policies applied. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS SOURCE_URL : URL of the cloud source, such as a bucket in AWS S3. 
For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. BACKUP_ID : a valid Backup ID for a backup in cloud storage # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit --target *RECOVERY_TARGET* : Specify the recovery target for the archival backup. Possible values for *RECOVERY_TARGET* are: - *full*: The backup can always be used to recover to the latest point in time. To achieve this, Barman will retain all WALs needed to ensure consistency of the backup and all subsequent WALs. - *standalone*: The backup can only be used to recover the server to its state at the time the backup was taken. Barman will only retain the WALs needed to ensure consistency of the backup. -s, --status : Report the archival status of the backup. This will either be the recovery target of *full* or *standalone* for archival backups or *nokeep* for backups which have not been flagged as archival. -r, --release : Release the keep flag from this backup. This will remove its archival status and make it available for deletion, either directly or by retention policy. --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The keep command was not successful 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. 
© Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman-cloud-restore.1.md0000644000621200062120000000703414172556763016357 0ustar 00000000000000% BARMAN-CLOUD-RESTORE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-restore - Restore a PostgreSQL backup from the Cloud # SYNOPSIS barman-cloud-restore [*OPTIONS*] *SOURCE_URL* *SERVER_NAME* *BACKUP_ID* *RECOVERY_DIR* # DESCRIPTION This script can be used to download a backup previously made with the `barman-cloud-backup` command. Currently AWS S3 and Azure Blob Storage are supported. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS SOURCE_URL : URL of the cloud source, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. BACKUP_ID : the ID of the backup to restore RECOVERY_DIR : the path to a local directory for recovery (used as PGDATA). # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit --tablespace NAME:LOCATION : extract the named tablespace to the given directory instead of its original location (you may repeat the option for multiple tablespaces) --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The restore was not successful 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. 
© Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman-cloud-backup.1.md0000644000621200062120000001331214172556763016135 0ustar 00000000000000% BARMAN-CLOUD-BACKUP(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-backup - Back up a PostgreSQL instance and store it in the Cloud # SYNOPSIS barman-cloud-backup [*OPTIONS*] *DESTINATION_URL* *SERVER_NAME* # DESCRIPTION This script can be used to perform a backup of a local PostgreSQL instance and ship the resulting tarball(s) to the Cloud. Currently AWS S3 and Azure Blob Storage are supported. It requires read access to PGDATA and tablespaces (normally run as `postgres` user). It can also be used as a hook script on a barman server, in which case it requires read access to the directory where barman backups are stored. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. **IMPORTANT:** the Cloud upload process may fail if any file with a size greater than the configured `--max-archive-size` is present either in the data directory or in any tablespaces. However, PostgreSQL creates files with a maximum size of 1GB, and that size is always allowed, regardless of the `max-archive-size` parameter. # POSITIONAL ARGUMENTS DESTINATION_URL : URL of the cloud destination, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit -z, --gzip : gzip-compress the tar files when uploading to the cloud -j, --bzip2 : bzip2-compress the tar files when uploading to the cloud --snappy : snappy-compress the tar files when uploading to the cloud (requires optional python-snappy library) -d, --dbname : database name or conninfo string for Postgres connection (default: postgres) -h, --host : host or Unix socket for PostgreSQL connection (default: libpq settings) -p, --port : port for PostgreSQL connection (default: libpq settings) -U, --user : user name for PostgreSQL connection (default: libpq settings) --immediate-checkpoint : forces the initial checkpoint to be done as quickly as possible -J JOBS, --jobs JOBS : number of subprocesses to upload data to cloud storage (default: 2) -S MAX_ARCHIVE_SIZE, --max-archive-size MAX_ARCHIVE_SIZE : maximum size of an archive when uploading to cloud storage (default: 100GB) --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded --tags KEY1,VALUE1 KEY2,VALUE2 ... : a space-separated list of comma-separated key-value pairs representing tags to be added to each object created in cloud storage -P, --profile : profile name (e.g. 
INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint -e, --encryption : the encryption algorithm used when storing the uploaded data in S3 Allowed values: 'AES256'|'aws:kms' --encryption-scope : the name of an encryption scope defined in the Azure Blob Storage service which is to be used to encrypt the data in Azure --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python For libpq settings information: * https://www.postgresql.org/docs/current/libpq-envars.html # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The backup was not successful 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # SEE ALSO This script can be used in conjunction with `post_backup_script` or `post_backup_retry_script` to relay barman backups to cloud storage as follows: ``` post_backup_retry_script = 'barman-cloud-backup [*OPTIONS*] *DESTINATION_URL* ${BARMAN_SERVER}' ``` When running as a hook script, barman-cloud-backup will read the location of the backup directory and the backup ID from BACKUP_DIR and BACKUP_ID environment variables set by barman. # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman-cloud-check-wal-archive.10000644000621200062120000001022214172556763017543 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-CHECK\-WAL\-ARCHIVE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-check\-wal\-archive \- Check a WAL archive destination for a new PostgreSQL cluster .SH SYNOPSIS .PP barman\-cloud\-check\-wal\-archive [\f[I]OPTIONS\f[]] \f[I]SOURCE_URL\f[] \f[I]SERVER_NAME\f[] .SH DESCRIPTION .PP Check that the WAL archive destination for \f[I]SERVER_NAME\f[] is safe to use for a new PostgreSQL cluster. With no optional args (the default) this check will pass if the WAL archive is empty or if the target bucket cannot be found. All other conditions will result in failure. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. 
.SH POSITIONAL ARGUMENTS .TP .B SOURCE_URL URL of the cloud source, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \[en]timeline A positive integer specifying the earliest timeline for which associated WALs should cause the check to fail. The check will pass if all WAL content in the archive relates to earlier timelines. If any WAL files are on this timeline or greater then the check will fail. .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 Failure .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Error running the check .RS .RE .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . 
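As a sketch of typical use before a brand-new PostgreSQL cluster starts archiving to an existing destination, the invocations below rely only on the options documented above; the bucket URL and the server name `pg` are placeholders:

```
# Fails (non-zero exit) if any WALs already exist for this server
barman-cloud-check-wal-archive s3://BUCKET_NAME/path/to/folder pg

# Fails only if WALs for timeline 2 or later are present
barman-cloud-check-wal-archive --timeline 2 s3://BUCKET_NAME/path/to/folder pg
```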
barman-2.18/doc/barman-cloud-backup-delete.1.md0000644000621200062120000001133014172556763017373 0ustar 00000000000000% BARMAN-CLOUD-BACKUP-DELETE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-backup-delete - Delete backups stored in the Cloud # SYNOPSIS barman-cloud-backup-delete [*OPTIONS*] *SOURCE_URL* *SERVER_NAME* # DESCRIPTION This script can be used to delete backups previously made with the `barman-cloud-backup` command. Currently AWS S3 and Azure Blob Storage are supported. The target backups can be specified either using the backup ID (as returned by barman-cloud-backup-list) or by retention policy. Retention policies are the same as those for Barman server and work as described in the Barman manual: all backups not required to meet the specified policy will be deleted. When a backup is successfully deleted any unused WALs associated with that backup are removed. WALs are only considered unused if: 1. There are no older backups than the deleted backup *or* all older backups are archival backups. 2. The WALs pre-date the begin_wal value of the oldest remaining backup. 3. The WALs are not required by any archival backups present in cloud storage. Note: The deletion of each backup involves three separate delete requests to the cloud provider (once for the backup files, once for the backup.info file and once for any associated WALs). If you have a significant number of backups accumulated in cloud storage then deleting by retention policy could result in a large number of delete requests. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS SOURCE_URL : URL of the cloud source, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit -b *BACKUP_ID*, --backup-id *BACKUP_ID* : a valid Backup ID for a backup in cloud storage which is to be deleted -r *RETENTION_POLICY*, --retention-policy *RETENTION_POLICY* : used instead of --backup-id, a retention policy for selecting the backups to be deleted, e.g. "REDUNDANCY 3" or "RECOVERY WINDOW OF 2 WEEKS" --dry-run : run without actually deleting any objects while printing information about the objects which would be deleted to stdout --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. 
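For illustration, a retention policy can be previewed with `--dry-run` before any objects are removed; the bucket URL and server name below are placeholders, not values defined elsewhere in this page:

```
barman-cloud-backup-delete --dry-run \
    --retention-policy "RECOVERY WINDOW OF 2 WEEKS" \
    s3://BUCKET_NAME/path/to/folder pg
```

Dropping `--dry-run` from the same command performs the actual deletion.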
# REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The delete operation was not successful 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman.1.d/0000755000621200062120000000000014172556766013471 5ustar 00000000000000barman-2.18/doc/barman.1.d/85-bugs.md0000644000621200062120000000053314172556763015203 0ustar 00000000000000# BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github bug tracker. Along with the bug submission, users can provide developers with diagnostics information obtained through the `barman diagnose` command. barman-2.18/doc/barman.1.d/50-diagnose.md0000644000621200062120000000041614172556763016024 0ustar 00000000000000diagnose : Collect diagnostic information about the server where barman is installed and all the configured servers, including: global configuration, SSH version, Python version, `rsync` version, as well as current configuration and status of all servers. barman-2.18/doc/barman.1.d/50-check-backup.md0000644000621200062120000000051514172556763016553 0ustar 00000000000000check-backup *SERVER_NAME* *BACKUP_ID* : Make sure that all the required WAL files to check the consistency of a physical backup (that is, from the beginning to the end of the full backup) are correctly archived. This command is automatically invoked by the `cron` command and at the end of every backup operation. barman-2.18/doc/barman.1.d/50-list-servers.md0000644000621200062120000000011214172556763016666 0ustar 00000000000000list-servers : Show all the configured servers, and their descriptions. barman-2.18/doc/barman.1.d/50-show-servers.md0000644000621200062120000000036114172556763016701 0ustar 00000000000000show-servers *SERVER_NAME* : Show information about `SERVER_NAME`, including: `conninfo`, `backup_directory`, `wals_directory` and many more. Specify `all` as `SERVER_NAME` to show information about all the configured servers. 
barman-2.18/doc/barman.1.d/00-header.md0000644000621200062120000000016014172556763015452 0ustar 00000000000000% BARMAN(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 barman-2.18/doc/barman.1.d/50-rebuild-xlogdb.md0000644000621200062120000000044714172556763017142 0ustar 00000000000000rebuild-xlogdb *SERVER_NAME* : Perform a rebuild of the WAL file metadata for `SERVER_NAME` (or every server, using the `all` shortcut) guessing it from the disk content. The metadata of the WAL archive is contained in the `xlog.db` file, and every Barman server has its own copy. barman-2.18/doc/barman.1.d/50-check.md0000644000621200062120000000061114172556763015305 0ustar 00000000000000check *SERVER_NAME* : Show diagnostic information about `SERVER_NAME`, including: Ssh connection check, PostgreSQL version, configuration and backup directories, archiving process, streaming process, replication slots, etc. Specify `all` as `SERVER_NAME` to show diagnostic information about all the configured servers. --nagios : Nagios plugin compatible output barman-2.18/doc/barman.1.d/50-sync-backup.md0000644000621200062120000000056014172556763016452 0ustar 00000000000000sync-backup *SERVER_NAME* *BACKUP_ID* : Command used for the synchronisation of a passive node with its primary. Executes a copy of all the files of a `BACKUP_ID` that is present on `SERVER_NAME` node. This command is available only for passive nodes, and uses the `primary_ssh_command` option to establish a secure connection with the primary node. barman-2.18/doc/barman.1.d/50-backup.md0000644000621200062120000000450714172556763015505 0ustar 00000000000000backup *SERVER_NAME* : Perform a backup of `SERVER_NAME` using parameters specified in the configuration file. Specify `all` as `SERVER_NAME` to perform a backup of all the configured servers. --immediate-checkpoint : forces the initial checkpoint to be done as quickly as possible. Overrides value of the parameter `immediate_checkpoint`, if present in the configuration file. --no-immediate-checkpoint : forces to wait for the checkpoint. Overrides value of the parameter `immediate_checkpoint`, if present in the configuration file. --reuse-backup [INCREMENTAL_TYPE] : Overrides `reuse_backup` option behaviour. Possible values for `INCREMENTAL_TYPE` are: - *off*: do not reuse the last available backup; - *copy*: reuse the last available backup for a server and create a copy of the unchanged files (reduce backup time); - *link*: reuse the last available backup for a server and create a hard link of the unchanged files (reduce backup time and space); `link` is the default target if `--reuse-backup` is used and `INCREMENTAL_TYPE` is not explicit. --retry-times : Number of retries of base backup copy, after an error. Used during both backup and recovery operations. Overrides value of the parameter `basebackup_retry_times`, if present in the configuration file. --no-retry : Same as `--retry-times 0` --retry-sleep : Number of seconds of wait after a failed copy, before retrying. Used during both backup and recovery operations. Overrides value of the parameter `basebackup_retry_sleep`, if present in the configuration file. -j, --jobs : Number of parallel workers to copy files during backup. Overrides value of the parameter `parallel_jobs`, if present in the configuration file. --bwlimit KBPS : maximum transfer rate in kilobytes per second. A value of 0 means no limit. Overrides 'bandwidth_limit' configuration option. Default is undefined. 
--wait, -w : wait for all the WAL files required by the base backup to be archived --wait-timeout : the time, in seconds, spent waiting for the required WAL files to be archived before timing out barman-2.18/doc/barman.1.d/10-synopsis.md0000644000621200062120000000005114172556763016111 0ustar 00000000000000# SYNOPSIS barman [*OPTIONS*] *COMMAND* barman-2.18/doc/barman.1.d/50-recover.md0000644000621200062120000000622014172556763015677 0ustar 00000000000000recover *\[OPTIONS\]* *SERVER_NAME* *BACKUP_ID* *DESTINATION_DIRECTORY* : Recover a backup in a given directory (local or remote, depending on the `--remote-ssh-command` option settings). See the [Backup ID shortcuts](#shortcuts) section below for available shortcuts. --target-tli *TARGET_TLI* : Recover the specified timeline. --target-time *TARGET_TIME* : Recover to the specified time. You can use any valid unambiguous representation (e.g.: "YYYY-MM-DD HH:MM:SS.mmm"). --target-xid *TARGET_XID* : Recover to the specified transaction ID. --target-lsn *TARGET_LSN* : Recover to the specified LSN (Log Sequence Number). Requires PostgreSQL 10 or above. --target-name *TARGET_NAME* : Recover to the named restore point previously created with `pg_create_restore_point(name)` (for PostgreSQL 9.1 and above). --target-immediate : Recovery ends when a consistent state is reached (end of the base backup) --exclusive : Set target (time, XID or LSN) to be non-inclusive. --target-action *ACTION* : Trigger the specified action once the recovery target is reached. Possible actions are: `pause` (PostgreSQL 9.1 and above), `shutdown` (PostgreSQL 9.5 and above) and `promote` (ditto). This option requires a target to be defined, with one of the above options. --tablespace *NAME:LOCATION* : Specify tablespace relocation rule. --remote-ssh-command *SSH_COMMAND* : This option activates remote recovery, by specifying the secure shell command to be launched on a remote host. This is the equivalent of the "ssh_command" server option in the configuration file for remote recovery. Example: 'ssh postgres@db2'. --retry-times *RETRY_TIMES* : Number of retries of data copy during base backup after an error. Overrides value of the parameter `basebackup_retry_times`, if present in the configuration file. --no-retry : Same as `--retry-times 0` --retry-sleep : Number of seconds of wait after a failed copy, before retrying. Overrides value of the parameter `basebackup_retry_sleep`, if present in the configuration file. --bwlimit KBPS : maximum transfer rate in kilobytes per second. A value of 0 means no limit. Overrides 'bandwidth_limit' configuration option. Default is undefined. -j, --jobs : Number of parallel workers to copy files during recovery. Overrides value of the parameter `parallel_jobs`, if present in the configuration file. Works only for servers configured through `rsync`/SSH. --get-wal, --no-get-wal : Enable/Disable usage of `get-wal` for WAL fetching during recovery. Default is based on `recovery_options` setting. --network-compression, --no-network-compression : Enable/Disable network compression during remote recovery. Default is based on `network_compression` configuration setting. --standby-mode : Specifies whether to start the PostgreSQL server as a standby. Default is undefined. barman-2.18/doc/barman.1.d/80-see-also.md0000644000621200062120000000003214172556763015740 0ustar 00000000000000# SEE ALSO `barman` (5).
barman-2.18/doc/barman.1.d/75-exit-status.md0000644000621200062120000000006314172556763016532 0ustar 00000000000000# EXIT STATUS 0 : Success Not zero : Failure barman-2.18/doc/barman.1.d/50-sync-info.md0000644000621200062120000000120214172556763016132 0ustar 00000000000000sync-info *SERVER_NAME* \[*LAST_WAL* \[*LAST_POSITION*\]\] : Collect information regarding the current status of a Barman server, to be used for synchronisation purposes. Returns a JSON output representing `SERVER_NAME`, which contains: all the successfully finished backups, all the archived WAL files, the configuration, the last WAL file read from `xlog.db`, and the position reached in that file. LAST_WAL : tells sync-info to skip any WAL file prior to that one (incremental synchronisation) LAST_POSITION : hint for quickly positioning in the `xlog.db` file (incremental synchronisation) barman-2.18/doc/barman.1.d/50-keep.md0000644000621200062120000000236414172556763015163 0ustar 00000000000000keep *SERVER_NAME* *BACKUP_ID* : Flag the specified backup as an archival backup which should be kept forever, regardless of any retention policies in effect. See the [Backup ID shortcuts](#shortcuts) section below for available shortcuts. --target *RECOVERY_TARGET* : Specify the recovery target for the archival backup. Possible values for *RECOVERY_TARGET* are: - *full*: The backup can always be used to recover to the latest point in time. To achieve this, Barman will retain all WALs needed to ensure consistency of the backup and all subsequent WALs. - *standalone*: The backup can only be used to recover the server to its state at the time the backup was taken. Barman will only retain the WALs needed to ensure consistency of the backup. --status : Report the archival status of the backup. This will either be the recovery target of *full* or *standalone* for archival backups or *nokeep* for backups which have not been flagged as archival. --release : Release the keep flag from this backup. This will remove its archival status and make it available for deletion, either directly or by retention policy. barman-2.18/doc/barman.1.d/50-status.md0000644000621200062120000000150414172556763015555 0ustar 00000000000000status *SERVER_NAME* : Show information about the status of a server, including: number of available backups, `archive_command`, `archive_status` and many more. For example: ``` Server quagmire: Description: The Giggity database Passive node: False PostgreSQL version: 9.3.9 pgespresso extension: Not available PostgreSQL Data directory: /srv/postgresql/9.3/data PostgreSQL 'archive_command' setting: rsync -a %p barman@backup:/var/lib/barman/quagmire/incoming Last archived WAL: 0000000100003103000000AD Current WAL segment: 0000000100003103000000AE Retention policies: enforced (mode: auto, retention: REDUNDANCY 2, WAL retention: MAIN) No. of available backups: 2 First available backup: 20150908T003001 Last available backup: 20150909T003001 Minimum redundancy requirements: satisfied (2/1) ``` barman-2.18/doc/barman.1.d/50-switch-xlog.md0000644000621200062120000000012114172556763016474 0ustar 00000000000000switch-xlog *SERVER_NAME* : Alias for switch-wal (kept for back-compatibility) barman-2.18/doc/barman.1.d/50-check-wal-archive.md0000644000621200062120000000111614172556763017506 0ustar 00000000000000check-wal-archive *SERVER_NAME* : Check that the WAL archive destination for *SERVER_NAME* is safe to use for a new PostgreSQL cluster. With no optional args (the default), this will pass if the WAL archive is empty and fail otherwise.
--timeline [TIMELINE] : A positive integer specifying the earliest timeline for which associated WALs should cause the check to fail. The check will pass if all WAL content in the archive relates to earlier timelines. If any WAL files are on this timeline or greater then the check will fail. barman-2.18/doc/barman.1.d/50-list-files.md0000644000621200062120000000142414172556763016306 0ustar 00000000000000list-files *\[OPTIONS\]* *SERVER_NAME* *BACKUP_ID* : List all the files in a particular backup, identified by the server name and the backup ID. See the [Backup ID shortcuts](#shortcuts) section below for available shortcuts. --target *TARGET_TYPE* : Possible values for TARGET_TYPE are: - *data*: lists just the data files; - *standalone*: lists the base backup files, including required WAL files; - *wal*: lists all the WAL files between the start of the base backup and the end of the log / the start of the following base backup (depending on whether the specified base backup is the most recent one available); - *full*: same as data + wal. The default value is `standalone`. barman-2.18/doc/barman.1.d/50-switch-wal.md0000644000621200062120000000152214172556763016314 0ustar 00000000000000switch-wal *SERVER_NAME* : Execute pg_switch_wal() on the target server (from PostgreSQL 10), or pg_switch_xlog (for PostgreSQL 8.3 to 9.6). --force : Forces the switch by executing CHECKPOINT before pg_switch_xlog(). *IMPORTANT:* executing a CHECKPOINT might increase I/O load on a PostgreSQL server. Use this option with care. --archive : Wait for one xlog file to be archived. If after a defined amount of time (default: 30 seconds) no xlog file is archived, Barman will terminate with failure exit code. Available also on standby servers. --archive-timeout *TIMEOUT* : Specifies the amount of time in seconds (default: 30 seconds) the archiver will wait for a new xlog file to be archived before timing out. Available also on standby servers. barman-2.18/doc/barman.1.d/50-archive-wal.md0000644000621200062120000000041214172556763016431 0ustar 00000000000000archive-wal *SERVER_NAME* : Get any incoming xlog file (both through standard `archive_command` and streaming replication, where applicable) and moves them in the WAL archive for that server. If necessary, apply compression when requested by the user. barman-2.18/doc/barman.1.d/50-replication-status.md0000644000621200062120000000110214172556763020056 0ustar 00000000000000replication-status *\[OPTIONS\]* *SERVER_NAME* : Shows live information and status of any streaming client attached to the given server (or servers). Default behaviour can be changed through the following options: --minimal : machine readable output (default: False) --target *TARGET_TYPE* : Possible values for TARGET_TYPE are: - *hot-standby*: lists only hot standby servers - *wal-streamer*: lists only WAL streaming clients, such as pg_receivewal - *all*: any streaming client (default) barman-2.18/doc/barman.1.d/50-show-backup.md0000644000621200062120000000261414172556763016460 0ustar 00000000000000show-backup *SERVER_NAME* *BACKUP_ID* : Show detailed information about a particular backup, identified by the server name and the backup ID. See the [Backup ID shortcuts](#shortcuts) section below for available shortcuts. 
For example: ``` Backup 20150828T130001: Server Name : quagmire Status : DONE PostgreSQL Version : 90402 PGDATA directory : /srv/postgresql/9.4/main/data Base backup information: Disk usage : 12.4 TiB (12.4 TiB with WALs) Incremental size : 4.9 TiB (-60.02%) Timeline : 1 Begin WAL : 0000000100000CFD000000AD End WAL : 0000000100000D0D00000008 WAL number : 3932 WAL compression ratio: 79.51% Begin time : 2015-08-28 13:00:01.633925+00:00 End time : 2015-08-29 10:27:06.522846+00:00 Begin Offset : 1575048 End Offset : 13853016 Begin XLOG : CFD/AD180888 End XLOG : D0D/8D36158 WAL information: No of files : 35039 Disk usage : 121.5 GiB WAL rate : 275.50/hour Compression ratio : 77.81% Last available : 0000000100000D95000000E7 Catalog information: Retention Policy : not enforced Previous Backup : 20150821T130001 Next Backup : - (this is the latest base backup) ``` barman-2.18/doc/barman.1.d/50-receive-wal.md0000644000621200062120000000123414172556763016435 0ustar 00000000000000receive-wal *SERVER_NAME* : Start the stream of transaction logs for a server. The process relies on `pg_receivewal`/`pg_receivexlog` to receive WAL files from the PostgreSQL servers through the streaming protocol. --stop : stop the receive-wal process for the server --reset : reset the status of receive-wal, restarting the streaming from the current WAL file of the server --create-slot : create the physical replication slot configured with the `slot_name` configuration parameter --drop-slot : drop the physical replication slot configured with the `slot_name` configuration parameter barman-2.18/doc/barman.1.d/99-copying.md0000644000621200062120000000025614172556763015722 0ustar 00000000000000# COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman.1.d/70-backup-id-shortcuts.md0000644000621200062120000000067414172556763020136 0ustar 00000000000000# BACKUP ID SHORTCUTS {#shortcuts} Rather than using the timestamp backup ID, you can use any of the following shortcuts/aliases to identify a backup for a given server: first : Oldest available backup for that server, in chronological order. last : Latest available backup for that server, in chronological order. latest : same as *last*. oldest : same as *first*. last-failed : Latest failed backup, in chronological order. barman-2.18/doc/barman.1.d/50-sync-wals.md0000644000621200062120000000053714172556763016157 0ustar 00000000000000sync-wals *SERVER_NAME* : Command used for the synchronisation of a passive node with its primary. Executes a copy of all the archived WAL files that are present on `SERVER_NAME` node. This command is available only for passive nodes, and uses the `primary_ssh_command` option to establish a secure connection with the primary node. barman-2.18/doc/barman.1.d/50-delete.md0000644000621200062120000000021714172556763015474 0ustar 00000000000000delete *SERVER_NAME* *BACKUP_ID* : Delete the specified backup. See the [Backup ID shortcuts](#shortcuts) section below for available shortcuts.
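For example, to remove the oldest backup of the example server `quagmire` (an illustrative server name, as used elsewhere in this page):

```
barman delete quagmire oldest
```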
barman-2.18/doc/barman.1.d/90-authors.md0000644000621200062120000000112114172556763015716 0ustar 00000000000000# AUTHORS Barman maintainers (in alphabetical order): * Abhijit Menon-Sen * Jane Threefoot * Michael Wallace Past contributors (in alphabetical order): * Anna Bellandi (QA/testing) * Britt Cole (documentation reviewer) * Carlo Ascani (developer) * Francesco Canovai (QA/testing) * Gabriele Bartolini (architect) * Gianni Ciolli (QA/testing) * Giulio Calacoci (developer) * Giuseppe Broccolo (developer) * Jonathan Battiato (QA/testing) * Leonardo Cecchi (developer) * Marco Nenciarini (project leader) * Niccolò Fei (QA/testing) * Rubens Souza (QA/testing) * Stefano Bianucci (developer) barman-2.18/doc/barman.1.d/50-put-wal.md0000644000621200062120000000127514172556763015630 0ustar 00000000000000put-wal *\[OPTIONS\]* *SERVER_NAME* : Receive a WAL file from a remote server and securely store it into the `SERVER_NAME` incoming directory. The WAL file is retrieved from the `STDIN`, and must be encapsulated in a tar stream together with a `MD5SUMS` file to validate it. This command is meant to be invoked through SSH from a remote `barman-wal-archive` utility (part of `barman-cli` package). Do not use this command directly unless you take full responsibility of the content of files. -t, --test : test both the connection and the configuration of the requested PostgreSQL server in Barman to make sure it is ready to receive WAL files. barman-2.18/doc/barman.1.d/95-resources.md0000644000621200062120000000023014172556763016250 0ustar 00000000000000# RESOURCES * Homepage: * Documentation: * Professional support: barman-2.18/doc/barman.1.d/45-commands.md0000644000621200062120000000006714172556763016042 0ustar 00000000000000# COMMANDS Important: every command has a help option barman-2.18/doc/barman.1.d/50-list-backups.md0000644000621200062120000000044314172556763016634 0ustar 00000000000000list-backups *SERVER_NAME* : Show available backups for `SERVER_NAME`. This command is useful to retrieve a backup ID. For example: ``` servername 20111104T102647 - Fri Nov 4 10:26:48 2011 - Size: 17.0 MiB - WAL Size: 100 B ``` In this case, *20111104T102647* is the backup ID. barman-2.18/doc/barman.1.d/05-name.md0000644000621200062120000000007414172556763015153 0ustar 00000000000000# NAME barman - Backup and Recovery Manager for PostgreSQL barman-2.18/doc/barman.1.d/50-get-wal.md0000644000621200062120000000203014172556763015565 0ustar 00000000000000get-wal *\[OPTIONS\]* *SERVER_NAME* *WAL\_NAME* : Retrieve a WAL file from the `xlog` archive of a given server. By default, the requested WAL file, if found, is returned as uncompressed content to `STDOUT`. The following options allow users to change this behaviour: -o *OUTPUT_DIRECTORY* : destination directory where the `get-wal` will deposit the requested WAL -P, --partial : retrieve also partial WAL files (.partial) -z : output will be compressed using gzip -j : output will be compressed using bzip2 -p *SIZE* : peek from the WAL archive up to *SIZE* WAL files, starting from the requested one. 'SIZE' must be an integer >= 1. When invoked with this option, get-wal returns a list of zero to 'SIZE' WAL segment names, one per row. -t, --test : test both the connection and the configuration of the requested PostgreSQL server in Barman for WAL retrieval. With this option, the 'WAL_NAME' mandatory argument is ignored. 
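For example, assuming a server named `quagmire` and an illustrative WAL segment name, the following fetches that segment gzip-compressed into a local directory of your choice:

```
barman get-wal -z -o /var/tmp/walspool quagmire 0000000100000CFD000000AD
```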
barman-2.18/doc/barman.1.d/15-description.md0000644000621200062120000000042014172556763016552 0ustar 00000000000000# DESCRIPTION Barman is an administration tool for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. Barman can perform remote backups of multiple servers in business critical environments and helps DBAs during the recovery phase. barman-2.18/doc/barman.1.d/20-options.md0000644000621200062120000000112414172556763015720 0ustar 00000000000000# OPTIONS -h, --help : Show a help message and exit. -v, --version : Show program version number and exit. -c *CONFIG*, --config *CONFIG* : Use the specified configuration file. --color *{never,always,auto}*, --colour *{never,always,auto}* : Whether to use colors in the output (default: *auto*) -q, --quiet : Do not output anything. Useful for cron scripts. -d, --debug : debug output (default: False) --log-level {NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL} : Override the default log level -f {json,console}, --format {json,console} : output format (default: 'console') barman-2.18/doc/barman.1.d/50-cron.md0000644000621200062120000000042714172556763015176 0ustar 00000000000000cron : Perform maintenance tasks, such as enforcing retention policies or WAL files management. --keep-descriptors : Keep the stdout and the stderr streams of the Barman subprocesses attached to this one. This is useful for Docker based installations. barman-2.18/doc/barman-wal-restore.1.md0000644000621200062120000000565114172556763016037 0ustar 00000000000000% BARMAN-WAL-RESTORE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-wal-restore - 'restore_command' based on Barman's get-wal # SYNOPSIS barman-wal-restore [*OPTIONS*] *BARMAN_HOST* *SERVER_NAME* *WAL_NAME* *WAL_DEST* # DESCRIPTION This script can be used as a 'restore_command' for PostgreSQL servers, retrieving WAL files using the 'get-wal' feature of Barman. An SSH connection will be opened to the Barman host. `barman-wal-restore` allows the integration of Barman in PostgreSQL clusters for better business continuity results. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS BARMAN_HOST : the host of the Barman server. SERVER_NAME : the server name configured in Barman from which WALs are taken. WAL_NAME : the value of the '%f' keyword (according to 'restore_command'). WAL_DEST : the value of the '%p' keyword (according to 'restore_command'). # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -U *USER*, --user *USER* : the user used for the ssh connection to the Barman server. Defaults to 'barman'. -s *SECONDS*, --sleep *SECONDS* : sleep for SECONDS after a failure of get-wal request. Defaults to 0 (nowait). -p *JOBS*, --parallel *JOBS* : specifies the number of files to peek and transfer in parallel, defaults to 0 (disabled). --spool-dir *SPOOL_DIR* : Specifies spool directory for WAL files. Defaults to '/var/tmp/walrestore' -P, --partial : retrieve also partial WAL files (.partial) -z, --gzip : transfer the WAL files compressed with gzip -j, --bzip2 : transfer the WAL files compressed with bzip2 -c *CONFIG*, --config *CONFIG* : configuration file on the Barman server -t, --test : test both the connection and the configuration of the requested PostgreSQL server in Barman to make sure it is ready to receive WAL files. 
With this option, the 'WAL_NAME' and 'WAL\_DEST' mandatory arguments are ignored. # EXIT STATUS 0 : Success 1 : The remote `get-wal` command failed, most likely because the requested WAL could not be found. 2 : The SSH connection to the Barman server failed. Other non-zero codes : Failure # SEE ALSO `barman` (1), `barman` (5). # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman-cloud-check-wal-archive.1.md0000644000621200062120000000726214172556763020154 0ustar 00000000000000% BARMAN-CLOUD-CHECK-WAL-ARCHIVE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-check-wal-archive - Check a WAL archive destination for a new PostgreSQL cluster # SYNOPSIS barman-cloud-check-wal-archive [*OPTIONS*] *SOURCE_URL* *SERVER_NAME* # DESCRIPTION Check that the WAL archive destination for *SERVER_NAME* is safe to use for a new PostgreSQL cluster. With no optional args (the default) this check will pass if the WAL archive is empty or if the target bucket cannot be found. All other conditions will result in failure. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS SOURCE_URL : URL of the cloud source, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit --timeline : A positive integer specifying the earliest timeline for which associated WALs should cause the check to fail. The check will pass if all WAL content in the archive relates to earlier timelines. If any WAL files are on this timeline or greater then the check will fail. --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. 
For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : Failure 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Error running the check # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman-cloud-backup.10000644000621200062120000001465514172556763015551 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-BACKUP" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-backup \- Backup a PostgreSQL instance and stores it in the Cloud .SH SYNOPSIS .PP barman\-cloud\-backup [\f[I]OPTIONS\f[]] \f[I]DESTINATION_URL\f[] \f[I]SERVER_NAME\f[] .SH DESCRIPTION .PP This script can be used to perform a backup of a local PostgreSQL instance and ship the resulting tarball(s) to the Cloud. Currently AWS S3 and Azure Blob Storage are supported. .PP It requires read access to PGDATA and tablespaces (normally run as \f[C]postgres\f[] user). It can also be used as a hook script on a barman server, in which case it requires read access to the directory where barman backups are stored. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .PP \f[B]IMPORTANT:\f[] the Cloud upload process may fail if any file with a size greater than the configured \f[C]\-\-max\-archive\-size\f[] is present either in the data directory or in any tablespaces. However, PostgreSQL creates files with a maximum size of 1GB, and that size is always allowed, regardless of the \f[C]max\-archive\-size\f[] parameter. .SH POSITIONAL ARGUMENTS .TP .B DESTINATION_URL URL of the cloud destination, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. 
.RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \-z, \[en]gzip gzip\-compress the tar files when uploading to the cloud .RS .RE .TP .B \-j, \[en]bzip2 bzip2\-compress the tar files when uploading to the cloud .RS .RE .TP .B \[en]snappy snappy\-compress the tar files when uploading to the cloud (requires optional python\-snappy library) .RS .RE .TP .B \-d, \[en]dbname database name or conninfo string for Postgres connection (default: postgres) .RS .RE .TP .B \-h, \[en]host host or Unix socket for PostgreSQL connection (default: libpq settings) .RS .RE .TP .B \-p, \[en]port port for PostgreSQL connection (default: libpq settings) .RS .RE .TP .B \-U, \[en]user user name for PostgreSQL connection (default: libpq settings) .RS .RE .TP .B \[en]immediate\-checkpoint forces the initial checkpoint to be done as quickly as possible .RS .RE .TP .B \-J JOBS, \[en]jobs JOBS number of subprocesses to upload data to cloud storage (default: 2) .RS .RE .TP .B \-S MAX_ARCHIVE_SIZE, \[en]max\-archive\-size MAX_ARCHIVE_SIZE maximum size of an archive when uploading to cloud storage (default: 100GB) .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \[en]tags KEY1,VALUE1 KEY2,VALUE2 \&... a space\-separated list of comma\-separated key\-value pairs representing tags to be added to each object created in cloud storage .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint .RS .RE .TP .B \-e, \[en]encryption the encryption algorithm used when storing the uploaded data in S3 Allowed values: `AES256'|`aws:kms' .RS .RE .TP .B \[en]encryption\-scope the name of an encryption scope defined in the Azure Blob Storage service which is to be used to encrypt the data in Azure .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. 
.PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .PP For libpq settings information: .IP \[bu] 2 https://www.postgresql.org/docs/current/libpq\-envars.html .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The backup was not successful .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH SEE ALSO .PP This script can be used in conjunction with \f[C]post_backup_script\f[] or \f[C]post_backup_retry_script\f[] to relay barman backups to cloud storage as follows: .IP .nf \f[C] post_backup_retry_script\ =\ \[aq]barman\-cloud\-backup\ [*OPTIONS*]\ *DESTINATION_URL*\ ${BARMAN_SERVER}\[aq] \f[] .fi .PP When running as a hook script, barman\-cloud\-backup will read the location of the backup directory and the backup ID from BACKUP_DIR and BACKUP_ID environment variables set by barman. .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman.d/0000755000621200062120000000000014172556766013332 5ustar 00000000000000barman-2.18/doc/barman.d/passive-server.conf-template0000644000621200062120000000166414172556763020774 0ustar 00000000000000; Barman, Backup and Recovery Manager for PostgreSQL ; http://www.pgbarman.org/ - http://www.enterprisedb.com/ ; ; Template configuration file for a server using ; SSH connections and rsync for copy. ; [passive] ; Human readable description description = "Example of a Barman passive server" ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Passive server configuration ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Local parameter that identifies a barman server as 'passive'. ; A passive node uses as source for backups another barman server ; instead of a PostgreSQL cluster. ; If a primary ssh command is specified, barman will use it to establish a ; connection with the barman "master" server. ; Empty by default it can be also set as global value. 
primary_ssh_command = ssh barman@backup ; Incremental backup settings ;reuse_backup = link ; Compression: must be identical to the source ;compression = gzip barman-2.18/doc/barman.d/streaming-server.conf-template0000644000621200062120000000272314172556763021310 0ustar 00000000000000; Barman, Backup and Recovery Manager for PostgreSQL ; http://www.pgbarman.org/ - http://www.enterprisedb.com/ ; ; Template configuration file for a server using ; only streaming replication protocol ; [streaming] ; Human readable description description = "Example of PostgreSQL Database (Streaming-Only)" ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; PostgreSQL connection string (mandatory) ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; conninfo = host=pg user=barman dbname=postgres ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; PostgreSQL streaming connection string ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; To be used by pg_basebackup for backup and pg_receivewal for WAL streaming ; NOTE: streaming_barman is a regular user with REPLICATION privilege streaming_conninfo = host=pg user=streaming_barman ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Backup settings (via pg_basebackup) ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; backup_method = postgres ;streaming_backup_name = barman_streaming_backup ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; WAL streaming settings (via pg_receivewal) ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; streaming_archiver = on slot_name = barman ;create_slot = auto ;streaming_archiver_name = barman_receive_wal ;streaming_archiver_batch_size = 50 ; PATH setting for this server ;path_prefix = "/usr/pgsql-12/bin" barman-2.18/doc/barman.d/ssh-server.conf-template0000644000621200062120000000303614172556763020112 0ustar 00000000000000; Barman, Backup and Recovery Manager for PostgreSQL ; http://www.pgbarman.org/ - http://www.enterprisedb.com/ ; ; Template configuration file for a server using ; SSH connections and rsync for copy. 
; [ssh] ; Human readable description description = "Example of PostgreSQL Database (via SSH)" ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; SSH options (mandatory) ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ssh_command = ssh postgres@pg ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; PostgreSQL connection string (mandatory) ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; conninfo = host=pg user=barman dbname=postgres ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Backup settings (via rsync over SSH) ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; backup_method = rsync ; Incremental backup support: possible values are None (default), link or copy ;reuse_backup = link ; Identify the standard behavior for backup operations: possible values are ; exclusive_backup (default), concurrent_backup ; concurrent_backup is the preferred method with PostgreSQL >= 9.6 backup_options = exclusive_backup ; Number of parallel workers to perform file copy during backup and recover ;parallel_jobs = 1 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Continuous WAL archiving (via 'archive_command') ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; archiver = on ;archiver_batch_size = 50 ; PATH setting for this server ;path_prefix = "/usr/pgsql-12/bin" barman-2.18/doc/barman-wal-archive.1.md0000644000621200062120000000415714172556763015775 0ustar 00000000000000% BARMAN-WAL-ARCHIVE(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-wal-archive - `archive_command` based on Barman's put-wal # SYNOPSIS barman-wal-archive [*OPTIONS*] *BARMAN_HOST* *SERVER_NAME* *WAL_PATH* # DESCRIPTION This script can be used in the `archive_command` of a PostgreSQL server to ship WAL files to a Barman host using the 'put-wal' command (introduced in Barman 2.6). An SSH connection will be opened to the Barman host. `barman-wal-archive` allows the integration of Barman in PostgreSQL clusters for better business continuity results. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS BARMAN_HOST : the host of the Barman server. SERVER_NAME : the server name configured in Barman from which WALs are taken. WAL_PATH : the value of the '%p' keyword (according to 'archive_command'). # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -U *USER*, --user *USER* : the user used for the ssh connection to the Barman server. Defaults to 'barman'. -c *CONFIG*, --config *CONFIG* : configuration file on the Barman server -t, --test : test both the connection and the configuration of the requested PostgreSQL server in Barman for WAL retrieval. With this option, the 'WAL_PATH' mandatory argument is ignored. # EXIT STATUS 0 : Success Not zero : Failure # SEE ALSO `barman` (1), `barman` (5). # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. # RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. 
© Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/doc/barman-wal-restore.10000644000621200062120000000670314172556763015437 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-WAL\-RESTORE" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-wal\-restore \- \[aq]restore_command\[aq] based on Barman\[aq]s get\-wal .SH SYNOPSIS .PP barman\-wal\-restore [\f[I]OPTIONS\f[]] \f[I]BARMAN_HOST\f[] \f[I]SERVER_NAME\f[] \f[I]WAL_NAME\f[] \f[I]WAL_DEST\f[] .SH DESCRIPTION .PP This script can be used as a \[aq]restore_command\[aq] for PostgreSQL servers, retrieving WAL files using the \[aq]get\-wal\[aq] feature of Barman. An SSH connection will be opened to the Barman host. \f[C]barman\-wal\-restore\f[] allows the integration of Barman in PostgreSQL clusters for better business continuity results. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B BARMAN_HOST the host of the Barman server. .RS .RE .TP .B SERVER_NAME the server name configured in Barman from which WALs are taken. .RS .RE .TP .B WAL_NAME the value of the \[aq]%f\[aq] keyword (according to \[aq]restore_command\[aq]). .RS .RE .TP .B WAL_DEST the value of the \[aq]%p\[aq] keyword (according to \[aq]restore_command\[aq]). .RS .RE .SH OPTIONS .TP .B \-h, \-\-help show a help message and exit .RS .RE .TP .B \-V, \-\-version show program\[aq]s version number and exit .RS .RE .TP .B \-U \f[I]USER\f[], \-\-user \f[I]USER\f[] the user used for the ssh connection to the Barman server. Defaults to \[aq]barman\[aq]. .RS .RE .TP .B \-s \f[I]SECONDS\f[], \-\-sleep \f[I]SECONDS\f[] sleep for SECONDS after a failure of get\-wal request. Defaults to 0 (nowait). .RS .RE .TP .B \-p \f[I]JOBS\f[], \-\-parallel \f[I]JOBS\f[] specifies the number of files to peek and transfer in parallel, defaults to 0 (disabled). .RS .RE .TP .B \-\-spool\-dir \f[I]SPOOL_DIR\f[] Specifies spool directory for WAL files. Defaults to \[aq]/var/tmp/walrestore\[aq] .RS .RE .TP .B \-P, \-\-partial retrieve also partial WAL files (.partial) .RS .RE .TP .B \-z, \-\-gzip transfer the WAL files compressed with gzip .RS .RE .TP .B \-j, \-\-bzip2 transfer the WAL files compressed with bzip2 .RS .RE .TP .B \-c \f[I]CONFIG\f[], \-\-config \f[I]CONFIG\f[] configuration file on the Barman server .RS .RE .TP .B \-t, \-\-test test both the connection and the configuration of the requested PostgreSQL server in Barman to make sure it is ready to receive WAL files. With this option, the \[aq]WAL_NAME\[aq] and \[aq]WAL_DEST\[aq] mandatory arguments are ignored. .RS .RE .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The remote \f[C]get\-wal\f[] command failed, most likely because the requested WAL could not be found. .RS .RE .TP .B 2 The SSH connection to the Barman server failed. .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH SEE ALSO .PP \f[C]barman\f[] (1), \f[C]barman\f[] (5). .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. 
.PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman-cloud-backup-list.10000644000621200062120000000742414172556763016516 0ustar 00000000000000.\" Automatically generated by Pandoc 2.2.1 .\" .TH "BARMAN\-CLOUD\-BACKUP\-LIST" "1" "January 21, 2022" "Barman User manuals" "Version 2.18" .hy .SH NAME .PP barman\-cloud\-backup\-list \- List backups stored in the Cloud .SH SYNOPSIS .PP barman\-cloud\-backup\-list [\f[I]OPTIONS\f[]] \f[I]SOURCE_URL\f[] \f[I]SERVER_NAME\f[] .SH DESCRIPTION .PP This script can be used to list backups previously made with \f[C]barman\-cloud\-backup\f[] command. Currently AWS S3 and Azure Blob Storage are supported. .PP This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. .SH POSITIONAL ARGUMENTS .TP .B SOURCE_URL URL of the cloud source, such as a bucket in AWS S3. For example: \f[C]s3://BUCKET_NAME/path/to/folder\f[] (where \f[C]BUCKET_NAME\f[] is the bucket you have created in AWS). .RS .RE .TP .B SERVER_NAME the name of the server as configured in Barman. .RS .RE .SH OPTIONS .TP .B \-h, \[en]help show a help message and exit .RS .RE .TP .B \-V, \[en]version show program's version number and exit .RS .RE .TP .B \-v, \[en]verbose increase output verbosity (e.g., \-vv is more than \-v) .RS .RE .TP .B \-q, \[en]quiet decrease output verbosity (e.g., \-qq is less than \-q) .RS .RE .TP .B \-t, \[en]test test connectivity to the cloud destination and exit .RS .RE .TP .B \[en]format {json,console} output format (default: `console') .RS .RE .TP .B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage} the cloud provider to which the backup should be uploaded .RS .RE .TP .B \-P, \[en]profile profile name (e.g.\ INI section in AWS credentials file) .RS .RE .TP .B \[en]endpoint\-url override the default S3 URL construction mechanism by specifying an endpoint. .RS .RE .TP .B \[en]credential {azure\-cli,managed\-identity} optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. .RS .RE .SH REFERENCES .PP For Boto: .IP \[bu] 2 https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html .PP For AWS: .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-set\-up.html .IP \[bu] 2 http://docs.aws.amazon.com/cli/latest/userguide/cli\-chap\-getting\-started.html. .PP For Azure Blob Storage: .IP \[bu] 2 https://docs.microsoft.com/en\-us/azure/storage/blobs/authorize\-data\-operations\-cli#set\-environment\-variables\-for\-authorization\-parameters .IP \[bu] 2 https://docs.microsoft.com/en\-us/python/api/azure\-storage\-blob/?view=azure\-python .SH DEPENDENCIES .PP If using \f[C]\-\-cloud\-provider=aws\-s3\f[]: .IP \[bu] 2 boto3 .PP If using \f[C]\-\-cloud\-provider=azure\-blob\-storage\f[]: .IP \[bu] 2 azure\-storage\-blob .IP \[bu] 2 azure\-identity (optional, if you wish to use DefaultAzureCredential) .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B 1 The list command was not successful .RS .RE .TP .B 2 The connection to the cloud provider failed .RS .RE .TP .B 3 There was an error in the command input .RS .RE .TP .B Other non\-zero codes Failure .RS .RE .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. 
However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Github issue tracker. .SH RESOURCES .IP \[bu] 2 Homepage: .IP \[bu] 2 Documentation: .IP \[bu] 2 Professional support: .SH COPYING .PP Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. .PP © Copyright EnterpriseDB UK Limited 2011\-2022 .SH AUTHORS EnterpriseDB . barman-2.18/doc/barman-cloud-backup-list.1.md0000644000621200062120000000643614172556763017117 0ustar 00000000000000% BARMAN-CLOUD-BACKUP-LIST(1) Barman User manuals | Version 2.18 % EnterpriseDB % January 21, 2022 # NAME barman-cloud-backup-list - List backups stored in the Cloud # SYNOPSIS barman-cloud-backup-list [*OPTIONS*] *SOURCE_URL* *SERVER_NAME* # DESCRIPTION This script can be used to list backups previously made with `barman-cloud-backup` command. Currently AWS S3 and Azure Blob Storage are supported. This script and Barman are administration tools for disaster recovery of PostgreSQL servers written in Python and maintained by EnterpriseDB. # POSITIONAL ARGUMENTS SOURCE_URL : URL of the cloud source, such as a bucket in AWS S3. For example: `s3://BUCKET_NAME/path/to/folder` (where `BUCKET_NAME` is the bucket you have created in AWS). SERVER_NAME : the name of the server as configured in Barman. # OPTIONS -h, --help : show a help message and exit -V, --version : show program's version number and exit -v, --verbose : increase output verbosity (e.g., -vv is more than -v) -q, --quiet : decrease output verbosity (e.g., -qq is less than -q) -t, --test : test connectivity to the cloud destination and exit --format {json,console} : output format (default: 'console') --cloud-provider {aws-s3,azure-blob-storage} : the cloud provider to which the backup should be uploaded -P, --profile : profile name (e.g. INI section in AWS credentials file) --endpoint-url : override the default S3 URL construction mechanism by specifying an endpoint. --credential {azure-cli,managed-identity} : optionally specify the type of credential to use when authenticating with Azure Blob Storage. If omitted then the credential will be obtained from the environment. If no credentials can be found in the environment then the default Azure authentication flow will be used. # REFERENCES For Boto: * https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html For AWS: * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html * http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html. For Azure Blob Storage: * https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-data-operations-cli#set-environment-variables-for-authorization-parameters * https://docs.microsoft.com/en-us/python/api/azure-storage-blob/?view=azure-python # DEPENDENCIES If using `--cloud-provider=aws-s3`: * boto3 If using `--cloud-provider=azure-blob-storage`: * azure-storage-blob * azure-identity (optional, if you wish to use DefaultAzureCredential) # EXIT STATUS 0 : Success 1 : The list command was not successful 2 : The connection to the cloud provider failed 3 : There was an error in the command input Other non-zero codes : Failure # BUGS Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. Any bug can be reported via the Github issue tracker. 
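As an illustration (the bucket path and server name below are placeholders), backups previously uploaded with `barman-cloud-backup` can be listed in machine-readable form with:

```
barman-cloud-backup-list --format json s3://BUCKET_NAME/path/to/folder pg
```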
# RESOURCES * Homepage: * Documentation: * Professional support: # COPYING Barman is the property of EnterpriseDB UK Limited and its code is distributed under GNU General Public License v3. © Copyright EnterpriseDB UK Limited 2011-2022 barman-2.18/LICENSE0000644000621200062120000010451314172556763012111 0ustar 00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. 
States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . 
The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . barman-2.18/setup.cfg0000644000621200062120000000043014172556766012721 0ustar 00000000000000[bdist_wheel] universal = 1 [aliases] test = pytest [isort] known_first_party = barman known_third_party = setuptools distutils argcomplete dateutil psycopg2 mock pytest boto3 botocore sphinx sphinx_bootstrap_theme skip = .tox [egg_info] tag_build = tag_date = 0 barman-2.18/PKG-INFO0000644000621200062120000000275014172556766012204 0ustar 00000000000000Metadata-Version: 2.1 Name: barman Version: 2.18 Summary: Backup and Recovery Manager for PostgreSQL Home-page: https://www.pgbarman.org/ Author: EnterpriseDB Author-email: barman@enterprisedb.com License: GPL-3.0 Description: Barman (Backup and Recovery Manager) is an open-source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments to reduce risk and help DBAs during the recovery phase. Barman is distributed under GNU GPL 3 and maintained by EnterpriseDB. Platform: Linux Platform: Mac OS X Classifier: Environment :: Console Classifier: Development Status :: 5 - Production/Stable Classifier: Topic :: System :: Archiving :: Backup Classifier: Topic :: Database Classifier: Topic :: System :: Recovery Tools Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+) Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Provides-Extra: azure Provides-Extra: snappy Provides-Extra: cloud barman-2.18/scripts/0000755000621200062120000000000014172556766012572 5ustar 00000000000000barman-2.18/scripts/barman.bash_completion0000644000621200062120000000014214172556763017114 0ustar 00000000000000eval "$((register-python-argcomplete3 barman || register-python-argcomplete barman) 2>/dev/null)" barman-2.18/setup.py0000755000621200062120000001110614172556763012614 0ustar 00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # barman - Backup and Recovery Manager for PostgreSQL # # © Copyright EnterpriseDB UK Limited 2011-2022 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
"""Backup and Recovery Manager for PostgreSQL Barman (Backup and Recovery Manager) is an open-source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments to reduce risk and help DBAs during the recovery phase. Barman is distributed under GNU GPL 3 and maintained by EnterpriseDB. """ import sys from setuptools import find_packages, setup if sys.version_info < (2, 7): raise SystemExit("ERROR: Barman needs at least python 2.7 to work") # Depend on pytest_runner only when the tests are actually invoked needs_pytest = set(["pytest", "test"]).intersection(sys.argv) pytest_runner = ["pytest_runner"] if needs_pytest else [] setup_requires = pytest_runner install_requires = [ "psycopg2 >= 2.4.2", "python-dateutil", "argcomplete", ] barman = {} with open("barman/version.py", "r") as fversion: exec(fversion.read(), barman) setup( name="barman", version=barman["__version__"], author="EnterpriseDB", author_email="barman@enterprisedb.com", url="https://www.pgbarman.org/", packages=find_packages(exclude=["tests"]), data_files=[ ( "share/man/man1", [ "doc/barman.1", "doc/barman-cloud-backup.1", "doc/barman-cloud-backup-keep.1", "doc/barman-cloud-backup-list.1", "doc/barman-cloud-backup-delete.1", "doc/barman-cloud-check-wal-archive.1", "doc/barman-cloud-restore.1", "doc/barman-cloud-wal-archive.1", "doc/barman-cloud-wal-restore.1", "doc/barman-wal-archive.1", "doc/barman-wal-restore.1", ], ), ("share/man/man5", ["doc/barman.5"]), ], entry_points={ "console_scripts": [ "barman=barman.cli:main", "barman-cloud-backup=barman.clients.cloud_backup:main", "barman-cloud-wal-archive=barman.clients.cloud_walarchive:main", "barman-cloud-restore=barman.clients.cloud_restore:main", "barman-cloud-wal-restore=barman.clients.cloud_walrestore:main", "barman-cloud-backup-delete=barman.clients.cloud_backup_delete:main", "barman-cloud-backup-keep=barman.clients.cloud_backup_keep:main", "barman-cloud-backup-list=barman.clients.cloud_backup_list:main", "barman-cloud-check-wal-archive=barman.clients.cloud_check_wal_archive:main", "barman-wal-archive=barman.clients.walarchive:main", "barman-wal-restore=barman.clients.walrestore:main", ], }, license="GPL-3.0", description=__doc__.split("\n")[0], long_description="\n".join(__doc__.split("\n")[2:]), install_requires=install_requires, extras_require={ "cloud": ["boto3"], "azure": ["azure-identity", "azure-storage-blob"], "snappy": ["python-snappy >= 0.6.0"], }, platforms=["Linux", "Mac OS X"], classifiers=[ "Environment :: Console", "Development Status :: 5 - Production/Stable", "Topic :: System :: Archiving :: Backup", "Topic :: Database", "Topic :: System :: Recovery Tools", "Intended Audience :: System Administrators", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", ], setup_requires=setup_requires, )