barman-1.5.1/0000755000076500000240000000000012621417067012261 5ustar mnenciastaffbarman-1.5.1/AUTHORS0000644000076500000240000000227012565622565013342 0ustar mnenciastaffBarman Core Team (in alphabetical order): * Gabriele Bartolini (project leader) * Stefano Bianucci (developer) * Giuseppe Broccolo (QA/testing) * Giulio Calacoci (developer) * Francesco Canovai (QA/testing) * Gianni Ciolli (QA/testing) * Marco Nenciarini (lead developer) Past contributors: * Carlo Ascani Many thanks go to our sponsors (in alphabetical order): * 4Caast - http://4caast.morfeo-project.org/ (Founding sponsor) * Adyen - http://www.adyen.com/ * Agile Business Group - http://www.agilebg.com/ * BIJ12 - http://www.bij12.nl/ * CSI Piemonte - http://www.csipiemonte.it/ (Founding sponsor) * Ecometer - http://www.ecometer.it/ * GestionaleAuto - http://www.gestionaleauto.com/ (Founding sponsor) * Jobrapido - http://www.jobrapido.com/ * Navionics - http://www.navionics.com/ (Founding sponsor) * Sovon Vogelonderzoek Nederland - https://www.sovon.nl/ * Subito.it - http://www.subito.it/ * XCon Internet Services - http://www.xcon.it/ (Founding sponsor) barman-1.5.1/barman/0000755000076500000240000000000012621417067013521 5ustar mnenciastaffbarman-1.5.1/barman/__init__.py0000644000076500000240000000154012602321601015615 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ The main Barman module """ from __future__ import absolute_import from .version import __version__ __config__ = None __all__ = ['__version__', '__config__'] barman-1.5.1/barman/backup.py0000644000076500000240000011553412621362541015345 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents a backup. 
""" from glob import glob import datetime import logging import os import shutil import time import dateutil.parser import dateutil.tz from barman.infofile import WalFileInfo, BackupInfo, UnknownBackupIdException from barman import xlog, output from barman.command_wrappers import DataTransferFailure from barman.compression import CompressionManager, CompressionIncompatibility from barman.hooks import HookScriptRunner, RetryHookScriptRunner, \ AbortedRetryHookScript from barman.utils import human_readable_timedelta, mkpath, pretty_size, \ fsync_dir from barman.config import BackupOptions from barman.backup_executor import RsyncBackupExecutor from barman.recovery_executor import RecoveryExecutor _logger = logging.getLogger(__name__) class BackupManager(object): """Manager of the backup archive for a server""" DEFAULT_STATUS_FILTER = (BackupInfo.DONE,) def __init__(self, server): """ Constructor """ self.name = "default" self.server = server self.config = server.config self._backup_cache = None self.compression_manager = CompressionManager(self.config) self.executor = RsyncBackupExecutor(self) def get_available_backups(self, status_filter=DEFAULT_STATUS_FILTER): """ Get a list of available backups :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup list returned """ # If the filter is not a tuple, create a tuple using the filter if not isinstance(status_filter, tuple): status_filter = tuple(status_filter,) # Load the cache if necessary if self._backup_cache is None: self._load_backup_cache() # Filter the cache using the status filter tuple backups = {} for key, value in self._backup_cache.iteritems(): if value.status in status_filter: backups[key] = value return backups def _load_backup_cache(self): """ Populate the cache of the available backups, reading information from disk. """ self._backup_cache = {} # Load all the backups from disk reading the backup.info files for filename in glob("%s/*/backup.info" % self.config.basebackups_directory): backup = BackupInfo(self.server, filename) self._backup_cache[backup.backup_id] = backup def backup_cache_add(self, backup_info): """ Register a BackupInfo object to the backup cache. NOTE: Initialise the cache - in case it has not been done yet :param barman.infofile.BackupInfo backup_info: the object we want to register in the cache """ # Load the cache if needed if self._backup_cache is None: self._load_backup_cache() # Insert the BackupInfo object into the cache self._backup_cache[backup_info.backup_id] = backup_info def backup_cache_remove(self, backup_info): """ Remove a BackupInfo object from the backup cache This method _must_ be called after removing the object from disk. :param barman.infofile.BackupInfo backup_info: the object we want to remove from the cache """ # Nothing to do if the cache is not loaded if self._backup_cache is None: return # Remove the BackupInfo object from the backups cache del self._backup_cache[backup_info.backup_id] def get_backup(self, backup_id): """ Return the backup information for the given backup id. If the backup_id is None or backup.info file doesn't exists, it returns None. 
:param str|None backup_id: the ID of the backup to return :rtype: BackupInfo|None """ if backup_id is not None: # Get all the available backups from the cache available_backups = self.get_available_backups( BackupInfo.STATUS_ALL) # Return the BackupInfo if present, or None return available_backups.get(backup_id) return None def get_previous_backup(self, backup_id, status_filter=DEFAULT_STATUS_FILTER): """ Get the previous backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup returned """ if not isinstance(status_filter, tuple): status_filter = tuple(status_filter) backup = BackupInfo(self.server, backup_id=backup_id) available_backups = self.get_available_backups(status_filter + (backup.status,)) ids = sorted(available_backups.keys()) try: current = ids.index(backup_id) while current > 0: res = available_backups[ids[current - 1]] if res.status in status_filter: return res current -= 1 return None except ValueError: raise UnknownBackupIdException('Could not find backup_id %s' % backup_id) def get_next_backup(self, backup_id, status_filter=DEFAULT_STATUS_FILTER): """ Get the next backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup returned """ if not isinstance(status_filter, tuple): status_filter = tuple(status_filter) backup = BackupInfo(self.server, backup_id=backup_id) available_backups = self.get_available_backups(status_filter + (backup.status,)) ids = sorted(available_backups.keys()) try: current = ids.index(backup_id) while current < (len(ids) - 1): res = available_backups[ids[current + 1]] if res.status in status_filter: return res current += 1 return None except ValueError: raise UnknownBackupIdException('Could not find backup_id %s' % backup_id) def get_last_backup(self, status_filter=DEFAULT_STATUS_FILTER): """ Get the last backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup returned """ available_backups = self.get_available_backups(status_filter) if len(available_backups) == 0: return None ids = sorted(available_backups.keys()) return ids[-1] def get_first_backup(self, status_filter=DEFAULT_STATUS_FILTER): """ Get the first backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. 
The status of the backup returned """ available_backups = self.get_available_backups(status_filter) if len(available_backups) == 0: return None ids = sorted(available_backups.keys()) return ids[0] def delete_backup(self, backup): """ Delete a backup :param backup: the backup to delete """ available_backups = self.get_available_backups() # Honour minimum required redundancy if backup.status == BackupInfo.DONE and \ self.server.config.minimum_redundancy >= len(available_backups): output.warning("Skipping delete of backup %s for server %s due to " "minimum redundancy requirements " "(minimum redundancy = %s, current redundancy = %s)", backup.backup_id, self.config.name, len(available_backups), self.server.config.minimum_redundancy) return output.info("Deleting backup %s for server %s", backup.backup_id, self.config.name) previous_backup = self.get_previous_backup(backup.backup_id) next_backup = self.get_next_backup(backup.backup_id) # Delete all the data contained in the backup try: self.delete_backup_data(backup) except OSError as e: output.error("Failure deleting backup %s for server %s.\n%s", backup.backup_id, self.config.name, e) return # Check if we are deleting the first available backup if not previous_backup: # In the case of exclusive backup (default), removes any WAL # files associated to the backup being deleted. # In the case of concurrent backup, removes only WAL files # prior to the start of the backup being deleted, as they # might be useful to any concurrent backup started immediately # after. remove_until = None # means to remove all WAL files if next_backup: remove_until = next_backup elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options: remove_until = backup output.info("Delete associated WAL segments:") for name in self.remove_wal_before_backup(remove_until): output.info("\t%s", name) # As last action, remove the backup directory, # ending the delete operation try: self.delete_basebackup(backup) except OSError as e: output.error("Failure deleting backup %s for server %s.\n%s\n" "Please manually remove the '%s' directory", backup.backup_id, self.config.name, e, backup.get_basebackup_directory()) return self.backup_cache_remove(backup) output.info("Done") def retry_backup_copy(self, target_function, *args, **kwargs): """ Execute the target backup copy function, retrying the configured number of times :param target_function: the base backup target function :param args: args for the target function :param kwargs: kwargs of the target function :return: the result of the target function """ attempts = 0 while True: try: # if is not the first attempt, output the retry number if attempts >= 1: output.warning("Copy of base backup: retry #%s", attempts) # execute the target function for backup copy return target_function(*args, **kwargs) # catch rsync errors except DataTransferFailure, e: # exit condition: if retry number is lower than configured retry # limit, try again; otherwise exit. if attempts < self.config.basebackup_retry_times: # Log the exception, for debugging purpose _logger.exception("Failure in base backup copy: %s", e) output.warning( "Copy of base backup failed, waiting for next " "attempt in %s seconds", self.config.basebackup_retry_sleep) # sleep for configured time. then try again time.sleep(self.config.basebackup_retry_sleep) attempts += 1 else: # if the max number of attempts is reached an there is still # an error, exit re-raising the exception. 
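                    # (Illustrative note: with basebackup_retry_times = 3,
                    # the copy is attempted up to 4 times in total, i.e. the
                    # initial run plus 3 retries, sleeping
                    # basebackup_retry_sleep seconds between attempts.)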
raise def backup(self): """ Performs a backup for the server """ _logger.debug("initialising backup information") self.executor.init() backup_info = None try: # Create the BackupInfo object representing the backup backup_info = BackupInfo( self.server, backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S')) backup_info.save() self.backup_cache_add(backup_info) output.info( "Starting backup for server %s in %s", self.config.name, backup_info.get_basebackup_directory()) # Run the pre-backup-script if present. script = HookScriptRunner(self, 'backup_script', 'pre') script.env_from_backup_info(backup_info) script.run() # Run the pre-backup-retry-script if present. retry_script = RetryHookScriptRunner( self, 'backup_retry_script', 'pre') retry_script.env_from_backup_info(backup_info) retry_script.run() # Do the backup using the BackupExecutor self.executor.backup(backup_info) # Compute backup size and fsync it on disk self.backup_fsync_and_set_sizes(backup_info) # Mark the backup as DONE backup_info.set_attribute("status", "DONE") # Use BaseException instead of Exception to catch events like # KeyboardInterrupt (e.g.: CRTL-C) except BaseException, e: msg_lines = str(e).strip().splitlines() if backup_info: # Use only the first line of exception message # in backup_info error field backup_info.set_attribute("status", "FAILED") # If the exception has no attached message use the raw type name if len(msg_lines) == 0: msg_lines = [type(e).__name__] backup_info.set_attribute( "error", "failure %s (%s)" % ( self.executor.current_action, msg_lines[0])) output.error("Backup failed %s.\nDETAILS: %s\n%s", self.executor.current_action, msg_lines[0], '\n'.join(msg_lines[1:])) else: output.info("Backup end at xlog location: %s (%s, %08X)", backup_info.end_xlog, backup_info.end_wal, backup_info.end_offset) output.info("Backup completed") finally: if backup_info: backup_info.save() # Run the post-backup-retry-script if present. try: retry_script = RetryHookScriptRunner( self, 'backup_retry_script', 'post') retry_script.env_from_backup_info(backup_info) retry_script.run() except AbortedRetryHookScript, e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning("Ignoring stop request after receiving " "abort (exit code %d) from post-backup " "retry hook script: %s", e.hook.exit_status, e.hook.script) # Run the post-backup-script if present. script = HookScriptRunner(self, 'backup_script', 'post') script.env_from_backup_info(backup_info) script.run() output.result('backup', backup_info) def recover(self, backup_info, dest, tablespaces=None, target_tli=None, target_time=None, target_xid=None, target_name=None, exclusive=False, remote_command=None): """ Performs a recovery of a backup :param barman.infofile.BackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None target_tli: the target timeline :param str|None target_time: the target time :param str|None target_xid: the target xid :param str|None target_name: the target name created previously with pg_create_restore_point() function call :param bool exclusive: whether the recovery is exclusive or not :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. 
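
        Example (hypothetical invocation; destination path, timestamp and
        host are made up for illustration)::

            manager.recover(backup_info,
                            dest='/var/lib/pgsql/data',
                            target_time='2015-11-23 14:30:00',
                            remote_command='ssh postgres@db1')
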
""" # Archive every WAL files in the incoming directory of the server self.server.archive_wal(verbose=False) # Delegate the recovery operation to a RecoveryExecutor object executor = RecoveryExecutor(self) recovery_info = executor.recover(backup_info, dest, tablespaces, target_tli, target_time, target_xid, target_name, exclusive, remote_command) # Output recovery results output.result('recovery', recovery_info['results']) def archive_wal(self, verbose=True): """ Executes WAL maintenance operations, such as archiving and compression If verbose is set to False, outputs something only if there is at least one file :param bool verbose: report even if no actions """ found = False compressor = self.compression_manager.get_compressor() with self.server.xlogdb('a') as fxlogdb: if verbose: output.info("Processing xlog segments for %s", self.config.name, log=False) # Get the first available backup first_backup_id = self.get_first_backup(BackupInfo.STATUS_NOT_EMPTY) first_backup = self.server.get_backup(first_backup_id) for filename in sorted(glob( os.path.join(self.config.incoming_wals_directory, '*'))): if not found and not verbose: output.info("Processing xlog segments for %s", self.config.name, log=False) found = True # Create WAL Info object wal_info = WalFileInfo.from_file(filename, compression=None) # If there are no available backups ... if first_backup is None: # ... delete xlog segments only for exclusive backups if BackupOptions.CONCURRENT_BACKUP \ not in self.config.backup_options: # Skipping history files if not xlog.is_history_file(filename): output.info("\tNo base backup available." " Trashing file %s" " from server %s", wal_info.name, self.config.name) os.unlink(filename) continue # ... otherwise else: # ... delete xlog segments older than the first backup if wal_info.name < first_backup.begin_wal: # Skipping history files if not xlog.is_history_file(filename): output.info("\tOlder than first backup." " Trashing file %s" " from server %s", wal_info.name, self.config.name) os.unlink(filename) continue # Report to the user the WAL file we are archiving output.info("\t%s", os.path.basename(filename), log=False) _logger.info("Archiving %s/%s", self.config.name, os.path.basename(filename)) # Archive the WAL file try: self.cron_wal_archival(compressor, wal_info) except AbortedRetryHookScript as e: _logger.warning("Archiving of %s/%s aborted by " "pre_archive_retry_script." "Reason: %s" % (self.config.name, os.path.basename(), e)) return # Updates the information of the WAL archive with # the latest segments fxlogdb.write(wal_info.to_xlogdb_line()) # flush and fsync for every line fxlogdb.flush() os.fsync(fxlogdb.fileno()) if not found and verbose: output.info("\tno file found", log=False) def cron_retention_policy(self): """ Retention policy management """ if (self.server.enforce_retention_policies and self.config.retention_policy_mode == 'auto'): available_backups = self.get_available_backups( BackupInfo.STATUS_ALL) retention_status = self.config.retention_policy.report() for bid in sorted(retention_status.iterkeys()): if retention_status[bid] == BackupInfo.OBSOLETE: output.info( "Enforcing retention policy: removing backup %s for " "server %s" % (bid, self.config.name)) self.delete_backup(available_backups[bid]) def delete_basebackup(self, backup): """ Delete the basebackup dir of a given backup. 
:param barman.infofile.BackupInfo backup: the backup to delete """ backup_dir = backup.get_basebackup_directory() _logger.debug("Deleting base backup directory: %s" % backup_dir) shutil.rmtree(backup_dir) def delete_backup_data(self, backup): """ Delete the data contained in a given backup. :param barman.infofile.BackupInfo backup: the backup to delete """ if backup.tablespaces: if backup.backup_version == 2: tbs_dir = backup.get_basebackup_directory() else: tbs_dir = os.path.join(backup.get_data_directory(), 'pg_tblspc') for tablespace in backup.tablespaces: rm_dir = os.path.join(tbs_dir, str(tablespace.oid)) if os.path.exists(rm_dir): _logger.debug("Deleting tablespace %s directory: %s" % (tablespace.name, rm_dir)) shutil.rmtree(rm_dir) pg_data = backup.get_data_directory() if os.path.exists(pg_data): _logger.debug("Deleting PGDATA directory: %s" % pg_data) shutil.rmtree(pg_data) def delete_wal(self, wal_info): """ Delete a WAL segment, with the given WalFileInfo :param barman.infofile.WalFileInfo wal_info: the WAL to delete """ try: os.unlink(wal_info.fullpath(self.server)) try: os.removedirs(os.path.dirname(wal_info.fullpath(self.server))) except OSError: # This is not an error condition # We always try to remove the the trailing directories, # this means that hashdir is not empty. pass except OSError: _logger.warning('Expected WAL file %s not found during delete', wal_info.name, exc_info=1) def cron_wal_archival(self, compressor, wal_info): """ Archive a WAL segment from the incoming directory. This function returns a WalFileInfo object. :param compressor: the compressor for the file (if any) :param wal_info: WalFileInfo of the WAL file is being processed """ dest_file = wal_info.fullpath(self.server) dest_dir = os.path.dirname(dest_file) srcfile = os.path.join(self.config.incoming_wals_directory, wal_info.name) error = None try: # Run the pre_archive_script if present. script = HookScriptRunner(self, 'archive_script', 'pre') script.env_from_wal_info(wal_info, srcfile) script.run() # Run the pre_archive_retry_script if present. retry_script = RetryHookScriptRunner(self, 'archive_retry_script', 'pre') retry_script.env_from_wal_info(wal_info, srcfile) retry_script.run() mkpath(dest_dir) if compressor: compressor.compress(srcfile, dest_file) shutil.copystat(srcfile, dest_file) os.unlink(srcfile) else: shutil.move(srcfile, dest_file) # Execute fsync() on the archived WAL containing directory fsync_dir(dest_dir) # Execute fsync() also on the incoming directory fsync_dir(self.config.incoming_wals_directory) # Execute fsync() on the archived WAL file file_fd = os.open(dest_file, os.O_RDONLY) os.fsync(file_fd) os.close(file_fd) stat = os.stat(dest_file) wal_info.size = stat.st_size wal_info.compression = compressor and compressor.compression except Exception as e: # In case of failure save the exception for the post sripts error = e raise # Ensure the execution of the post_archive_retry_script and # the post_archive_script finally: # Run the post_archive_retry_script if present. try: retry_script = RetryHookScriptRunner(self, 'archive_retry_script', 'post') retry_script.env_from_wal_info(wal_info, dest_file, error) retry_script.run() except AbortedRetryHookScript, e: # Ignore the ABORT_STOP as it is a post-hook operation _logger.warning("Ignoring stop request after receiving " "abort (exit code %d) from post-archive " "retry hook script: %s", e.hook.exit_status, e.hook.script) # Run the post_archive_script if present. 
script = HookScriptRunner(self, 'archive_script', 'post', error) script.env_from_wal_info(wal_info, dest_file) script.run() def check(self, check_strategy): """ This function does some checks on the server. :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Check compression_setting parameter if self.config.compression and not self.compression_manager.check(): check_strategy.result(self.config.name, 'compression settings', False) else: status = True try: self.compression_manager.get_compressor() except CompressionIncompatibility, field: check_strategy.result('%s setting' % field, False) status = False check_strategy.result(self.config.name, 'compression settings', status) # Minimum redundancy checks no_backups = len(self.get_available_backups()) # Check minimum_redundancy_requirements parameter if no_backups < self.config.minimum_redundancy: status = False else: status = True check_strategy.result( self.config.name, 'minimum redundancy requirements', status, 'have %s backups, expected at least %s' % ( no_backups, self.config.minimum_redundancy)) # Execute additional checks defined by the BackupExecutor self.executor.check(check_strategy) def status(self): """ This function show the server status """ # get number of backups no_backups = len(self.get_available_backups()) output.result('status', self.config.name, "backups_number", "No. of available backups", no_backups) output.result('status', self.config.name, "first_backup", "First available backup", self.get_first_backup()) output.result('status', self.config.name, "last_backup", "Last available backup", self.get_last_backup()) # Minimum redundancy check. if number of backups minor than minimum # redundancy, fail. if no_backups < self.config.minimum_redundancy: output.result('status', self.config.name, "minimum_redundancy", "Minimum redundancy requirements", "FAILED (%s/%s)" % ( no_backups, self.config.minimum_redundancy)) else: output.result('status', self.config.name, "minimum_redundancy", "Minimum redundancy requirements", "satisfied (%s/%s)" % ( no_backups, self.config.minimum_redundancy)) # Output additional status defined by the BackupExecutor self.executor.status() def get_remote_status(self): """ Build additional remote status lines defined by the BackupManager. :rtype: dict[str, None] """ return self.executor.get_remote_status() def rebuild_xlogdb(self): """ Rebuild the whole xlog database guessing it from the archive content. 
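
        Typically reached through the command line (sketch; this is the
        same command suggested by the hint emitted in
        remove_wal_before_backup())::

            barman rebuild-xlogdb <server_name>
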
""" from os.path import isdir, join output.info("Rebuilding xlogdb for server %s", self.config.name) root = self.config.wals_directory default_compression = self.config.compression wal_count = label_count = history_count = 0 # lock the xlogdb as we are about replacing it completely with self.server.xlogdb('w') as fxlogdb: xlogdb_new = fxlogdb.name + ".new" with open(xlogdb_new, 'w') as fxlogdb_new: for name in sorted(os.listdir(root)): # ignore the xlogdb and its lockfile if name.startswith(self.server.XLOG_DB): continue fullname = join(root, name) if isdir(fullname): # all relevant files are in subdirectories hash_dir = fullname for wal_name in sorted(os.listdir(hash_dir)): fullname = join(hash_dir, wal_name) if isdir(fullname): _logger.warning( 'unexpected directory ' 'rebuilding the wal database: %s', fullname) else: if xlog.is_wal_file(fullname): wal_count += 1 elif xlog.is_backup_file(fullname): label_count += 1 else: _logger.warning( 'unexpected file ' 'rebuilding the wal database: %s', fullname) continue wal_info = WalFileInfo.from_file( fullname, default_compression=default_compression) fxlogdb_new.write(wal_info.to_xlogdb_line()) else: # only history files are here if xlog.is_history_file(fullname): history_count += 1 wal_info = WalFileInfo.from_file( fullname, default_compression=default_compression) fxlogdb_new.write(wal_info.to_xlogdb_line()) else: _logger.warning( 'unexpected file ' 'rebuilding the wal database: %s', fullname) os.fsync(fxlogdb_new.fileno()) shutil.move(xlogdb_new, fxlogdb.name) fsync_dir(os.path.dirname(fxlogdb.name)) output.info('Done rebuilding xlogdb for server %s ' '(history: %s, backup_labels: %s, wal_file: %s)', self.config.name, history_count, label_count, wal_count) def remove_wal_before_backup(self, backup_info): """ Remove WAL files which have been archived before the start of the provided backup. If no backup_info is provided delete all available WAL files :param BackupInfo|None backup_info: the backup information structure :return list: a list of removed WAL files """ removed = [] with self.server.xlogdb() as fxlogdb: xlogdb_new = fxlogdb.name + ".new" with open(xlogdb_new, 'w') as fxlogdb_new: for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) if not xlog.is_any_xlog_file(wal_info.name): output.error( "invalid xlog segment name %r\n" "HINT: Please run \"barman rebuild-xlogdb %s\" " "to solve this issue", wal_info.name, self.config.name) continue # Keeps the WAL segment if it is a history file or later # than the given backup (the first available) if (xlog.is_history_file(wal_info.name) or (backup_info and wal_info.name >= backup_info.begin_wal)): fxlogdb_new.write(wal_info.to_xlogdb_line()) continue else: self.delete_wal(wal_info) removed.append(wal_info.name) fxlogdb_new.flush() os.fsync(fxlogdb_new.fileno()) shutil.move(xlogdb_new, fxlogdb.name) fsync_dir(os.path.dirname(fxlogdb.name)) return removed def validate_last_backup_maximum_age(self, last_backup_maximum_age): """ Evaluate the age of the last available backup in a catalogue. If the last backup is older than the specified time interval (age), the function returns False. If within the requested age interval, the function returns True. 
        :param datetime.timedelta last_backup_maximum_age: time interval
            representing the maximum allowed age for the last backup
            in a server catalogue
        :return tuple: a tuple containing the boolean result of the check
            and auxiliary information about the current age of the
            last backup
        """
        # Get the ID of the last available backup
        backup_id = self.get_last_backup()
        if backup_id:
            # Get the backup object
            backup = BackupInfo(self.server, backup_id=backup_id)
            now = datetime.datetime.now(dateutil.tz.tzlocal())
            # Evaluate the point of validity
            validity_time = now - last_backup_maximum_age
            # Pretty print of a time interval (age)
            msg = human_readable_timedelta(now - backup.end_time)
            # If the backup end time is older than the point of validity,
            # return False, otherwise return True
            if backup.end_time < validity_time:
                return False, msg
            else:
                return True, msg
        else:
            # If no backup is available return False
            return False, "No available backups"

    def backup_fsync_and_set_sizes(self, backup_info):
        """
        Fsync all files in a backup and set the actual size on disk
        of a backup.

        Also evaluate the deduplication ratio and the deduplicated size if
        applicable.

        :param barman.infofile.BackupInfo backup_info: the backup to update
        """
        # Calculate the base backup size
        self.executor.current_action = "calculating backup size"
        _logger.debug(self.executor.current_action)
        backup_size = 0
        deduplicated_size = 0
        backup_dest = backup_info.get_basebackup_directory()
        for dir_path, _, file_names in os.walk(backup_dest):
            # execute fsync() on the containing directory
            fsync_dir(dir_path)
            # execute fsync() on all the contained files
            for filename in file_names:
                file_path = os.path.join(dir_path, filename)
                file_fd = os.open(file_path, os.O_RDONLY)
                file_stat = os.fstat(file_fd)
                backup_size += file_stat.st_size
                # Excludes hard links from real backup size
                if file_stat.st_nlink == 1:
                    deduplicated_size += file_stat.st_size
                os.fsync(file_fd)
                os.close(file_fd)
        # Save size into BackupInfo object
        backup_info.set_attribute('size', backup_size)
        backup_info.set_attribute('deduplicated_size', deduplicated_size)
        if backup_info.size > 0:
            deduplication_ratio = 1 - (float(
                backup_info.deduplicated_size) / backup_info.size)
        else:
            deduplication_ratio = 0
        if self.config.reuse_backup == 'link':
            output.info(
                "Backup size: %s. Actual size on disk: %s"
                " (-%s deduplication ratio)." % (
                    pretty_size(backup_info.size),
                    pretty_size(backup_info.deduplicated_size),
                    '{percent:.2%}'.format(percent=deduplication_ratio)))
        else:
            output.info("Backup size: %s" % pretty_size(backup_info.size))
barman-1.5.1/barman/backup_executor.py0000644000076500000240000011044212621362541017254 0ustar mnenciastaff
# Copyright (C) 2011-2015 2ndQuadrant Italia Srl
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.

"""
Backup Executor module

A Backup Executor is a class responsible for the execution of a backup.
Specific implementations of backups are defined by classes that derive
from BackupExecutor (e.g.: backup with rsync through Ssh).
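
A minimal custom executor could look like the following sketch (purely
illustrative; the concrete classes defined below are the real
implementations)::

    class NoopBackupExecutor(BackupExecutor):
        def backup(self, backup_info):
            # backup() is the only method a concrete subclass is
            # required to implement; this sketch does nothing
            pass
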
A BackupExecutor is invoked by the BackupManager for backup operations. """ from abc import ABCMeta, abstractmethod import logging import os import re import psycopg2 from barman.command_wrappers import RsyncPgData, CommandFailedException, \ Command, DataTransferFailure from barman.utils import mkpath from barman import output, xlog from barman.config import BackupOptions _logger = logging.getLogger(__name__) class SshCommandException(Exception): """ Error parsing ssh_command parameter """ class BackupExecutor(object): """ Abstract base class for any backup executors. """ __metaclass__ = ABCMeta def __init__(self, backup_manager): """ Base constructor :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the executor """ self.backup_manager = backup_manager self.server = backup_manager.server self.config = backup_manager.config def init(self): """ Initialise the internal state of the backup executor """ @abstractmethod def backup(self, backup_info): """ Perform a backup for the server - invoked by BackupManager.backup() :param barman.infofile.BackupInfo backup_info: backup information """ def check(self, check_strategy): """ Perform additional checks - invoked by BackupManager.check() :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ def status(self): """ Set additional status info - invoked by BackupManager.status() """ def get_remote_status(self): """ Get additional remote status info - invoked by BackupManager.get_remote_status() :rtype: dict[str, str] """ return {} def _parse_ssh_command(ssh_command): """ Parse a user provided ssh command to a single command and a list of arguments In case of error, the first member of the result (the command) will be None :param ssh_command: a ssh command provided by the user :return tuple[str,list[str]]: the command and a list of options """ try: ssh_options = ssh_command.split() except AttributeError: return None, [] ssh_command = ssh_options.pop(0) ssh_options.extend("-o BatchMode=yes -o StrictHostKeyChecking=no".split()) return ssh_command, ssh_options class SshBackupExecutor(BackupExecutor): """ Abstract base class for any backup executors based on Ssh remote connections. This class is also a factory for exclusive/concurrent backup strategy objects. Raises a SshCommandException if 'ssh_command' is not set. """ __metaclass__ = ABCMeta def __init__(self, backup_manager): """ Constructor of the abstract class for backups via Ssh :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the executor """ super(SshBackupExecutor, self).__init__(backup_manager) # Retrieve the ssh command and the options necessary for the # remote ssh access. self.ssh_command, self.ssh_options = _parse_ssh_command( backup_manager.config.ssh_command) # Requires ssh_command to be set if not self.ssh_command: raise SshCommandException( 'Missing or invalid ssh_command in barman configuration ' 'for server %s' % backup_manager.config.name) # Holds the action being executed. Used for error messages. self.current_action = None # Depending on the backup options value, create the proper strategy if BackupOptions.CONCURRENT_BACKUP in self.config.backup_options: # Concurrent backup strategy self.strategy = ConcurrentBackupStrategy(self) else: # Exclusive backup strategy self.strategy = ExclusiveBackupStrategy(self) def _update_action_from_strategy(self): """ Update the executor's current action with the one of the strategy. 
This is used during exception handling to let the caller know where the failure occurred. """ action = getattr(self.strategy, 'current_action', None) if action: self.current_action = action @abstractmethod def backup_copy(self, backup_info): """ Performs the actual copy of a backup for the server :param barman.infofile.BackupInfo backup_info: backup information """ def init(self): """ Initialise the internal state of the backup executor """ self.current_action = "starting backup" def backup(self, backup_info): """ Perform a backup for the server - invoked by BackupManager.backup() through the generic interface of a BackupExecutor. This implementation is responsible for performing a backup through a remote connection to the PostgreSQL server via Ssh. The specific set of instructions depends on both the specific class that derives from SshBackupExecutor and the selected strategy (e.g. exclusive backup through Rsync). :param barman.infofile.BackupInfo backup_info: backup information """ # Start the backup, all the subsequent code must be wrapped in a # try except block which finally issues a backup_stop command try: self.strategy.start_backup(backup_info) except BaseException: self._update_action_from_strategy() raise try: # save any metadata changed by start_backup() call # This must be inside the try-except, because it could fail backup_info.save() # If this is the first backup, purge unused WAL files previous_backup = self.backup_manager.get_previous_backup( backup_info.backup_id) if not previous_backup: self.backup_manager.remove_wal_before_backup(backup_info) output.info("Backup start at xlog location: %s (%s, %08X)", backup_info.begin_xlog, backup_info.begin_wal, backup_info.begin_offset) # Start the copy self.current_action = "copying files" output.info("Copying files.") # perform the backup copy, honouring the retry option if set self.backup_manager.retry_backup_copy(self.backup_copy, backup_info) output.info("Copy done.") except: # we do not need to do anything here besides re-raising the # exception. It will be handled in the external try block. raise else: self.current_action = "issuing stop of the backup" output.info("Asking PostgreSQL server to finalize the backup.") finally: try: self.strategy.stop_backup(backup_info) except BaseException: self._update_action_from_strategy() raise def check(self, check_strategy): """ Perform additional checks for SshBackupExecutor, including Ssh connection (executing a 'true' command on the remote server) and specific checks for the given backup strategy. :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Execute a 'true' command on the remote server cmd = Command(self.ssh_command, self.ssh_options) ret = cmd("true") hint = "PostgreSQL server" if ret == 0: check_strategy.result(self.config.name, 'ssh', True, hint) else: check_strategy.result(self.config.name, 'ssh', False, '%s, return code: %s' % (hint, ret)) try: # Invoke specific checks for the backup strategy self.strategy.check(check_strategy) except BaseException: self._update_action_from_strategy() raise def status(self): """ Set additional status info for SshBackupExecutor using remote commands via Ssh, as well as those defined by the given backup strategy. """ # If the PostgreSQL version is < 9.4 pg_stat_archiver is not available. 
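        # (For reference: on 9.4 and later the last archived WAL is exposed
        # by the pg_stat_archiver system view, e.g.
        # "SELECT last_archived_wal FROM pg_stat_archiver".)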
# Retrieve the last_archived_wal using the executor remote_status = self.get_remote_status() if 'last_archived_wal' in remote_status: output.result('status', self.config.name, 'last_archived_wal', 'Last archived WAL', remote_status['last_archived_wal'] or 'No WAL segment shipped yet') try: # Invoke the status() method for the given strategy self.strategy.status() except BaseException: self._update_action_from_strategy() raise def get_remote_status(self): """ Get remote information on PostgreSQL using Ssh, such as last archived WAL file :rtype: dict(str,str|None) """ remote_status = {} with self.server.pg_connect(): # Retrieve the last archived WAL using a Ssh connection on # the remote server and executing an 'ls' command. Only # for pre-9.4 versions of PostgreSQL. if self.server.server_version < 90400: remote_status['last_archived_wal'] = None if self.server.get_pg_setting('data_directory') and \ self.server.get_pg_setting('archive_command'): # TODO: replace with RemoteUnixCommand cmd = Command(self.ssh_command, self.ssh_options) archive_dir = os.path.join( self.server.get_pg_setting('data_directory'), 'pg_xlog', 'archive_status') out = str(cmd.getoutput('ls', '-tr', archive_dir)[0]) for line in out.splitlines(): if line.endswith('.done'): name = line[:-5] if xlog.is_any_xlog_file(name): remote_status['last_archived_wal'] = name break return remote_status class RsyncBackupExecutor(SshBackupExecutor): """ Concrete class for backup via Rsync+Ssh. It invokes PostgreSQL commands to start and stop the backup, depending on the defined strategy. Data files are copied using Rsync via Ssh. It heavily relies on methods defined in the SshBackupExecutor class from which it derives. """ def __init__(self, backup_manager): """ Constructor :param barman.backup.BackupManager backup_manager: the BackupManager assigned to the strategy """ super(RsyncBackupExecutor, self).__init__(backup_manager) def backup_copy(self, backup_info): """ Perform the actual copy of the backup using Rsync. First, it copies one tablespace at a time, then the PGDATA directory, and finally configuration files (if outside PGDATA). Bandwidth limitation, according to configuration, is applied in the process. This method is the core of base backup copy using Rsync+Ssh. 
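
        With ``reuse_backup = link``, the transfer of each directory is
        conceptually close to this simplified command line (sketch only;
        the real invocation is built by RsyncPgData and smart_copy)::

            rsync -a --link-dest=<previous_backup_dir> \
                <host>:<PGDATA>/ <destination_dir>/
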
:param barman.infofile.BackupInfo backup_info: backup information """ # List of paths to be ignored by Rsync exclude_and_protect = [] # Retrieve the previous backup metadata, then set safe_horizon previous_backup = self.backup_manager.get_previous_backup( backup_info.backup_id) if previous_backup: # safe_horizon is a tz-aware timestamp because BackupInfo class # ensures it safe_horizon = previous_backup.begin_time else: # If no previous backup is present, safe_horizon is set to None safe_horizon = None # Copy tablespaces applying bwlimit when necessary if backup_info.tablespaces: tablespaces_bw_limit = self.config.tablespace_bandwidth_limit # Copy a tablespace at a time for tablespace in backup_info.tablespaces: self.current_action = "copying tablespace '%s'" % \ tablespace.name # Apply bandwidth limit if requested bwlimit = self.config.bandwidth_limit if tablespaces_bw_limit and \ tablespace.name in tablespaces_bw_limit: bwlimit = tablespaces_bw_limit[tablespace.name] if bwlimit: self.current_action += (" with bwlimit '%d'" % bwlimit) _logger.debug(self.current_action) # If the tablespace location is inside the data directory, # exclude and protect it from being copied twice during # the data directory copy if tablespace.location.startswith(backup_info.pgdata): exclude_and_protect.append( tablespace.location[len(backup_info.pgdata):]) # Make sure the destination directory exists in order for # smart copy to detect that no file is present there tablespace_dest = backup_info.get_data_directory(tablespace.oid) mkpath(tablespace_dest) # Exclude and protect the tablespace from being copied again # during the data directory copy exclude_and_protect.append("/pg_tblspc/%s" % tablespace.oid) # Copy the backup using smart_copy trying to reuse the # tablespace of the previous backup if incremental is active ref_dir = self._reuse_dir(previous_backup, tablespace.oid) tb_rsync = RsyncPgData( ssh=self.ssh_command, ssh_options=self.ssh_options, args=self._reuse_args(ref_dir), bwlimit=bwlimit, network_compression=self.config.network_compression, check=True) try: tb_rsync.smart_copy( ':%s/' % tablespace.location, tablespace_dest, safe_horizon, ref_dir) except CommandFailedException, e: msg = "data transfer failure on directory '%s'" % \ backup_info.get_data_directory(tablespace.oid) raise DataTransferFailure.from_rsync_error(e, msg) # Make sure the destination directory exists in order for smart copy # to detect that no file is present there backup_dest = backup_info.get_data_directory() mkpath(backup_dest) # Copy the PGDATA, trying to reuse the data dir # of the previous backup if incremental is active ref_dir = self._reuse_dir(previous_backup) rsync = RsyncPgData( ssh=self.ssh_command, ssh_options=self.ssh_options, args=self._reuse_args(ref_dir), bwlimit=self.config.bandwidth_limit, exclude_and_protect=exclude_and_protect, network_compression=self.config.network_compression) try: rsync.smart_copy(':%s/' % backup_info.pgdata, backup_dest, safe_horizon, ref_dir) except CommandFailedException, e: msg = "data transfer failure on directory '%s'" % \ backup_info.pgdata raise DataTransferFailure.from_rsync_error(e, msg) # At last copy pg_control try: rsync(':%s/global/pg_control' % (backup_info.pgdata,), '%s/global/pg_control' % (backup_dest,)) except CommandFailedException, e: msg = "data transfer failure on file '%s/global/pg_control'" % \ backup_info.pgdata raise DataTransferFailure.from_rsync_error(e, msg) # Copy configuration files (if not inside PGDATA) self.current_action = "copying configuration files" 
_logger.debug(self.current_action) for key in ('config_file', 'hba_file', 'ident_file'): cf = getattr(backup_info, key, None) if cf: assert isinstance(cf, str) # Consider only those that reside outside of the original PGDATA if cf.startswith(backup_info.pgdata): self.current_action = \ "skipping %s as contained in %s directory" % ( key, backup_info.pgdata) _logger.debug(self.current_action) continue self.current_action = "copying %s as outside %s directory" % ( key, backup_info.pgdata) _logger.info(self.current_action) try: rsync(':%s' % cf, backup_dest) except CommandFailedException, e: ret_code = e.args[0]['ret'] msg = "data transfer failure on file '%s'" % cf if 'ident_file' == key and ret_code == 23: # If the ident file is missing, # it isn't an error condition for PostgreSQL. # Barman is consistent with this behavior. output.warning(msg, log=True) continue else: raise DataTransferFailure.from_rsync_error(e, msg) # Check for any include directives in PostgreSQL configuration # Currently, include directives are not supported for files that # reside outside PGDATA. These files must be manually backed up. # Barman will emit a warning and list those files if backup_info.included_files: filtered_files = [ included_file for included_file in backup_info.included_files if not included_file.startswith(backup_info.pgdata) ] if len(filtered_files) > 0: output.warning( "The usage of include directives is not supported " "for files that reside outside PGDATA.\n" "Please manually backup the following files:\n" "\t%s\n", "\n\t".join(filtered_files)) def _reuse_dir(self, previous_backup_info, oid=None): """ If reuse_backup is 'copy' or 'link', builds the path of the directory to reuse, otherwise always returns None. If oid is None, it returns the full path of PGDATA directory of the previous_backup otherwise it returns the path to the specified tablespace using it's oid. :param barman.infofile.BackupInfo previous_backup_info: backup to be reused :param str oid: oid of the tablespace to be reused :returns: a string containing the local path with data to be reused or None :rtype: str|None """ if self.config.reuse_backup in ('copy', 'link') and \ previous_backup_info is not None: try: return previous_backup_info.get_data_directory(oid) except ValueError: return None def _reuse_args(self, reuse_dir): """ If reuse_backup is 'copy' or 'link', build the rsync option to enable the reuse, otherwise returns an empty list :param str reuse_dir: the local path with data to be reused or None :returns: list of argument for rsync call for incremental backup or empty list. :rtype: list(str) """ if self.config.reuse_backup in ('copy', 'link') and \ reuse_dir is not None: return ['--%s-dest=%s' % (self.config.reuse_backup, reuse_dir)] else: return [] class BackupStrategy(object): """ Abstract base class for a strategy to be used by a backup executor. """ __metaclass__ = ABCMeta def __init__(self, executor): """ Constructor :param BackupExecutor executor: the BackupExecutor assigned to the strategy """ self.executor = executor # Holds the action being executed. Used for error messages. 
self.current_action = None @abstractmethod def start_backup(self, backup_info): """ Issue a start of a backup - invoked by BackupExecutor.backup() :param barman.infofile.BackupInfo backup_info: backup information """ @abstractmethod def stop_backup(self, backup_info): """ Issue a stop of a backup - invoked by BackupExecutor.backup() :param barman.infofile.BackupInfo backup_info: backup information """ def check(self, check_strategy): """ Perform additional checks - invoked by BackupExecutor.check() :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # noinspection PyMethodMayBeStatic def status(self): """ Set additional status info - invoked by BackupExecutor.status() """ def _pg_get_metadata(self, backup_info): """ Load PostgreSQL metadata into the backup_info parameter :param barman.infofile.BackupInfo backup_info: backup information """ server = self.executor.server with server.pg_connect(): # Get the PostgreSQL data directory location self.current_action = 'detecting data directory' output.debug(self.current_action) data_directory = server.get_pg_setting('data_directory') backup_info.set_attribute('pgdata', data_directory) # Set server version backup_info.set_attribute('version', server.server_version) # Set configuration files location cf = server.get_pg_configuration_files() if cf: for key in sorted(cf): backup_info.set_attribute(key, cf[key]) # Get tablespaces information self.current_action = 'detecting tablespaces' output.debug(self.current_action) tablespaces = server.get_pg_tablespaces() if tablespaces and len(tablespaces) > 0: backup_info.set_attribute('tablespaces', tablespaces) for item in tablespaces: msg = "\t%s, %s, %s" % (item.oid, item.name, item.location) _logger.info(msg) class ExclusiveBackupStrategy(BackupStrategy): """ Concrete class for exclusive backup strategy. This strategy is for SshBackupExecutor only and is responsible for coordinating Barman with PostgreSQL on standard physical backup operations (known as 'exclusive' backup), such as invoking pg_start_backup() and pg_stop_backup() on the master server. 
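
    The coordination essentially boils down to the following server-side
    calls (simplified sketch; the exact queries live in _pg_start_backup()
    and _pg_stop_backup() below)::

        SELECT pg_start_backup('<label>');  -- issued at backup start
        SELECT pg_stop_backup();            -- issued at backup end
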
""" def __init__(self, executor): """ Constructor :param BackupExecutor executor: the BackupExecutor assigned to the strategy """ super(ExclusiveBackupStrategy, self).__init__(executor) # Make sure that executor is of type SshBackupExecutor assert isinstance(executor, SshBackupExecutor) # Make sure that backup_options does not contain 'concurrent' assert (BackupOptions.CONCURRENT_BACKUP not in self.executor.config.backup_options) def _pg_start_backup(self, backup_label): """ Calls pg_start_backup() on the PostgreSQL server :param str backup_label: label for the backup returned by Postgres :rtype: tuple """ with self.executor.server.pg_connect() as conn: try: # Issue a rollback to release any unneeded lock conn.rollback() cur = conn.cursor() if self.executor.server.server_version < 80400: cur.execute( "SELECT xlog_loc, " "(pg_xlogfile_name_offset(xlog_loc)).*, " "now() FROM pg_start_backup(%s) as xlog_loc", (backup_label,)) else: cur.execute( "SELECT xlog_loc, " "(pg_xlogfile_name_offset(xlog_loc)).*, " "now() FROM pg_start_backup(%s,%s) as xlog_loc", (backup_label, self.executor.config.immediate_checkpoint)) return cur.fetchone() except psycopg2.Error, e: msg = "pg_start_backup(): %s" % e _logger.debug(msg) raise Exception(msg) def _pg_stop_backup(self): """ Calls pg_stop_backup() on the PostgreSQL server :returns: a tuple with the result of the pg_stop_backup() call or None :rtype: tuple|None """ with self.executor.server.pg_connect() as conn: try: # Issue a rollback to release any unneeded lock conn.rollback() cur = conn.cursor() cur.execute( 'SELECT xlog_loc, (pg_xlogfile_name_offset(xlog_loc)).*, ' 'now() FROM pg_stop_backup() as xlog_loc') return cur.fetchone() except psycopg2.Error, e: _logger.debug('Error issuing pg_stop_backup() command: %s', e) return None def start_backup(self, backup_info): """ Manage the start of an exclusive backup The method performs all the preliminary operations required for an exclusive physical backup to start, as well as preparing the information on the backup for Barman. :param barman.infofile.BackupInfo backup_info: backup information """ self.current_action = "connecting to database (%s)" % \ self.executor.config.conninfo output.debug(self.current_action) server = self.executor.server with server.pg_connect(): # Retrieve PostgreSQL server metadata self._pg_get_metadata(backup_info) # Issue pg_start_backup on the PostgreSQL server self.current_action = "issuing start backup command" _logger.debug(self.current_action) label = "Barman backup %s %s" % ( backup_info.server_name, backup_info.backup_id) # Exclusive backup: issue a pg_start_Backup() command start_row = self._pg_start_backup(label) start_xlog, start_file_name, start_file_offset, start_time = \ start_row backup_info.set_attribute('status', "STARTED") backup_info.set_attribute('timeline', int(start_file_name[0:8], 16)) backup_info.set_attribute('begin_xlog', start_xlog) backup_info.set_attribute('begin_wal', start_file_name) backup_info.set_attribute('begin_offset', start_file_offset) backup_info.set_attribute('begin_time', start_time) def stop_backup(self, backup_info): """ Manage the stop of an exclusive backup The method informs the PostgreSQL server that the physical exclusive backup is finished, as well as preparing the information returned by PostgreSQL for Barman. 
:param barman.infofile.BackupInfo backup_info: backup information """ stop_row = self._pg_stop_backup() if stop_row: stop_xlog, stop_file_name, stop_file_offset, stop_time = \ stop_row backup_info.set_attribute('end_time', stop_time) backup_info.set_attribute('end_xlog', stop_xlog) backup_info.set_attribute('end_wal', stop_file_name) backup_info.set_attribute('end_offset', stop_file_offset) else: raise Exception('Cannot terminate exclusive backup. You might ' 'have to manually execute pg_stop_backup() ' 'on your PostgreSQL server') def check(self, check_strategy): """ Perform additional checks for ExclusiveBackupStrategy :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Make sure PostgreSQL is not in recovery (i.e. is a master) is_in_recovery = self.executor.server.pg_is_in_recovery() if not is_in_recovery: check_strategy.result( self.executor.config.name, 'not in recovery', True) else: check_strategy.result( self.executor.config.name, 'not in recovery', False, 'cannot perform exclusive backup on a standby') class ConcurrentBackupStrategy(BackupStrategy): """ Concrete class for concurrent backup strategy. This strategy is for SshBackupExecutor only and is responsible for coordinating Barman with PostgreSQL on concurrent physical backup operations through the pgespresso extension. """ def __init__(self, executor): """ Constructor :param BackupExecutor executor: the BackupExecutor assigned to the strategy """ super(ConcurrentBackupStrategy, self).__init__(executor) # Make sure that executor is of type SshBackupExecutor assert isinstance(executor, SshBackupExecutor) # Make sure that backup_options contains 'concurrent' assert (BackupOptions.CONCURRENT_BACKUP in self.executor.config.backup_options) def _pgespresso_start_backup(self, backup_label): """ Execute a pgespresso_start_backup :param str backup_label: label for the backup :rtype: tuple """ with self.executor.server.pg_connect() as conn: try: # Issue a rollback to release any unneeded lock conn.rollback() cur = conn.cursor() cur.execute( 'SELECT pgespresso_start_backup(%s,%s), now()', (backup_label, self.executor.config.immediate_checkpoint)) return cur.fetchone() except psycopg2.Error, e: msg = "pgespresso_start_backup(): %s" % e _logger.debug(msg) raise Exception(msg) def _pgespresso_stop_backup(self, backup_label): """ Execute a pgespresso_stop_backup :param str backup_label: label of the backup :returns: a string containing the result of the pgespresso_stop_backup call or None :rtype: tuple|None """ with self.executor.server.pg_connect() as conn: try: # Issue a rollback to release any unneeded lock conn.rollback() cur = conn.cursor() cur.execute("SELECT pgespresso_stop_backup(%s), now()", (backup_label,)) return cur.fetchone() except psycopg2.Error, e: _logger.debug( "Error issuing pgespresso_stop_backup() command: %s", e) return None # noinspection PyMethodMayBeStatic def _write_backup_label(self, backup_info): """ Write backup_label file inside PGDATA folder :param barman.infofile.BackupInfo backup_info: tbackup information """ label_file = os.path.join(backup_info.get_data_directory(), 'backup_label') output.debug("Writing backup label: %s" % label_file) with open(label_file, 'w') as f: f.write(backup_info.backup_label) def start_backup(self, backup_info): """ Start of the backup. The method performs all the preliminary operations required for a backup to start. 
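
        The underlying query issued by _pgespresso_start_backup() is
        roughly (sketch)::

            SELECT pgespresso_start_backup('<label>', <immediate_checkpoint>)
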
:param barman.infofile.BackupInfo backup_info: backup information """ self.current_action = "connecting to database (%s)" % \ self.executor.config.conninfo output.debug(self.current_action) with self.executor.server.pg_connect(): # Retrieve PostgreSQL server metadata self._pg_get_metadata(backup_info) # Issue _pg_start_backup on the PostgreSQL server self.current_action = "issuing start backup command" _logger.debug(self.current_action) label = "Barman backup %s %s" % ( backup_info.server_name, backup_info.backup_id) # Concurrent backup: issue a pgespresso_start_Backup() command start_row = self._pgespresso_start_backup(label) backup_data, start_time = start_row wal_re = re.compile( '^START WAL LOCATION: (.*) \(file (.*)\)', re.MULTILINE) wal_info = wal_re.search(backup_data) backup_info.set_attribute('status', "STARTED") backup_info.set_attribute('timeline', int(wal_info.group(2)[0:8], 16)) backup_info.set_attribute('begin_xlog', wal_info.group(1)) backup_info.set_attribute('begin_wal', wal_info.group(2)) backup_info.set_attribute('begin_offset', xlog.get_offset_from_location( wal_info.group(1))) backup_info.set_attribute('backup_label', backup_data) backup_info.set_attribute('begin_time', start_time) def stop_backup(self, backup_info): """ Stop backup wrapper :param barman.infofile.BackupInfo backup_info: backup information """ stop_row = self._pgespresso_stop_backup(backup_info.backup_label) if stop_row: end_wal, stop_time = stop_row decoded_segment = xlog.decode_segment_name(end_wal) backup_info.set_attribute('end_time', stop_time) backup_info.set_attribute('end_xlog', "%X/%X" % (decoded_segment[1], (decoded_segment[ 2] + 1) << 24)) backup_info.set_attribute('end_wal', end_wal) backup_info.set_attribute('end_offset', 0) else: raise Exception('Cannot terminate exclusive backup. You might ' 'have to manually execute ' 'pgespresso_abort_backup() on your PostgreSQL ' 'server') self.current_action = "writing backup label" self._write_backup_label(backup_info) def check(self, check_strategy): """ Perform additional checks for ConcurrentBackupStrategy :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ if self.executor.server.pgespresso_installed(): check_strategy.result(self.executor.config.name, 'pgespresso extension', True) else: check_strategy.result(self.executor.config.name, 'pgespresso extension', False, 'required for concurrent backups') barman-1.5.1/barman/cli.py0000644000076500000240000007341112621123447014644 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module implements the interface with the command line and the logger. 
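
Typical invocations routed through this module look like (illustrative;
``main`` is a hypothetical server name defined in the configuration)::

    barman list-server
    barman backup main
    barman list-backup main
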
""" import logging import os import sys from argh import ArghParser, named, arg, expects_obj from argparse import SUPPRESS, ArgumentTypeError from barman import output from barman.infofile import BackupInfo from barman.server import Server import barman.diagnose import barman.config from barman.utils import drop_privileges, configure_logging, parse_log_level from barman.xlog import BadXlogSegmentName _logger = logging.getLogger(__name__) def check_positive(value): """ Check for a positive integer option :param value: str containing the value to check """ if value is None: return None try: int_value = int(value) except Exception: raise ArgumentTypeError("'%s' is not a valid positive integer" % value) if int_value < 0: raise ArgumentTypeError("'%s' is not a valid positive integer" % value) return int_value @named('list-server') @arg('--minimal', help='machine readable output') def list_server(minimal=False): """ List available servers, with useful information """ # Get every server, both inactive and temporarily disabled servers = get_server_list() for name in sorted(servers): server = servers[name] # Exception: manage_server_command is not invoked here # Normally you would call manage_server_command to check if the # server is None and to report inactive and disabled servers, but here # we want all servers and the server cannot be None output.init('list_server', name, minimal=minimal) description = server.config.description # If the server has been manually disabled if not server.config.active: description += " (inactive)" # If server has configuration errors elif server.config.disabled: description += " (WARNING: disabled)" output.result('list_server', name, description) output.close_and_exit() def cron(): """ Run maintenance tasks (global command) """ # Skip inactive and temporarily disabled servers servers = get_server_list(skip_inactive=True, skip_disabled=True) for name in sorted(servers): server = servers[name] # Exception: manage_server_command is not invoked here # Normally you would call manage_server_command to check if the # server is None and to report inactive and disabled servers, # but here we have only active and well configured servers. 
server.cron() output.close_and_exit() # noinspection PyUnusedLocal def server_completer(prefix, parsed_args, **kwargs): global_config(parsed_args) for conf in barman.__config__.servers(): if conf.name.startswith(prefix): yield conf.name # noinspection PyUnusedLocal def server_completer_all(prefix, parsed_args, **kwargs): global_config(parsed_args) current_list = getattr(parsed_args, 'server_name', None) or () for conf in barman.__config__.servers(): if conf.name.startswith(prefix) and conf.name not in current_list: yield conf.name if len(current_list) == 0 and 'all'.startswith(prefix): yield 'all' # noinspection PyUnusedLocal def backup_completer(prefix, parsed_args, **kwargs): global_config(parsed_args) server = get_server(parsed_args) backups = server.get_available_backups() for backup_id in sorted(backups, reverse=True): if backup_id.startswith(prefix): yield backup_id for special_id in ('latest', 'last', 'oldest', 'first'): if len(backups) > 0 and special_id.startswith(prefix): yield special_id @arg('server_name', nargs='+', completer=server_completer_all, help="specifies the server names for the backup command " "('all' will show all available servers)") @arg('--immediate-checkpoint', help='forces the initial checkpoint to be done as quickly as possible', dest='immediate_checkpoint', action='store_true', default=SUPPRESS) @arg('--no-immediate-checkpoint', help='forces the initial checkpoint to be spread', dest='immediate_checkpoint', action='store_false', default=SUPPRESS) @arg('--reuse-backup', nargs='?', choices=barman.config.REUSE_BACKUP_VALUES, default=None, const='link', help='use the previous backup to improve transfer rate. ' 'If no argument is given, "link" is assumed') @arg('--retry-times', help='Number of retries after an error if base backup copy fails.', type=check_positive) @arg('--retry-sleep', help='Wait time after a failed base backup copy, before retrying.', type=check_positive) @arg('--no-retry', help='Disable base backup copy retry logic.', dest='retry_times', action='store_const', const=0) @expects_obj def backup(args): """ Perform a full backup for the given server (supports 'all') """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue if args.reuse_backup is not None: server.config.reuse_backup = args.reuse_backup if args.retry_sleep is not None: server.config.basebackup_retry_sleep = args.retry_sleep if args.retry_times is not None: server.config.basebackup_retry_times = args.retry_times if hasattr(args, 'immediate_checkpoint'): server.config.immediate_checkpoint = args.immediate_checkpoint server.backup() output.close_and_exit() @named('list-backup') @arg('server_name', nargs='+', completer=server_completer_all, help="specifies the server name for the command " "('all' will show all available servers)") @arg('--minimal', help='machine readable output', action='store_true') @expects_obj def list_backup(args): """ List available backups for the given server (supports 'all') """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue output.init('list_backup', name, minimal=args.minimal) server.list_backups() output.close_and_exit() @arg('server_name', nargs='+', completer=server_completer_all, help='specifies the server name for the command') @expects_obj def status(args): """ Shows
live information and status of the PostgreSQL server """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue output.init('status', name) server.status() output.close_and_exit() @arg('server_name', nargs='+', completer=server_completer_all, help='specifies the server name for the command') @expects_obj def rebuild_xlogdb(args): """ Rebuild the WAL file database guessing it from the disk content. """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue server.rebuild_xlogdb() output.close_and_exit() @arg('server_name', completer=server_completer, help='specifies the server name for the command') @arg('--target-tli', help='target timeline', type=int) @arg('--target-time', help='target time. You can use any valid unambiguous representation. ' 'e.g.: "YYYY-MM-DD HH:MM:SS.mmm"') @arg('--target-xid', help='target transaction ID') @arg('--target-name', help='target name created previously with ' 'pg_create_restore_point() function call') @arg('--exclusive', help='set target xid to be non-inclusive', action="store_true") @arg('--tablespace', help='tablespace relocation rule', metavar='NAME:LOCATION', action='append') @arg('--remote-ssh-command', metavar='SSH_COMMAND', help='This option activates remote recovery, by specifying the secure ' 'shell command to be launched on a remote host. It is the equivalent ' 'of the "ssh_command" server option in the configuration file for ' 'remote recovery. Example: "ssh postgres@db2"') @arg('backup_id', completer=backup_completer, help='specifies the backup ID to recover') @arg('destination_directory', help='the directory where the new server is created') @arg('--retry-times', help='Number of retries after an error if base backup copy fails.', type=check_positive) @arg('--retry-sleep', help='Wait time after a failed base backup copy, before retrying.', type=check_positive) @arg('--no-retry', help='Disable base backup copy retry logic.', dest='retry_times', action='store_const', const=0) @expects_obj def recover(args): """ Recover a server at a given time or xid """ server = get_server(args) # Retrieves the backup backup_id = parse_backup_id(server, args) if backup_id.status != BackupInfo.DONE: output.error( "Cannot recover from backup '%s' of server '%s': " "backup status is not DONE", args.backup_id, server.config.name) output.close_and_exit() # decode the tablespace relocation rules tablespaces = {} if args.tablespace: for rule in args.tablespace: try: tablespaces.update([rule.split(':', 1)]) except ValueError: output.error( "Invalid tablespace relocation rule '%s'\n" "HINT: The valid syntax for a relocation rule is " "NAME:LOCATION", rule) output.close_and_exit() # validate the rules against the tablespace list valid_tablespaces = [tablespace_data.name for tablespace_data in backup_id.tablespaces] if backup_id.tablespaces else [] for item in tablespaces: if item not in valid_tablespaces: output.error("Invalid tablespace name '%s'\n" "HINT: Please use any of the following " "tablespaces: %s", item, ', '.join(valid_tablespaces)) output.close_and_exit() # explicitly disallow the rsync remote syntax (common mistake) if ':' in args.destination_directory: output.error( "The destination directory parameter " "cannot contain the ':' character\n" "HINT: If you want to do a remote
recovery you have to use " "the --remote-ssh-command option") output.close_and_exit() if args.retry_sleep is not None: server.config.basebackup_retry_sleep = args.retry_sleep if args.retry_times is not None: server.config.basebackup_retry_times = args.retry_times server.recover(backup_id, args.destination_directory, tablespaces=tablespaces, target_tli=args.target_tli, target_time=args.target_time, target_xid=args.target_xid, target_name=args.target_name, exclusive=args.exclusive, remote_command=args.remote_ssh_command) output.close_and_exit() @named('show-server') @arg('server_name', nargs='+', completer=server_completer_all, help="specifies the server names to show " "('all' will show all available servers)") @expects_obj def show_server(args): """ Show all configuration parameters for the specified servers """ servers = get_server_list(args, skip_inactive=True) for name in sorted(servers): server = servers[name] # Skip the server (apply general rule) if not manage_server_command(server, name): continue output.init('show_server', name) server.show() output.close_and_exit() @arg('server_name', nargs='+', completer=server_completer_all, help="specifies the server names to check " "('all' will check all available servers)") @arg('--nagios', help='Nagios plugin compatible output', action='store_true') @expects_obj def check(args): """ Check if the server configuration is working. This command returns success if every check passes, or failure if any of them fails """ if args.nagios: output.set_output_writer(output.NagiosOutputWriter()) servers = get_server_list(args) for name in sorted(servers): server = servers[name] # Validate the returned server if not manage_server_command( server, name, skip_inactive=False, skip_disabled=False, disabled_is_error=False): continue # If the server has been manually disabled if not server.config.active: name += " (inactive)" # If server has configuration errors elif server.config.disabled: name += " (WARNING: disabled)" output.init('check', name, server.config.active) server.check() output.close_and_exit() def diagnose(): """ Diagnostic command (for support and problem detection purposes) """ # Get every server (both inactive and temporarily disabled) servers = get_server_list(on_error_stop=False, suppress_error=True) # errors list with duplicate paths between servers errors_list = barman.__config__.servers_msg_list barman.diagnose.exec_diagnose(servers, errors_list) output.close_and_exit() @named('show-backup') @arg('server_name', completer=server_completer, help='specifies the server name for the command') @arg('backup_id', completer=backup_completer, help='specifies the backup ID') @expects_obj def show_backup(args): """ This method shows the information of a single backup """ server = get_server(args) # Retrieves the backup backup_info = parse_backup_id(server, args) server.show_backup(backup_info) output.close_and_exit() @named('list-files') @arg('server_name', completer=server_completer, help='specifies the server name for the command') @arg('backup_id', completer=backup_completer, help='specifies the backup ID') @arg('--target', choices=('standalone', 'data', 'wal', 'full'), default='standalone', help=''' Possible values are: data (just the data files), standalone (base backup files, including required WAL files), wal (just WAL files between the beginning of base backup and the following one (if any) or the end of the log) and full (same as data + wal).
Defaults to %(default)s''') @expects_obj def list_files(args): """ List all the files for a single backup """ server = get_server(args) # Retrieves the backup backup_id = parse_backup_id(server, args) try: for line in backup_id.get_list_of_files(args.target): output.info(line, log=False) except BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" "HINT: Please run \"barman rebuild-xlogdb %s\" " "to solve this issue", str(e), server.config.name) output.close_and_exit() @arg('server_name', completer=server_completer, help='specifies the server name for the command') @arg('backup_id', completer=backup_completer, help='specifies the backup ID') @expects_obj def delete(args): """ Delete a backup """ server = get_server(args) # Retrieves the backup backup_id = parse_backup_id(server, args) server.delete_backup(backup_id) output.close_and_exit() @named('get-wal') @arg('server_name', completer=server_completer, help='specifies the server name for the command') @arg('wal_name', help='the WAL file to get') @arg('--output-directory', '-o', help='put the retrieved WAL file in this directory with the original name', default=SUPPRESS) @arg('--gzip', '-x', help='compress the output with gzip', action='store_const', const='gzip', dest='compression', default=SUPPRESS) @arg('--bzip2', '-j', help='compress the output with bzip2', action='store_const', const='bzip2', dest='compression', default=SUPPRESS) @expects_obj def get_wal(args): """ Retrieve WAL_NAME file from SERVER_NAME archive. The content will be streamed to standard output unless the --output-directory option is specified. """ server = get_server(args) # Retrieve optional arguments. If an argument is not specified, # the namespace doesn't contain it due to SUPPRESS default. # In that case we pick 'None' using getattr third argument. compression = getattr(args, 'compression', None) output_directory = getattr(args, 'output_directory', None) server.get_wal(args.wal_name, compression=compression, output_directory=output_directory) output.close_and_exit() @named('archive-wal') @arg('server_name', completer=server_completer, help='specifies the server name for the command') @expects_obj def archive_wal(args): """ Execute maintenance operations on WAL files for a given server. This command processes any incoming WAL files for the server and archives them, updating the server's WAL catalogue.
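Illustrative invocation ('main' is an assumed server name):

    barman archive-wal main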
""" server = get_server(args) output.debug("Starting archive-wal for server %s", server.config.name) server.archive_wal() output.close_and_exit() def pretty_args(args): """ Prettify the given argh namespace to be human readable :type args: argh.dispatching.ArghNamespace :return: the human readable content of the namespace """ values = dict(vars(args)) # Retrieve the command name with recent argh versions if '_functions_stack' in values: values['command'] = values['_functions_stack'][0].func_name del values['_functions_stack'] # Older argh versions only have the matching function in the namespace elif 'function' in values: values['command'] = values['function'].func_name del values['function'] return "%r" % values def global_config(args): """ Set the configuration file """ if hasattr(args, 'config'): filename = args.config else: try: filename = os.environ['BARMAN_CONFIG_FILE'] except KeyError: filename = None config = barman.config.Config(filename) barman.__config__ = config # change user if needed try: drop_privileges(config.user) except OSError: msg = "ERROR: please run barman as %r user" % config.user raise SystemExit(msg) except KeyError: msg = "ERROR: the configured user %r does not exists" % config.user raise SystemExit(msg) # configure logging log_level = parse_log_level(config.log_level) configure_logging(config.log_file, log_level or barman.config.DEFAULT_LOG_LEVEL, config.log_format) if log_level is None: _logger.warn('unknown log_level in config file: %s', config.log_level) # configure output if args.format != output.DEFAULT_WRITER or args.quiet or args.debug: output.set_output_writer(args.format, quiet=args.quiet, debug=args.debug) # Load additional configuration files config.load_configuration_files_directory() # We must validate the configuration here in order to have # both output and logging configured config.validate_global_config() _logger.debug('Initialised Barman version %s (config: %s, args: %s)', barman.__version__, config.config_file, pretty_args(args)) def get_server(args, skip_inactive=True, skip_disabled=False, on_error_stop=True, suppress_error=False): """ Get a single server retrieving its configuration (wraps get_server_list()) Returns a Server object or None if the required server is unknown and on_error_stop is False. WARNING: this function modifies the 'args' parameter :param args: an argparse namespace containing a single server_name parameter WARNING: the function modifies the content of this parameter :param bool skip_inactive: skip inactive servers when 'all' is required :param bool skip_disabled: skip disabled servers when 'all' is required :param bool on_error_stop: stop if an error is found :param bool suppress_error: suppress display of errors (e.g. 
diagnose) :rtype: barman.server.Server|None """ # This function must be called within a single-server context name = args.server_name assert isinstance(name, str) # The 'all' special name is forbidden in this context if name == 'all': output.error("You cannot use 'all' in a single server context") output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return None # Builds a list from a single given name args.server_name = [name] # Retrieve the requested server servers = get_server_list(args, skip_inactive, skip_disabled, on_error_stop, suppress_error) # The requested server has been excluded from get_server_list result if len(servers) == 0: output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return None # retrieve the server object server = servers[name] # Apply standard validation control and skip # the server if inactive or disabled, displaying standard # error messages. If on_error_stop (default), exit if not manage_server_command(server, name) and on_error_stop: output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return None # Returns the filtered server return server def get_server_list(args=None, skip_inactive=False, skip_disabled=False, on_error_stop=True, suppress_error=False): """ Get the server list from the configuration If the args parameter is None or args.server_name is ['all'], returns all defined servers :param args: an argparse namespace containing a list server_name parameter :param bool skip_inactive: skip inactive servers when 'all' is required :param bool skip_disabled: skip disabled servers when 'all' is required :param bool on_error_stop: stop if an error is found :param bool suppress_error: suppress display of errors (e.g.
diagnose) :rtype: dict(str,barman.server.Server|None) """ server_dict = {} # This function must be called within a multiple-server context assert not args or isinstance(args.server_name, list) # Generate the list of servers (required for global errors) available_servers = barman.__config__.server_names() # Get a list of configuration errors from all the servers global_error_list = barman.__config__.servers_msg_list # Global errors have higher priority if global_error_list: # Output the list of global errors if not suppress_error: for error in global_error_list: output.error(error) # If requested, exit on first error if on_error_stop: output.close_and_exit() # The following return statement will never be reached # but it is here for clarity return {} # Handle special 'all' server cases # - args is None # - 'all' special name if not args or 'all' in args.server_name: # When 'all' is used, it must be the only specified argument if args and len(args.server_name) != 1: output.error("You cannot use 'all' with other server names") servers = available_servers else: servers = args.server_name # Loop through all the requested servers for server in servers: conf = barman.__config__.get_server(server) if conf is None: # Unknown server server_dict[server] = None else: # Skip inactive servers, if requested if skip_inactive and not conf.active: output.info("Skipping inactive server '%s'" % conf.name) continue # Skip disabled servers, if requested if skip_disabled and conf.disabled: output.info("Skipping temporarily disabled server '%s'" % conf.name) continue server_object = Server(conf) server_dict[server] = server_object return server_dict def manage_server_command(server, name=None, inactive_is_error=False, disabled_is_error=True, skip_inactive=True, skip_disabled=True): """ Standard and consistent method for managing server errors within a server command execution. By default, it suggests skipping any inactive or disabled server, and it emits errors for disabled servers. Returns True if the command has to be executed for this server. :param barman.server.Server server: server to be checked for errors :param str name: name of the server, in a multi-server command :param bool inactive_is_error: treat inactive server as error :param bool disabled_is_error: treat disabled server as error :param bool skip_inactive: skip if inactive :param bool skip_disabled: skip if disabled :return: True if the command has to be executed on this server :rtype: boolean """ # Unknown server (skip it) if not server: output.error("Unknown server '%s'" % name) return False if not server.config.active: # Report inactive server as error if inactive_is_error: output.error('Inactive server: %s' % server.config.name) if skip_inactive: return False # Report disabled server as error if server.config.disabled: # Output all the messages as errors, and exit terminating the run. if disabled_is_error: for message in server.config.msg_list: output.error(message) if skip_disabled: return False # All ok, execute the command return True def parse_backup_id(server, args): """ Parses backup IDs including special words such as latest, oldest, etc. Exit with error if the backup id doesn't exist.
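Illustrative resolution (the literal id below is assumed):

    'latest' or 'last'  -> id of server.get_last_backup()
    'oldest' or 'first' -> id of server.get_first_backup()
    '20151005T093000'   -> used verbatim as a backup id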
:param Server server: server object to search for the required backup :param args: command line arguments namespace :rtype: BackupInfo """ if args.backup_id in ('latest', 'last'): backup_id = server.get_last_backup() elif args.backup_id in ('oldest', 'first'): backup_id = server.get_first_backup() else: backup_id = args.backup_id backup_info = server.get_backup(backup_id) if backup_info is None: output.error( "Unknown backup '%s' for server '%s'", args.backup_id, server.config.name) output.close_and_exit() return backup_info def main(): """ The main method of Barman """ p = ArghParser() p.add_argument('-v', '--version', action='version', version=barman.__version__) p.add_argument('-c', '--config', help='uses a configuration file ' '(defaults: %s)' % ', '.join(barman.config.Config.CONFIG_FILES), default=SUPPRESS) p.add_argument('-q', '--quiet', help='be quiet', action='store_true') p.add_argument('-d', '--debug', help='debug output', action='store_true') p.add_argument('-f', '--format', help='output format', choices=output.AVAILABLE_WRITERS.keys(), default=output.DEFAULT_WRITER) p.add_commands( [ archive_wal, cron, list_server, show_server, status, check, diagnose, backup, list_backup, show_backup, list_files, get_wal, recover, delete, rebuild_xlogdb, ] ) # noinspection PyBroadException try: p.dispatch(pre_call=global_config) except KeyboardInterrupt: msg = "Process interrupted by user (KeyboardInterrupt)" output.exception(msg) except Exception, e: msg = "%s\nSee log file for more details." % e output.exception(msg) # cleanup output API and exit honoring output.error_occurred and # output.error_exit_code output.close_and_exit() if __name__ == '__main__': # This code requires the mock module and allows us to test # bash completion inside the IDE debugger try: # noinspection PyUnresolvedReferences import mock sys.stdout = mock.Mock(wraps=sys.stdout) sys.stdout.isatty.return_value = True os.dup2(2, 8) except ImportError: pass main() barman-1.5.1/barman/command_wrappers.py0000644000076500000240000005132712621123447017440 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see <http://www.gnu.org/licenses/>. """ This module contains a wrapper for shell commands """ import inspect import shutil import sys import signal import subprocess import os import logging import re import collections import tempfile import barman.utils import dateutil.parser import dateutil.tz _logger = logging.getLogger(__name__) class CommandFailedException(Exception): """ Exception representing a failed command """ pass class RsyncListFilesFailure(Exception): """ Failure parsing the output of a "rsync --list-only" command """ pass class DataTransferFailure(Exception): """ Used to pass rsync failure details """ @classmethod def from_rsync_error(cls, e, msg): """ This method builds a DataTransferFailure exception and reports the provided message to the user (both console and log file) along with the output of the failed rsync command.
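Illustrative usage (the failure payload below is assumed):

    >>> e = CommandFailedException(dict(ret=23, out='', err='fatal'))
    >>> raise DataTransferFailure.from_rsync_error(e, 'basebackup copy failed')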
:param CommandFailedException e: The exception we are handling :param str msg: a descriptive message on what we are trying to do :return DataTransferFailure: will contain the message provided in msg """ details = msg details += "\nrsync error:\n" details += e.args[0]['out'] details += e.args[0]['err'] return cls(details) class Command(object): """ Simple wrapper for a shell command """ def __init__(self, cmd, args=None, env_append=None, shell=False, check=False, allowed_retval=(0,), debug=False, close_fds=True): self.cmd = cmd self.args = args if args is not None else [] self.shell = shell self.close_fds = close_fds self.check = check self.allowed_retval = allowed_retval self.debug = debug self.ret = None self.out = None self.err = None if env_append: self.env = os.environ.copy() self.env.update(env_append) else: self.env = None @staticmethod def _restore_sigpipe(): """restore default signal handler (http://bugs.python.org/issue1652)""" signal.signal(signal.SIGPIPE, signal.SIG_DFL) # pragma: no cover @staticmethod def _cmd_quote(cmd, args): """ Quote all cmd's arguments. This is needed to avoid command string breaking. WARNING: this function does not protect against injection. """ if args is not None and len(args) > 0: cmd = "%s '%s'" % (cmd, "' '".join(args)) return cmd def __call__(self, *args, **kwargs): self.getoutput(*args, **kwargs) return self.ret def getoutput(self, *args, **kwargs): """ Run the command and return the output and the error (if present) """ # check keyword arguments stdin = kwargs.pop('stdin', None) check = kwargs.pop('check', self.check) close_fds = kwargs.pop('close_fds', self.close_fds) if len(kwargs): raise TypeError('%s() got an unexpected keyword argument %r' % (inspect.stack()[1][3], kwargs.popitem()[0])) args = self.args + list(args) if self.shell: cmd = self._cmd_quote(self.cmd, args) else: cmd = [self.cmd] + args if self.debug: print >> sys.stderr, "Command: %r" % cmd _logger.debug("Command: %r", cmd) pipe = subprocess.Popen(cmd, shell=self.shell, env=self.env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=self._restore_sigpipe, close_fds=close_fds) out, err = pipe.communicate(stdin) # Convert output to a proper unicode string self.out = out.decode('utf-8') self.err = err.decode('utf-8') self.ret = pipe.returncode if self.debug: print >> sys.stderr, "Command return code: %s" % self.ret _logger.debug("Command return code: %s", self.ret) _logger.debug("Command stdout: %s", self.out) _logger.debug("Command stderr: %s", self.err) if check and self.ret not in self.allowed_retval: raise CommandFailedException(dict( ret=self.ret, out=self.out, err=self.err)) return self.out, self.err class Rsync(Command): """ This class is a wrapper for the rsync system command, which is used extensively by Barman """ #: This regular expression is used to parse each line of the output # of a "rsync --list-only" call. This regexp has been tested with every # known version of upstream rsync that is supported (>= 3.0.4) LIST_ONLY_RE = re.compile(''' (?x) # Enable verbose mode ^ # start of the line # capture the mode (e.g.
"-rw-------") (?P[-\w]+) \s+ # size is an integer (?P\d+) \s+ # The date field can have two different form (?P # "2014/06/05 18:00:00" if the sending rsync is compiled # with HAVE_STRFTIME [\d/]+\s+[\d:]+ | # "Thu Jun 5 18:00:00 2014" otherwise \w+\s+\w+\s+\d+\s+[\d:]+\s+\d+ ) \s+ # all the remaining characters are part of filename (?P.+) $ # end of the line ''') #: This regular expression is used to ignore error messages regarding # vanished files that are not really an error. It is used because # in some cases rsync reports it with exit code 23 which could also mean # a fatal error VANISHED_RE = re.compile(''' (?x) # Enable verbose mode ^ # start of the line ( # files which vanished before rsync start rsync:\ link_stat\ ".+"\ failed:\ No\ such\ file\ or\ directory\ \(2\) | # files which vanished after rsync start file\ has\ vanished:\ ".+" | # final summary rsync\ error:\ .* \(code\ 23\)\ at\ main\.c\(\d+\)\ \[generator=[^\]]+\] ) $ # end of the line ''') # This named tuple is used to parse each line of the output # of a "rsync --list-only" call FileItem = collections.namedtuple('FileItem', 'mode size date path') def __init__(self, rsync='rsync', args=None, ssh=None, ssh_options=None, bwlimit=None, exclude_and_protect=None, network_compression=None, check=True, allowed_retval=(0, 24), **kwargs): options = [] # Try to find rsync in system PATH using the which method. # If not found, rsync is not installed and this class cannot # work properly. # Raise CommandFailedException warning the user rsync_path = barman.utils.which(rsync) if not rsync_path: raise CommandFailedException('rsync not in system PATH: ' 'is rsync installed?') if ssh: options += ['-e', self._cmd_quote(ssh, ssh_options)] if network_compression: options += ['-z'] if exclude_and_protect: for path in exclude_and_protect: options += ["--exclude=%s" % (path,), "--filter=P_%s" % (path,)] if args: options += self._args_for_suse(args) if bwlimit is not None and bwlimit > 0: options += ["--bwlimit=%s" % bwlimit] Command.__init__(self, rsync, args=options, check=check, allowed_retval=allowed_retval, **kwargs) def _args_for_suse(self, args): """ Mangle args for SUSE compatibility See https://bugzilla.opensuse.org/show_bug.cgi?id=898513 """ # Prepend any argument starting with ':' with a space # Workaround for SUSE rsync issue return [' ' + a if a.startswith(':') else a for a in args] def getoutput(self, *args, **kwargs): """ Run the command and return the output and the error (if present) """ # Prepares args for SUSE args = self._args_for_suse(args) # Invoke the base class method return super(Rsync, self).getoutput(*args, **kwargs) def from_file_list(self, filelist, src, dst, *args, **kwargs): """ This method copies filelist from src to dst. 
Returns the return code of the rsync command """ if 'stdin' in kwargs: raise TypeError("from_file_list() doesn't support 'stdin' " "keyword argument") input_string = ('\n'.join(filelist)).encode('UTF-8') _logger.debug("from_file_list: %r", filelist) kwargs['stdin'] = input_string self.getoutput('--files-from=-', src, dst, *args, **kwargs) return self.ret def list_files(self, path): """ This method recursively retrieves a list of files contained in a directory, either local or remote (if it starts with ':') :param str path: the path we want to inspect :except CommandFailedException: if rsync call fails :except RsyncListFilesFailure: if rsync output can't be parsed """ _logger.debug("list_files: %r", path) # Use the --no-human-readable option to avoid digit groupings # in "size" field with rsync >= 3.1.0. # Ref: http://ftp.samba.org/pub/rsync/src/rsync-3.1.0-NEWS self.getoutput('--no-human-readable', '--list-only', '-r', path, check=True) for line in self.out.splitlines(): line = line.rstrip() match = self.LIST_ONLY_RE.match(line) if match: mode = match.group('mode') # no exceptions here: the regexp forces 'size' to be an integer size = int(match.group('size')) try: date = dateutil.parser.parse(match.group('date')) date = date.replace(tzinfo=dateutil.tz.tzlocal()) except (TypeError, ValueError): # This should not happen, due to the regexp msg = ("Unable to parse rsync --list-only output line " "(date): '%s'" % line) _logger.exception(msg) raise RsyncListFilesFailure(msg) path = match.group('path') yield self.FileItem(mode, size, date, path) else: # This is a hard error, as we are unable to parse the output # of rsync. It can only happen with a modified or unknown # rsync version (perhaps newer than 3.1?) msg = ("Unable to parse rsync --list-only output line: " "'%s'" % line) _logger.error(msg) raise RsyncListFilesFailure(msg) def _rsync_ignore_vanished_files(self, *args, **kwargs): """ Wrap a getoutput() call and ignore missing args TODO: when rsync 3.1 is widespread, replace this with --ignore-missing-args argument """ try: self.getoutput(*args, **kwargs) except CommandFailedException: # if return code is different than 23 # or there is any error which doesn't match the VANISHED_RE regexp # raise the error again if self.ret == 23 and self.err is not None: for line in self.err.splitlines(): match = self.VANISHED_RE.match(line.rstrip()) if match: continue else: raise else: raise return self.out, self.err def smart_copy(self, src, dst, safe_horizon=None, ref=None): """ Recursively copies files from "src" to "dst" in a way that is safe from the point of view of a PostgreSQL backup. The "safe_horizon" parameter is the timestamp of the beginning of the older backup involved in the copy (as source or destination). Any file updated after that timestamp must be checked, as it could have been modified during the backup - and we do not replay WAL files to update it. The "dst" directory must exist. If the "safe_horizon" parameter is None, we cannot make any assumptions about what can be considered "safe", so we must check everything with checksums enabled. If the "ref" parameter is provided and is not None, it is looked up instead of the "dst" dir. This is useful when we are copying files using '--link-dest' and '--copy-dest' rsync options. In this case, both the "dst" and "ref" dir must exist and the "dst" dir must be empty. If the "src" or "dst" content begins with a ':' character, it is a remote path. Only local paths are supported in the "ref" argument.
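Illustrative call (host, paths and horizon below are assumed):

    >>> rsync = RsyncPgData(ssh='ssh postgres@pg')
    >>> rsync.smart_copy(':/var/lib/pgsql/data/',
    ...                  '/srv/barman/main/base/20151005T093000/data',
    ...                  safe_horizon=previous_backup_begin_time)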
:param str src: the source path :param str dst: the destination path :param datetime.datetime safe_horizon: anything after this time has to be checked :param str ref: the reference path :except CommandFailedException: If rsync failed at any time :except RsyncListFilesFailure: If source rsync output format is unknown """ _logger.info("Smart copy: %r -> %r (ref: %r, safe before %r)", src, dst, ref, safe_horizon) # If reference is not set we use dst as reference path if ref is None: ref = dst # Make sure the ref path ends with a '/' or rsync will add the # last path component to all the returned items during listing if ref[-1] != '/': ref += '/' # Build a hash containing all files present in the reference # directory. Directories are not included _logger.info("Smart copy step 1/4: preparation") try: ref_hash = dict(( (item.path, item) for item in self.list_files(ref) if item.mode[0] != 'd')) except (CommandFailedException, RsyncListFilesFailure) as e: # Here we set ref_hash to None, thus disabling the code that marks # as "safe matching" those destination files with different time or # size, even if newer than "safe_horizon". As a result, all files # newer than "safe_horizon" will be checked through checksums. ref_hash = None _logger.error( "Unable to retrieve reference directory file list. " "Using only source file information to decide which files need " "to be copied with checksums enabled: %s" % e) # We need a temporary directory to store the files containing the lists # we are building in order to instruct rsync about which files need to # be copied at different stages temp_dir = tempfile.mkdtemp(suffix='', prefix='barman-') try: # The 'dir.list' file will contain every directory in the # source tree dir_list = open(os.path.join(temp_dir, 'dir.list'), 'w+') # The 'safe.list' file will contain all files older than # safe_horizon, as well as files that we know rsync will # check anyway due to a difference in mtime or size safe_list = open(os.path.join(temp_dir, 'safe.list'), 'w+') # The 'check.list' file will contain all files that need # to be copied with checksum option enabled check_list = open(os.path.join(temp_dir, 'check.list'), 'w+') # The 'exclude_and_protect.filter' file will contain a filter rule # to protect each file present in the source tree. It will be used # during the first phase to delete all the extra files on # destination. exclude_and_protect_filter = open( os.path.join(temp_dir, 'exclude_and_protect.filter'), 'w+') for item in self.list_files(src): # If item is a directory, we only need to save it in 'dir.list' if item.mode[0] == 'd': dir_list.write(item.path + '\n') continue # Add every file in the source path to the list of files # to be protected from deletion ('exclude_and_protect.filter') exclude_and_protect_filter.write('P ' + item.path + '\n') exclude_and_protect_filter.write('- ' + item.path + '\n') # If source item is older than safe_horizon, # add it to 'safe.list' if safe_horizon and item.date < safe_horizon: safe_list.write(item.path + '\n') continue # If ref_hash is None, it means we failed to retrieve the # destination file list. We assume the only safe way is to # check every file that is newer than safe_horizon if ref_hash is None: check_list.write(item.path + '\n') continue # If source file differs by time or size from the matching # destination, rsync will discover the difference in any case. # It is then safe to skip checksum check here.
dst_item = ref_hash.get(item.path, None) if (dst_item is None or dst_item.size != item.size or dst_item.date != item.date): safe_list.write(item.path + '\n') continue # All remaining files must be checked with checksums enabled check_list.write(item.path + '\n') # Close all the control files dir_list.close() safe_list.close() check_list.close() exclude_and_protect_filter.close() # TODO: remove debug output when the procedure is marked as 'stable' # By adding a double '--itemize-changes' option, the rsync output # will contain the full list of files that have been touched, even # those that have not changed orig_args = self.args self.args = orig_args[:] # clone the argument list self.args.append('--itemize-changes') self.args.append('--itemize-changes') # Create directories and delete/copy unknown files _logger.info("Smart copy step 2/4: create directories and " "delete/copy unknown files") self._rsync_ignore_vanished_files( '--recursive', '--delete', '--files-from=%s' % dir_list.name, '--filter', 'merge %s' % exclude_and_protect_filter.name, src, dst, check=True) # Copy safe files _logger.info("Smart copy step 3/4: safe copy") self._rsync_ignore_vanished_files( '--files-from=%s' % safe_list.name, src, dst, check=True) # Copy remaining files with checksums _logger.info("Smart copy step 4/4: copy with checksums") self._rsync_ignore_vanished_files( '--checksum', '--files-from=%s' % check_list.name, src, dst, check=True) # TODO: remove debug output when the procedure is marked as 'stable' # Restore the original arguments for rsync self.args = orig_args finally: shutil.rmtree(temp_dir) _logger.info("Smart copy finished: %s -> %s (safe before %s)", src, dst, safe_horizon) class RsyncPgData(Rsync): """ This class is a wrapper for rsync, specialised in syncing the Postgres data directory """ def __init__(self, rsync='rsync', args=None, **kwargs): options = [ '-rLKpts', '--delete-excluded', '--inplace', '--exclude=/pg_xlog/*', '--exclude=/pg_log/*', '--exclude=/recovery.conf', '--exclude=/postmaster.pid' ] if args: options += args Rsync.__init__(self, rsync, args=options, **kwargs) barman-1.5.1/barman/compression.py0000644000076500000240000001302212621123447016426 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see <http://www.gnu.org/licenses/>.
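# Illustrative usage sketch (assumes a configuration object whose
# 'compression' attribute is set to 'gzip'; file paths are made up):
#
#     manager = CompressionManager(config)
#     if manager.check():
#         compressor = manager.get_compressor()
#         compressor.compress('incoming/0000000100000000000000A1',
#                             'wals/0000000100000000000000A1')
#         assert identify_compression(
#             'wals/0000000100000000000000A1') == 'gzip'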
""" This module is responsible to manage the compression features of Barman """ from barman.command_wrappers import Command import logging _logger = logging.getLogger(__name__) class CompressionIncompatibility(Exception): """ Exception for compression incompatibility """ class CompressionManager(object): def __init__(self, config): """ Compression manager """ self.config = config def check(self, compression=None): """ This method returns True if the compression specified in the configuration file is present in the register, otherwise False """ if not compression: compression = self.config.compression if compression not in compression_registry: return False return True def get_compressor(self, remove_origin=False, debug=False, compression=None): """ Returns a new compressor instance """ if not compression: compression = self.config.compression # Check if the requested compression mechanism is allowed if self.check(compression): return compression_registry[compression]( config=self.config, compression=compression, remove_origin=remove_origin, debug=debug) else: return None def identify_compression(filename): """ Try to guess the compression algorithm of a file :param filename: the pat of the file to identify :rtype: str """ with open(filename, 'rb') as f: file_start = f.read(MAGIC_MAX_LENGTH) for file_type, cls in compression_registry.iteritems(): if cls.validate(file_start): return file_type return None class Compressor(object): """ Abstract base class for all compressors """ MAGIC = None def __init__(self, config, compression, remove_origin=False, debug=False): self.config = config self.compression = compression self.remove_origin = remove_origin self.debug = debug self.compress = None self.decompres = None def _build_command(self, pipe_command): """ Build the command string and create the actual Command object :param pipe_command: the command used to compress/decompress :rtype: Command """ command = 'command(){ ' command += pipe_command command += ' > "$2" < "$1"' if self.remove_origin: command += ' && rm -f "$1"' command += ';}; command' return Command(command, shell=True, check=True, debug=self.debug) @classmethod def validate(cls, file_start): """ Guess if the first bytes of a file are compatible with the compression implemented by this class :param file_start: a binary string representing the first few bytes of a file :rtype: bool """ return cls.MAGIC and file_start.startswith(cls.MAGIC) class GZipCompressor(Compressor): """ Predefined compressor with GZip """ MAGIC = b'\x1f\x8b\x08' def __init__(self, config, compression, remove_origin=False, debug=False): super(GZipCompressor, self).__init__( config, compression, remove_origin, debug) self.compress = self._build_command('gzip -c') self.decompress = self._build_command('gzip -c -d') class BZip2Compressor(Compressor): """ Predefined compressor with BZip2 """ MAGIC = b'\x42\x5a\x68' def __init__(self, config, compression, remove_origin=False, debug=False): super(BZip2Compressor, self).__init__( config, compression, remove_origin, debug) self.compress = self._build_command('bzip2 -c') self.decompress = self._build_command('bzip2 -c -d') class CustomCompressor(Compressor): """ Custom compressor """ def __init__(self, config, compression, remove_origin=False, debug=False): if not config.custom_compression_filter: raise CompressionIncompatibility("custom_compression_filter") if not config.custom_decompression_filter: raise CompressionIncompatibility("custom_decompression_filter") super(CustomCompressor, self).__init__( config, compression, 
remove_origin, debug) self.compress = self._build_command( config.custom_compression_filter) self.decompress = self._build_command( config.custom_decompression_filter) #: a dictionary mapping all supported compression schema #: to the class implementing it compression_registry = { 'gzip': GZipCompressor, 'bzip2': BZip2Compressor, 'custom': CustomCompressor, } #: The longest string needed to identify a compression schema MAGIC_MAX_LENGTH = reduce( max, [len(x.MAGIC or '') for x in compression_registry.values()], 0) barman-1.5.1/barman/config.py0000644000076500000240000006623612621362541015351 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see <http://www.gnu.org/licenses/>. """ This module is responsible for all the things related to Barman configuration, such as parsing the configuration file. """ import collections import datetime import inspect import logging.handlers import os import re import sys from ConfigParser import ConfigParser, NoOptionError from glob import iglob from barman import output # create a namedtuple object called PathConflict with 'label' and 'server' PathConflict = collections.namedtuple('PathConflict', 'label server') _logger = logging.getLogger(__name__) FORBIDDEN_SERVER_NAMES = ['all'] DEFAULT_USER = 'barman' DEFAULT_LOG_LEVEL = logging.INFO DEFAULT_LOG_FORMAT = "%(asctime)s [%(process)s] %(name)s " \ "%(levelname)s: %(message)s" _TRUE_RE = re.compile(r"""^(true|t|yes|1)$""", re.IGNORECASE) _FALSE_RE = re.compile(r"""^(false|f|no|0)$""", re.IGNORECASE) _TIME_INTERVAL_RE = re.compile(r""" ^\s* (\d+)\s+(day|month|week)s? # N (day|month|week) with optional 's' \s*$ """, re.IGNORECASE | re.VERBOSE) REUSE_BACKUP_VALUES = ('copy', 'link', 'off') # Possible copy methods for backups (must be all lowercase) COPY_METHOD_VALUES = ['rsync'] class CsvOption(set): """ Base class for CSV options. Given a comma-delimited string, this class contains the submitted options. Internally, it extends set in order to avoid option duplication. Allowed values for the CSV option are contained in the 'value_list' attribute. The 'conflicts' attribute specifies, for each value, the list of values that are prohibited (and thus generate a conflict). If a conflict is found, raises a ValueError exception. """ value_list = [] conflicts = {} def __init__(self, value, key, source): # Invoke parent class init and initialize an empty set super(CsvOption, self).__init__() # Parse not None values if value is not None: self.parse(value, key, source) # Validates the object structure before returning the new instance self.validate(key, source) def parse(self, value, key, source): """ Parses a list of values, correctly assigning the set of values (removing duplicates) and checking for conflicts. """ if value == '': return values_list = value.split(',') for val in sorted(values_list): val = val.strip().lower() if val in self.value_list: # check for conflicting values.
if a conflict is # found, the option is not valid: raise an exception. if val in self.conflicts and self.conflicts[val] in self: raise ValueError("Invalid configuration value '%s' for " "key %s in %s: cannot contain both " "'%s' and '%s'. " "Configuration directive ignored." % (val, key, source, val, self.conflicts[val])) else: # otherwise use parsed value self.add(val) else: # not allowed value, reject the configuration raise ValueError("Invalid configuration value '%s' for " "key %s in %s: Unknown option" % (val, key, source)) def validate(self, key, source): """ Override this method for special validation needs """ def to_json(self): """ Output representation of the obj for JSON serialization The result is a string which can be parsed by the same class """ return ",".join(self) class BackupOptions(CsvOption): """ Extends CsvOption class providing all the details for the backup_options field """ # constants containing labels for allowed values EXCLUSIVE_BACKUP = 'exclusive_backup' CONCURRENT_BACKUP = 'concurrent_backup' # list holding all the allowed values for the BackupOption class value_list = [EXCLUSIVE_BACKUP, CONCURRENT_BACKUP] # map holding all the possible conflicts between the allowed values conflicts = { EXCLUSIVE_BACKUP: CONCURRENT_BACKUP, CONCURRENT_BACKUP: EXCLUSIVE_BACKUP, } def validate(self, key, source): """ Validates backup_option values: currently it makes sure that either exclusive_backup or concurrent_backup is set. """ if self.CONCURRENT_BACKUP not in self \ and self.EXCLUSIVE_BACKUP not in self: raise ValueError("Invalid configuration value for " "key %s in %s: it must contain either " "exclusive_backup or concurrent_backup option" % (key, source)) class RecoveryOptions(CsvOption): """ Extends CsvOption class providing all the details for the recovery_options field """ # constants containing labels for allowed values GET_WAL = 'get-wal' # list holding all the allowed values for the RecoveryOptions class value_list = [GET_WAL] def parse_boolean(value): """ Parse a string to a boolean value :param str value: string representing a boolean :raises ValueError: if the string is an invalid boolean representation """ if _TRUE_RE.match(value): return True if _FALSE_RE.match(value): return False raise ValueError("Invalid boolean representation (use 'true' or 'false')") def parse_time_interval(value): """ Parse a string, transforming it into a time interval. Accepted format: N (day|month|week)s :param str value: the string to evaluate """ # if empty string or None return None if value is None or value == '': return None result = _TIME_INTERVAL_RE.match(value) # if the string doesn't match, the option is invalid if not result: raise ValueError("Invalid value for a time interval %s" % value) # extract the numeric value and the time unit value = int(result.groups()[0]) unit = result.groups()[1][0].lower() # Calculates the time delta if unit == 'd': time_delta = datetime.timedelta(days=value) elif unit == 'w': time_delta = datetime.timedelta(weeks=value) elif unit == 'm': time_delta = datetime.timedelta(days=(31 * value)) else: # This should never happen raise ValueError("Invalid unit time %s" % unit) return time_delta def parse_reuse_backup(value): """ Parse a string to a valid reuse_backup value.
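Illustrative behaviour (values assumed):

    >>> parse_reuse_backup('LINK')
    'link'
    >>> parse_reuse_backup(None) is None
    True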
Valid values are "copy", "link" and "off" :param str value: reuse_backup value :raises ValueError: if the value is invalid """ if value is None: return None if value.lower() in REUSE_BACKUP_VALUES: return value.lower() raise ValueError( "Invalid value (use '%s' or '%s')" % ( "', '".join(REUSE_BACKUP_VALUES[:-1]), REUSE_BACKUP_VALUES[-1])) def parse_copy_method(value): """ Parse a string to a valid copy_method value. Valid values are contained in COPY_METHOD_VALUES list :param str value: copy_method value :raises ValueError: if the value is invalid """ if value is None: return None if value.lower() in COPY_METHOD_VALUES: return value.lower() raise ValueError( "Invalid value (must be one in: '%s')" % ( "', '".join(COPY_METHOD_VALUES))) class ServerConfig(object): """ This class represents the configuration for a specific Server instance. """ KEYS = [ 'active', 'description', 'ssh_command', 'conninfo', 'backup_directory', 'basebackups_directory', 'disabled', 'wals_directory', 'incoming_wals_directory', 'compression', 'custom_compression_filter', 'custom_decompression_filter', 'retention_policy_mode', 'retention_policy', 'wal_retention_policy', 'pre_backup_script', 'post_backup_script', 'pre_backup_retry_script', 'post_backup_retry_script', 'pre_archive_script', 'post_archive_script', 'pre_archive_retry_script', 'post_archive_retry_script', 'minimum_redundancy', 'bandwidth_limit', 'tablespace_bandwidth_limit', 'reuse_backup', 'backup_options', 'immediate_checkpoint', 'network_compression', 'recovery_options', 'basebackup_retry_times', 'basebackup_retry_sleep', 'last_backup_maximum_age', 'copy_method', ] BARMAN_KEYS = [ 'compression', 'custom_compression_filter', 'custom_decompression_filter', 'retention_policy_mode', 'retention_policy', 'wal_retention_policy', 'pre_backup_script', 'post_backup_script', 'pre_archive_script', 'post_archive_script', 'configuration_files_directory', 'minimum_redundancy', 'bandwidth_limit', 'tablespace_bandwidth_limit', 'reuse_backup', 'backup_options', 'immediate_checkpoint', 'network_compression', 'recovery_options', 'basebackup_retry_times', 'basebackup_retry_sleep', 'last_backup_maximum_age', 'copy_method', ] DEFAULTS = { 'active': 'true', 'disabled': 'false', 'backup_directory': r'%(barman_home)s/%(name)s', 'basebackups_directory': r'%(backup_directory)s/base', 'wals_directory': r'%(backup_directory)s/wals', 'incoming_wals_directory': r'%(backup_directory)s/incoming', 'retention_policy_mode': 'auto', 'wal_retention_policy': 'main', 'minimum_redundancy': '0', 'backup_options': "%s" % BackupOptions.EXCLUSIVE_BACKUP, 'recovery_options': '', 'immediate_checkpoint': 'false', 'network_compression': 'false', 'basebackup_retry_times': '0', 'basebackup_retry_sleep': '30', 'copy_method': 'rsync', } FIXED = [ 'disabled', ] PARSERS = { 'active': parse_boolean, 'disabled': parse_boolean, 'immediate_checkpoint': parse_boolean, 'network_compression': parse_boolean, 'backup_options': BackupOptions, 'reuse_backup': parse_reuse_backup, 'basebackup_retry_times': int, 'basebackup_retry_sleep': int, 'recovery_options': RecoveryOptions, 'last_backup_maximum_age': parse_time_interval, 'copy_method': parse_copy_method, } def invoke_parser(self, key, source, value, new_value): """ Function used for parsing configuration values. If needed, it uses special parsers from the PARSERS map, and handles parsing exceptions. Uses two values (value and new_value) to manage configuration hierarchy (server config overwrites global config). 
:param str key: the name of the configuration option :param str source: the section that contains the configuration option :param value: the old value of the option if present. :param str new_value: the new value that needs to be parsed :return: the parsed value of a configuration option """ # If the new value is None, returns the old value if new_value is None: return value # If we have a parser for the current key, use it to obtain the # actual value. If an exception is thrown, print a warning and # ignore the value. # noinspection PyBroadException if key in self.PARSERS: parser = self.PARSERS[key] try: # If the parser is a subclass of the CsvOption class # we need a different invocation, which passes not only # the value to the parser, but also the key name # and the section that contains the configuration if inspect.isclass(parser) \ and issubclass(parser, CsvOption): value = parser(new_value, key, source) else: value = parser(new_value) except Exception, e: output.warning("Invalid configuration value '%s' for key %s" " in %s: %s", value, key, source, e) _logger.exception(e) else: value = new_value return value def __init__(self, config, name): self.msg_list = [] self.config = config self.name = name self.barman_home = config.barman_home self.barman_lock_directory = config.barman_lock_directory config.validate_server_config(self.name) for key in ServerConfig.KEYS: value = None # Skip parameters that cannot be configured by users if key not in ServerConfig.FIXED: # Get the setting from the [name] section of config file # A literal None value is converted to an empty string new_value = config.get(name, key, self.__dict__, none_value='') source = '[%s] section' % name value = self.invoke_parser(key, source, value, new_value) # If the setting isn't present in [name] section of config file # check if it has to be inherited from the [barman] section if value is None and key in ServerConfig.BARMAN_KEYS: new_value = config.get('barman', key, self.__dict__, none_value='') source = '[barman] section' value = self.invoke_parser(key, source, value, new_value) # If the setting isn't present in [name] section of config file # and is not inherited from global section use its default # (if present) if value is None and key in ServerConfig.DEFAULTS: new_value = ServerConfig.DEFAULTS[key] % self.__dict__ source = 'DEFAULTS' value = self.invoke_parser(key, source, value, new_value) # An empty string is a None value (bypassing inheritance # from global configuration) if value is not None and value == '': value = None setattr(self, key, value) def to_json(self): """ Return an equivalent dictionary that can be encoded in json """ json_dict = dict(vars(self)) # remove the reference to main Config object del json_dict['config'] return json_dict class Config(object): """This class represents the barman configuration. 
Default configuration files are /etc/barman.conf, /etc/barman/barman.conf and ~/.barman.conf for a per-user configuration """ CONFIG_FILES = [ '~/.barman.conf', '/etc/barman.conf', '/etc/barman/barman.conf', ] _QUOTE_RE = re.compile(r"""^(["'])(.*)\1$""") def __init__(self, filename=None): self._config = ConfigParser() if filename: if hasattr(filename, 'read'): self._config.readfp(filename) else: # check for the existence of the user defined file if not os.path.exists(filename): sys.exit("Configuration file '%s' does not exist" % filename) self._config.read(os.path.expanduser(filename)) else: # Check for the presence of configuration files # inside default directories for path in self.CONFIG_FILES: full_path = os.path.expanduser(path) if os.path.exists(full_path) \ and full_path in self._config.read(full_path): filename = full_path break else: sys.exit("Could not find any configuration file at " "default locations.\n" "Check Barman's documentation for more help.") self.config_file = filename self._servers = None self.servers_msg_list = [] self._parse_global_config() def get(self, section, option, defaults=None, none_value=None): """Method to get the value from a given section from Barman configuration """ if not self._config.has_section(section): return None try: value = self._config.get(section, option, raw=False, vars=defaults) if value.lower() == 'none': value = none_value if value is not None: value = self._QUOTE_RE.sub(lambda m: m.group(2), value) return value except NoOptionError: return None def _parse_global_config(self): """ This method parses the global [barman] section """ self.barman_home = self.get('barman', 'barman_home') self.barman_lock_directory = self.get( 'barman', 'barman_lock_directory') or self.barman_home self.user = self.get('barman', 'barman_user') or DEFAULT_USER self.log_file = self.get('barman', 'log_file') self.log_format = self.get('barman', 'log_format') or DEFAULT_LOG_FORMAT self.log_level = self.get('barman', 'log_level') or DEFAULT_LOG_LEVEL # save the raw barman section to be compared later in # _is_global_config_changed() method self._global_config = set(self._config.items('barman')) def _is_global_config_changed(self): """Return true if something has changed in global configuration""" return self._global_config != set(self._config.items('barman')) def load_configuration_files_directory(self): """ Read the "configuration_files_directory" option and load all the configuration files with the .conf suffix that lie in that folder """ config_files_directory = self.get('barman', 'configuration_files_directory') if not config_files_directory: return if not os.path.isdir(os.path.expanduser(config_files_directory)): _logger.warn( 'Ignoring the "configuration_files_directory" option as "%s" ' 'is not a directory', config_files_directory) return for cfile in sorted(iglob( os.path.join(os.path.expanduser(config_files_directory), '*.conf'))): filename = os.path.basename(cfile) if os.path.isfile(cfile): # Load a file _logger.debug('Including configuration file: %s', filename) self._config.read(cfile) if self._is_global_config_changed(): msg = "the configuration file %s contains a not empty [" \ "barman] section" % filename _logger.fatal(msg) raise SystemExit("FATAL: %s" % msg) else: # Add an info that a file has been discarded _logger.warn('Discarding configuration file: %s (not a file)', filename) def _populate_servers(self): """ Populate server list from configuration file Also check for paths errors in configuration. 
If two or more paths overlap in a single server, that server is disabled. If two or more directory paths overlap between different servers an error is raised. """ # Populate servers if self._servers is not None: return self._servers = {} # Cycle all the available configurations sections for section in self._config.sections(): if section == 'barman': # skip global settings continue # Exit if the section has a reserved name if section in FORBIDDEN_SERVER_NAMES: msg = "the reserved word '%s' is not allowed as server name." \ "Please rename it." % section _logger.fatal(msg) raise SystemExit("FATAL: %s" % msg) # Create a ServerConfig object self._servers[section] = ServerConfig(self, section) # Check for conflicting paths in Barman configuration self._check_conflicting_paths() def _check_conflicting_paths(self): """ Look for conflicting paths intra-server and inter-server """ # All paths in configuration servers_paths = {} # Global errors list self.servers_msg_list = [] # Cycle all the available configurations sections for section in sorted(self._config.sections()): if section == 'barman': # skip global settings continue # Paths map section_conf = self._servers[section] config_paths = { 'backup_directory': section_conf.backup_directory, 'basebackups_directory': section_conf.basebackups_directory, 'wals_directory': section_conf.wals_directory, 'incoming_wals_directory': section_conf.incoming_wals_directory, } # Check for path errors for label, path in sorted(config_paths.iteritems()): # If the path does not conflict with the others, add it to the # paths map real_path = os.path.realpath(path) if real_path not in servers_paths: servers_paths[real_path] = PathConflict(label, section) else: if section == servers_paths[real_path].server: # Internal path error. # Insert the error message into the server.msg_list if real_path == path: self._servers[section].msg_list.append( "Conflicting path: %s=%s conflicts with " "'%s' for server '%s'" % ( label, path, servers_paths[real_path].label, servers_paths[real_path].server)) else: # Symbolic link self._servers[section].msg_list.append( "Conflicting path: %s=%s (symlink to: %s) " "conflicts with '%s' for server '%s'" % ( label, path, real_path, servers_paths[real_path].label, servers_paths[real_path].server)) # Disable the server self._servers[section].disabled = True else: # Global path error. 
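                    # (e.g., an illustrative case: two different server
                    #  sections whose wals_directory resolves to the same
                    #  real path on disk end up in this branch)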
# Insert the error message into the global msg_list if real_path == path: self.servers_msg_list.append( "Conflicting path: " "%s=%s for server '%s' conflicts with " "'%s' for server '%s'" % ( label, path, section, servers_paths[real_path].label, servers_paths[real_path].server)) else: # Symbolic link self.servers_msg_list.append( "Conflicting path: " "%s=%s (symlink to: %s) for server '%s' " "conflicts with '%s' for server '%s'" % ( label, path, real_path, section, servers_paths[real_path].label, servers_paths[real_path].server)) def server_names(self): """This method returns a list of server names""" self._populate_servers() return self._servers.keys() def servers(self): """This method returns a list of server parameters""" self._populate_servers() return self._servers.values() def get_server(self, name): """ Get the configuration of the specified server :param str name: the server name """ self._populate_servers() return self._servers.get(name, None) def validate_global_config(self): """ Validate global configuration parameters """ # Check for the existence of unexpected parameters in the # global section of the configuration file keys = ['barman_home', 'barman_lock_directory', 'barman_user', 'log_file', 'log_level', 'configuration_files_directory'] keys.extend(ServerConfig.KEYS) self._validate_with_keys(self._global_config, keys, 'barman') def validate_server_config(self, server): """ Validate configuration parameters for a specified server :param str server: the server name """ # Check for the existence of unexpected parameters in the # server section of the configuration file self._validate_with_keys(self._config.items(server), ServerConfig.KEYS, server) @staticmethod def _validate_with_keys(config_items, allowed_keys, section): """ Check every config parameter against a list of allowed keys :param config_items: list of tuples containing provided parameters along with their values :param allowed_keys: list of allowed keys :param section: source section (for error reporting) """ for parameter in config_items: # if the parameter name is not in the list of allowed values, # then output a warning name = parameter[0] if name not in allowed_keys: output.warning('Invalid configuration option "%s" in [%s] ' 'section.', name, section) # easy raw config diagnostic with python -m # noinspection PyProtectedMember def _main(): print "Active configuration settings:" r = Config() r.load_configuration_files_directory() for section in r._config.sections(): print "Section: %s" % section for option in r._config.options(section): print "\t%s = %s " % (option, r.get(section, option)) if __name__ == "__main__": _main() barman-1.5.1/barman/diagnose.py0000644000076500000240000000544612621123447015671 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents the barman diagnostic tool. 
""" import logging import json import sys from barman.backup import BackupInfo import barman from barman import output from barman import fs from barman.utils import BarmanEncoder _logger = logging.getLogger(__name__) def exec_diagnose(servers, errors_list): """ Diagnostic command: gathers information from backup server and from all the configured servers. Gathered information should be used for support and problems detection :param dict(str,barman.server.Server) servers: list of configured servers :param list errors_list: list of global errors """ # global section. info about barman server diagnosis = {} diagnosis['global'] = {} diagnosis['servers'] = {} # barman global config diagnosis['global']['config'] = dict(barman.__config__._global_config) diagnosis['global']['config']['errors_list'] = errors_list command = fs.UnixLocalCommand() # basic system info diagnosis['global']['system_info'] = command.get_system_info() diagnosis['global']['system_info']['barman_ver'] = barman.__version__ # per server section for name in sorted(servers): server = servers[name] if server is None: output.error("Unknown server '%s'" % name) continue # server configuration diagnosis['servers'][name] = {} diagnosis['servers'][name]['config'] = vars(server.config) del diagnosis['servers'][name]['config']['config'] # server system info if server.config.ssh_command: command = fs.UnixRemoteCommand(ssh_command=server.config.ssh_command) diagnosis['servers'][name]['system_info'] = command.get_system_info() # barman statuts information for the server diagnosis['servers'][name]['status'] = server.get_remote_status() # backup list backups = server.get_available_backups(BackupInfo.STATUS_ALL) diagnosis['servers'][name]['backups'] = backups output.info(json.dumps(diagnosis, sys.stdout, cls=BarmanEncoder, indent=4, sort_keys=True)) barman-1.5.1/barman/fs.py0000644000076500000240000002347212621123447014507 0ustar mnenciastaff# Copyright (C) 2013-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import logging from barman.command_wrappers import Command _logger = logging.getLogger(__name__) class FsOperationFailed(Exception): """ Exception which represents a failed execution of a command on FS """ pass def _str(cmd_out): """ Make a string from the output of a CommandWrapper execution. If input is None returns a literal 'None' string :param cmd_out: String or ByteString to convert :return str: a string """ if hasattr(cmd_out, 'decode') and callable(cmd_out.decode): return cmd_out.decode('utf-8', 'replace') else: return str(cmd_out) class UnixLocalCommand(object): """ This class is a wrapper for local calls for file system operations """ def __init__(self): # initialize a shell self.cmd = Command(cmd='sh -c', shell=True) def create_dir_if_not_exists(self, dir_path): """ This method check for the existence of a directory. if exist and is not a directory throws exception. if is a directory everything is ok and no mkdir operation is required. 
Otherwise creates the directory using mkdir if the mkdir fails an error is raised :param dir_path full path for the directory """ _logger.debug('Create directory %s if it does not exists' % dir_path) exists = self.cmd('test -e %s' % dir_path) if exists == 0: is_dir = self.cmd('test -d %s' % dir_path) if is_dir != 0: raise FsOperationFailed( 'A file with the same name already exists') else: return False else: mkdir_ret = self.cmd('mkdir %s' % dir_path) if mkdir_ret == 0: return True else: raise FsOperationFailed('mkdir execution failed') def delete_if_exists(self, dir_path): """ This method check for the existence of a directory. if exists and is not a directory an exception is raised if is a directory, then is removed using a rm -fr command, and returns True. if the command fails an exception is raised. If the directory does not exists returns False :param dir_path the full path for the directory """ _logger.debug('Delete if directory %s exists' % dir_path) exists = self.cmd('test -e %s' % dir_path) if exists == 0: is_dir = self.cmd('test -d %s' % dir_path) if is_dir != 0: raise FsOperationFailed( 'A file with the same name exists, but is not a ' 'directory') else: rm_ret = self.cmd('rm -fr %s' % dir_path) if rm_ret == 0: return True else: raise FsOperationFailed('rm execution failed') else: return False def check_directory_exists(self, dir_path): """ Check for the existence of a directory in path. if the directory exists returns true. if the directory does not exists returns false. if exists a file and is not a directory raises an exception :param dir_path full path for the directory """ _logger.debug('Check if directory %s exists' % dir_path) exists = self.cmd('test -e %s' % dir_path) if exists == 0: is_dir = self.cmd('test -d %s' % dir_path) if is_dir != 0: raise FsOperationFailed( 'A file with the same name exists, but is not a directory') else: return True else: return False def check_write_permission(self, dir_path): """ check write permission for barman on a given path. Creates a hidden file using touch, then remove the file. returns true if the file is written and removed without problems raise exception if the creation fails. raise exception if the removal fails. :param dir_path full dir_path for the directory to check """ _logger.debug('Check if directory %s is writable' % dir_path) exists = self.cmd('test -e %s' % dir_path) if exists == 0: is_dir = self.cmd('test -d %s' % dir_path) if is_dir == 0: can_write = self.cmd('touch %s/.barman_write_check' % dir_path) if can_write == 0: can_remove = self.cmd( 'rm %s/.barman_write_check' % dir_path) if can_remove == 0: return True else: raise FsOperationFailed('Unable to remove file') else: raise FsOperationFailed('Unable to create write check file') else: raise FsOperationFailed('%s is not a directory' % dir_path) else: raise FsOperationFailed('%s does not exists' % dir_path) def create_symbolic_link(self, src, dst): """ Create a symlink pointing to src named dst. Check src exists, if so, checks that destination does not exists. if src is an invalid folder, raises an exception. if dst already exists, raises an exception. 
if ln -s command fails raises an exception :param src full path to the source of the symlink :param dst full path for the destination of the symlink """ _logger.debug('Create symbolic link %s -> %s' % (src, dst)) exists = self.cmd('test -e %s' % src) if exists == 0: exists_dst = self.cmd('test -e %s' % dst) if exists_dst != 0: link = self.cmd('ln -s %s %s' % (src, dst)) if link == 0: return True else: raise FsOperationFailed('ln command failed') else: raise FsOperationFailed('ln destination already exists') else: raise FsOperationFailed('ln source does not exists') def get_system_info(self): """ Gather important system information for 'barman diagnose' command """ result = {} # self.cmd.out can be None. The str() call will ensure it will be # translated to a literal 'None' release = '' if self.cmd("lsb_release -a") == 0: release = _str(self.cmd.out).rstrip() elif self.cmd('test -e /etc/lsb-release') == 0: self.cmd('cat /etc/lsb-release ') release = "Ubuntu Linux %s" % _str(self.cmd.out).rstrip() elif self.cmd('test -e /etc/debian_version') == 0: self.cmd('cat /etc/debian_version') release = "Debian GNU/Linux %s" % _str(self.cmd.out).rstrip() elif self.cmd('test -e /etc/redhat-release') == 0: self.cmd('cat /etc/redhat-release') release = "RedHat Linux %s" % _str(self.cmd.out).rstrip() elif self.cmd('sw_vers') == 0: release = _str(self.cmd.out).rstrip() result['release'] = release self.cmd('uname -a') result['kernel_ver'] = _str(self.cmd.out).rstrip() self.cmd('python --version 2>&1') result['python_ver'] = _str(self.cmd.out).rstrip() self.cmd('rsync --version 2>&1') result['rsync_ver'] = _str(self.cmd.out).splitlines(True)[0].rstrip() self.cmd('ssh -V 2>&1') result['ssh_ver'] = _str(self.cmd.out).rstrip() return result def get_file_content(self, path): """ Retrieve the content of a file If the file doesn't exist or isn't readable, it raises an exception. :param str path: full path to the file to read """ _logger.debug('Reading content of file %s' % path) result = self.cmd("test -e '%s'" % path) if result != 0: raise FsOperationFailed('The %s file does not exist' % path) result = self.cmd("test -r '%s'" % path) if result != 0: raise FsOperationFailed('The %s file is not readable' % path) result = self.cmd("cat '%s'" % path) if result != 0: raise FsOperationFailed('Failed to execute "cat \'%s\'"' % path) return self.cmd.out class UnixRemoteCommand(UnixLocalCommand): """ This class is a wrapper for remote calls for file system operations """ # noinspection PyMissingConstructor def __init__(self, ssh_command): """ Uses the same commands as the UnixLocalCommand but the constructor is overridden and a remote shell is initialized using the ssh_command provided by the user :param ssh_command the ssh command provided by the user """ if ssh_command is None: raise FsOperationFailed('No ssh command provided') self.cmd = Command(cmd=ssh_command, shell=True) ret = self.cmd("true") if ret != 0: raise FsOperationFailed("Connection failed using the command '%s'" % ssh_command) barman-1.5.1/barman/hooks.py0000644000076500000240000002276412621123447015225 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains the logic to run hook scripts """ import logging import time from barman import version from barman.command_wrappers import Command from barman.infofile import UnknownBackupIdException _logger = logging.getLogger(__name__) class HookScriptRunner(object): def __init__(self, backup_manager, name, phase=None, error=None, retry=False, **extra_env): """ Execute a hook script managing its environment """ self.backup_manager = backup_manager self.name = name self.extra_env = extra_env self.phase = phase self.error = error self.retry = retry self.environment = None self.exit_status = None self.exception = None self.script = None self.reset() def reset(self): """ Reset the status of the class. """ self.environment = dict(self.extra_env) config_file = self.backup_manager.config.config.config_file self.environment.update({ 'BARMAN_VERSION': version.__version__, 'BARMAN_SERVER': self.backup_manager.config.name, 'BARMAN_CONFIGURATION': config_file, 'BARMAN_HOOK': self.name, 'BARMAN_RETRY': str(1 if self.retry else 0), }) if self.error: self.environment['BARMAN_ERROR'] = self.error if self.phase: self.environment['BARMAN_PHASE'] = self.phase script_config_name = "%s_%s" % (self.phase, self.name) else: script_config_name = self.name self.script = getattr(self.backup_manager.config, script_config_name, None) self.exit_status = None self.exception = None def env_from_backup_info(self, backup_info): """ Prepare the environment for executing a script :param BackupInfo backup_info: the backup metadata """ try: previous_backup = self.backup_manager.get_previous_backup( backup_info.backup_id) if previous_backup: previous_backup_id = previous_backup.backup_id else: previous_backup_id = '' except UnknownBackupIdException: previous_backup_id = '' self.environment.update({ 'BARMAN_BACKUP_DIR': backup_info.get_basebackup_directory(), 'BARMAN_BACKUP_ID': backup_info.backup_id, 'BARMAN_PREVIOUS_ID': previous_backup_id, 'BARMAN_STATUS': backup_info.status, 'BARMAN_ERROR': backup_info.error or '', }) def env_from_wal_info(self, wal_info, full_path=None, error=None): """ Prepare the environment for executing a script :param WalFileInfo wal_info: the backup metadata :param str full_path: override wal_info.fullpath() result :param str|Exception error: An error message in case of failure """ self.environment.update({ 'BARMAN_SEGMENT': wal_info.name, 'BARMAN_FILE': str(full_path if full_path is not None else wal_info.fullpath(self.backup_manager.server)), 'BARMAN_SIZE': str(wal_info.size), 'BARMAN_TIMESTAMP': str(wal_info.time), 'BARMAN_COMPRESSION': wal_info.compression or '', 'BARMAN_ERROR': str(error or '') }) def run(self): """ Run a a hook script if configured. 
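        A hook script (an illustrative sketch, not a shipped example)
        can inspect the exported BARMAN_* variables::

            #!/bin/sh
            echo "hook $BARMAN_HOOK for $BARMAN_SERVER ($BARMAN_PHASE)"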
This method must never throw any exception """ # noinspection PyBroadException try: if self.script: _logger.debug("Attempt to run %s: %s", self.name, self.script) cmd = Command( self.script, env_append=self.environment, shell=True, check=False) self.exit_status = cmd() if self.exit_status != 0: details = "%s returned %d\n" \ "Output details:\n" \ % (self.script, self.exit_status) details += cmd.out details += cmd.err _logger.warning(details) else: _logger.debug("%s returned %d", self.script, self.exit_status) return self.exit_status except Exception as e: _logger.exception('Exception running %s', self.name) self.exception = e return None class AbortedRetryHookScript(Exception): """ Exception for handling abort of retry hook scripts """ def __init__(self, hook): """ Initialise the exception with hook script info """ self.hook = hook def __str__(self): """ String representation """ return ("Abort '%s_%s' retry hook script (%s, exit code: %d)" % ( self.hook.phase, self.hook.name, self.hook.script, self.hook.exit_status)) class RetryHookScriptRunner(HookScriptRunner): """ A 'retry' hook script is a special kind of hook script that Barman tries to run indefinitely until it either returns a SUCCESS or ABORT exit code. Retry hook scripts are executed immediately before (pre) and after (post) the command execution. Standard hook scripts are executed immediately before (pre) and after (post) the retry hook scripts. """ # Failed attempts before sleeping for NAP_TIME seconds ATTEMPTS_BEFORE_NAP = 5 # Short break after a failure (in seconds) BREAK_TIME = 3 # Long break (nap, in seconds) after ATTEMPTS_BEFORE_NAP failures NAP_TIME = 60 # ABORT (and STOP) exit code EXIT_ABORT_STOP = 63 # ABORT (and CONTINUE) exit code EXIT_ABORT_CONTINUE = 62 # SUCCESS exit code EXIT_SUCCESS = 0 def __init__(self, backup_manager, name, phase=None, error=None, **extra_env): super(RetryHookScriptRunner, self).__init__( backup_manager, name, phase, error, retry=True, **extra_env) def run(self): """ Run a a 'retry' hook script, if required by configuration. Barman will retry to run the script indefinitely until it returns a EXIT_SUCCESS, or an EXIT_ABORT_CONTINUE, or an EXIT_ABORT_STOP code. There are BREAK_TIME seconds of sleep between every try. Every ATTEMPTS_BEFORE_NAP failures, Barman will sleep for NAP_TIME seconds. """ # If there is no script, exit if self.script is not None: # Keep track of the number of attempts attempts = 1 while True: # Run the script using the standard hook method (inherited) super(RetryHookScriptRunner, self).run() # Run the script until it returns EXIT_ABORT_CONTINUE, # or an EXIT_ABORT_STOP, or EXIT_SUCCESS if self.exit_status in (self.EXIT_ABORT_CONTINUE, self.EXIT_ABORT_STOP, self.EXIT_SUCCESS): break # Check for the number of attempts if attempts <= self.ATTEMPTS_BEFORE_NAP: attempts += 1 # Take a short break _logger.debug("Retry again in %d seconds", self.BREAK_TIME) time.sleep(self.BREAK_TIME) else: # Reset the attempt number and take a longer nap _logger.debug("Reached %d failures. Take a nap " "then retry again in %d seconds", self.ATTEMPTS_BEFORE_NAP, self.NAP_TIME) attempts = 1 time.sleep(self.NAP_TIME) # Outside the loop check for the exit code. 
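            # (recap, using the class constants defined above:
            #  EXIT_SUCCESS = 0, EXIT_ABORT_CONTINUE = 62,
            #  EXIT_ABORT_STOP = 63; any other exit status keeps the
            #  script inside the retry loop)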
if self.exit_status == self.EXIT_ABORT_CONTINUE: # Warn the user if the script exited with EXIT_ABORT_CONTINUE # Notify EXIT_ABORT_CONTINUE exit status because success and # failures are already managed in the superclass run method _logger.warning("%s was aborted (got exit status %d, " "Barman resumes)", self.script, self.exit_status) elif self.exit_status == self.EXIT_ABORT_STOP: # Log the error and raise AbortedRetryHookScript exception _logger.error("%s was aborted (got exit status %d, " "Barman requested to stop)", self.script, self.exit_status) raise AbortedRetryHookScript(self) return self.exit_status barman-1.5.1/barman/infofile.py0000644000076500000240000005520512621123447015671 0ustar mnenciastaff# Copyright (C) 2013-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . import ast import os import dateutil.parser import dateutil.tz import collections import logging from barman import xlog from barman.compression import identify_compression # create a namedtuple object called Tablespace with 'name' 'oid' and 'location' # as property. Tablespace = collections.namedtuple('Tablespace', 'name oid location') _logger = logging.getLogger(__name__) def output_tablespace_list(tablespaces): """ Return the literal representation of tablespaces as a Python string :param tablespaces tablespaces: list of Tablespaces objects :return str: Literal representation of tablespaces """ if tablespaces: return repr([tuple(item) for item in tablespaces]) else: return None def load_tablespace_list(string): """ Load the tablespaces as a Python list of namedtuple Uses ast to evaluate information about tablespaces. The returned list is used to create a list of namedtuple :param str string: :return list: list of namedtuple representing all the tablespaces """ obj = ast.literal_eval(string) if obj: return [Tablespace._make(item) for item in obj] else: return None def null_repr(obj): """ Return the literal representation of an object :param object obj: object to represent :return str|None: Literal representation of an object or None """ return repr(obj) if obj else None def load_datetime_tz(time_str): """ Load datetime and ensure the result is timezone-aware. If the parsed timestamp is naive, transform it into a timezone-aware one using the local timezone. :param str time_str: string representing a timestamp :return datetime: the parsed timezone-aware datetime """ # dateutil parser returns naive or tz-aware string depending on the format # of the input string timestamp = dateutil.parser.parse(time_str) # if the parsed timestamp is naive, forces it to local timezone if timestamp.tzinfo is None: timestamp = timestamp.replace(tzinfo=dateutil.tz.tzlocal()) return timestamp class Field(object): def __init__(self, name, dump=None, load=None, default=None, doc=None): """ Field descriptor to be used with a FieldListFile subclass. 
The resulting field is like a normal attribute with two optional associated function: to_str and from_str The Field descriptor can also be used as a decorator class C(FieldListFile): x = Field('x') @x.dump def x(val): return '0x%x' % val @x.load def x(val): return int(val, 16) :param str name: the name of this attribute :param callable dump: function used to dump the content to a disk :param callable load: function used to reload the content from disk :param default: default value for the field :param str doc: docstring of the filed """ self.name = name self.to_str = dump self.from_str = load self.default = default self.__doc__ = doc # noinspection PyUnusedLocal def __get__(self, obj, objtype=None): if obj is None: return self if not hasattr(obj, '_fields'): obj._fields = {} return obj._fields.setdefault(self.name, self.default) def __set__(self, obj, value): if not hasattr(obj, '_fields'): obj._fields = {} obj._fields[self.name] = value def __delete__(self, obj): raise AttributeError("can't delete attribute") def dump(self, to_str): return type(self)(self.name, to_str, self.from_str, self.__doc__) def load(self, from_str): return type(self)(self.name, self.to_str, from_str, self.__doc__) class FieldListFile(object): __slots__ = ('_fields', 'filename') def __init__(self, **kwargs): """ Represent a predefined set of keys with the associated value. The constructor build the object assigning every keyword argument to the corresponding attribute. If a provided keyword argument doesn't has a corresponding attribute an AttributeError exception is raised. The values provided to the constructor must be of the appropriate type for the corresponding attribute. The constructor will not attempt any validation or conversion on them. This class is meant to be an abstract base class. :raises: AttributeError """ self._fields = {} self.filename = None for name in kwargs: field = getattr(type(self), name, None) if isinstance(field, Field): setattr(self, name, kwargs[name]) else: raise AttributeError('unknown attribute %s' % name) @classmethod def from_meta_file(cls, filename): """ Factory method that read the specified file and build an object with its content. :param str filename: the file to read """ o = cls() o.load(filename) return o def save(self, filename=None, file_object=None): """ Serialize the object to the specified file or file object If a file_object is specified it will be used. If the filename is not specified it uses the one memorized in the filename attribute. If neither the filename attribute and parameter are set a ValueError exception is raised. :param str filename: path of the file to write :param file file_object: a file like object to write in :param str filename: the file to write :raises: ValueError """ if file_object: info = file_object else: filename = filename or self.filename if filename: info = open(filename, 'w') else: info = None if not info: raise ValueError( 'either a valid filename or a file_object must be specified') with info: for name in sorted(vars(type(self))): field = getattr(type(self), name) value = getattr(self, name, None) if isinstance(field, Field): if callable(field.to_str): value = field.to_str(value) info.write("%s=%s\n" % (name, value)) def load(self, filename=None, file_object=None): """ Replaces the current object content with the one deserialized from the provided file. This method set the filename attribute. A ValueError exception is raised if the provided file contains any invalid line. 
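        The expected format is a plain list of "key=value" lines, with
        blank lines and lines starting with '#' skipped. An illustrative
        fragment::

            # a comment
            status=DONE
            size=1234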
        :param str filename: path of the file to read
        :param file file_object: a file like object to read from
        :raises: ValueError
        """
        if file_object:
            info = file_object
        elif filename:
            info = open(filename, 'r')
        else:
            raise ValueError(
                'either filename or file_object must be specified')
        # detect the filename if a file_object is passed
        if not filename and file_object:
            if hasattr(file_object, 'name'):
                filename = file_object.name
        # canonicalize filename
        if filename:
            self.filename = os.path.abspath(filename)
        else:
            self.filename = None
            filename = ''  # This is only for error reporting
        with info:
            for line in info:
                # skip spaces and comments
                if line.isspace() or line.rstrip().startswith('#'):
                    continue
                # parse the line of form "key = value"
                try:
                    name, value = [x.strip() for x in line.split('=', 1)]
                except ValueError:
                    raise ValueError('invalid line %s in file %s' % (
                        line.strip(), filename))
                # use the from_str function to parse the value
                field = getattr(type(self), name, None)
                if value == 'None':
                    value = None
                elif isinstance(field, Field) and callable(field.from_str):
                    value = field.from_str(value)
                setattr(self, name, value)

    def items(self):
        """
        Return a generator yielding (key, value) pairs.
        If a field has a dump function defined, it will be used.
        """
        for name in sorted(vars(type(self))):
            field = getattr(type(self), name)
            value = getattr(self, name, None)
            if isinstance(field, Field):
                if callable(field.to_str):
                    value = field.to_str(value)
                yield (name, value)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            ', '.join(['%s=%r' % x for x in self.items()]))


class WalFileInfo(FieldListFile):
    """
    Metadata of a WAL file.
    """

    __slots__ = ()

    name = Field('name', doc='base name of WAL file')
    size = Field('size', load=int, doc='WAL file size after compression')
    time = Field('time', load=float, doc='WAL file modification time '
                                         '(seconds since epoch)')
    compression = Field('compression', doc='compression type')

    @classmethod
    def from_file(cls, filename, default_compression=None, **kwargs):
        """
        Factory method to generate a WalFileInfo from a WAL file.

        Every keyword argument will override any attribute from the
        provided file. If a keyword argument doesn't have a corresponding
        attribute, an AttributeError exception is raised.

        :param str filename: the file to inspect
        :param str default_compression: the compression to set if
            the current schema is not identifiable.
        """
        stat = os.stat(filename)
        kwargs.setdefault('name', os.path.basename(filename))
        kwargs.setdefault('size', stat.st_size)
        kwargs.setdefault('time', stat.st_mtime)
        if 'compression' not in kwargs:
            kwargs['compression'] = identify_compression(filename) \
                or default_compression
        obj = cls(**kwargs)
        obj.filename = "%s.meta" % filename
        return obj

    def to_xlogdb_line(self):
        """
        Format the content of this object as a xlogdb line.
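        The result is a tab-separated line, e.g. (illustrative values)::

            000000010000000000000001\t16777216\t1445000000.0\tgzip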
""" return "%s\t%s\t%s\t%s\n" % ( self.name, self.size, self.time, self.compression) @classmethod def from_xlogdb_line(cls, line): """ Parse a line from xlog catalogue :param str line: a line in the wal database to parse :rtype: WalFileInfo """ try: name, size, time, compression = line.split() except ValueError: # Old format compatibility (no compression) compression = None try: name, size, time = line.split() except ValueError: raise ValueError("cannot parse line: %r" % (line,)) # The to_xlogdb_line method writes None values as literal 'None' if compression == 'None': compression = None size = int(size) time = float(time) return cls(name=name, size=size, time=time, compression=compression) def to_json(self): """ Return an equivalent dictionary that can be encoded in json """ return dict(self.items()) def relpath(self): """ Returns the WAL file path relative to the server's wals_directory """ return os.path.join(xlog.hash_dir(self.name), self.name) def fullpath(self, server): """ Returns the WAL file full path :param barman.server.Server server: the server that owns the wal file """ return os.path.join(server.config.wals_directory, self.relpath()) class UnknownBackupIdException(Exception): """ The searched backup_id doesn't exists """ class BackupInfoBadInitialisation(Exception): """ Exception for a bad initialization error """ class BackupInfo(FieldListFile): #: Conversion to string EMPTY = 'EMPTY' STARTED = 'STARTED' FAILED = 'FAILED' DONE = 'DONE' STATUS_ALL = (EMPTY, STARTED, DONE, FAILED) STATUS_NOT_EMPTY = (STARTED, DONE, FAILED) #: Status according to retention policies OBSOLETE = 'OBSOLETE' VALID = 'VALID' POTENTIALLY_OBSOLETE = 'OBSOLETE*' NONE = '-' RETENTION_STATUS = (OBSOLETE, VALID, POTENTIALLY_OBSOLETE, NONE) version = Field('version', load=int) pgdata = Field('pgdata') # Parse the tablespaces as a literal Python list of namedtuple # Output the tablespaces as a literal Python list of tuple tablespaces = Field('tablespaces', load=load_tablespace_list, dump=output_tablespace_list) # Timeline is an integer timeline = Field('timeline', load=int) begin_time = Field('begin_time', load=load_datetime_tz) begin_xlog = Field('begin_xlog') begin_wal = Field('begin_wal') begin_offset = Field('begin_offset', load=int) size = Field('size', load=int) deduplicated_size = Field('deduplicated_size', load=int) end_time = Field('end_time', load=load_datetime_tz) end_xlog = Field('end_xlog') end_wal = Field('end_wal') end_offset = Field('end_offset', load=int) status = Field('status', default=EMPTY) server_name = Field('server_name') error = Field('error') mode = Field('mode') config_file = Field('config_file') hba_file = Field('hba_file') ident_file = Field('ident_file') included_files = Field('included_files', load=ast.literal_eval, dump=null_repr) backup_label = Field('backup_label', load=ast.literal_eval, dump=null_repr) __slots__ = ('server', 'config', 'backup_manager', 'backup_id', 'backup_version') def __init__(self, server, info_file=None, backup_id=None, **kwargs): # Initialises the attributes for the object based on the predefined keys """ Stores meta information about a single backup :param Server server: :param file,str,None info_file: :param str,None backup_id: :raise BackupInfoBadInitialisation: if the info_file content is invalid or neither backup_info or """ super(BackupInfo, self).__init__(**kwargs) self.server = server self.config = server.config self.backup_manager = self.server.backup_manager self.server_name = self.config.name self.mode = self.backup_manager.name if 
backup_id: # Cannot pass both info_file and backup_id if info_file: raise BackupInfoBadInitialisation( 'both info_file and backup_id parameters are set') self.backup_id = backup_id self.filename = self.get_filename() # Check if a backup info file for a given server and a given ID # already exists. If so load the values from the file. if os.path.exists(self.filename): self.load(filename=self.filename) elif info_file: if hasattr(info_file, 'read'): # We have been given a file-like object self.load(file_object=info_file) else: # Just a file name self.load(filename=info_file) self.backup_id = self.detect_backup_id() elif not info_file: raise BackupInfoBadInitialisation( 'backup_id and info_file parameters are both unset') # Manage backup version for new backup structure self.backup_version = 2 try: # the presence of pgdata directory is the marker of version 1 if self.backup_id is not None and os.path.exists( os.path.join(self.get_basebackup_directory(), 'pgdata')): self.backup_version = 1 except Exception as e: _logger.warning("Error detecting backup_version, use default: 2.\n " "Failure reason: %s", e) def get_required_wal_segments(self): """ Get the list of required WAL segments for the current backup """ return xlog.enumerate_segments(self.begin_wal, self.end_wal, self.version) def get_list_of_files(self, target): """ Get the list of files for the current backup """ # Walk down the base backup directory if target in ('data', 'standalone', 'full'): for root, _, files in os.walk(self.get_basebackup_directory()): for f in files: yield os.path.join(root, f) if target in 'standalone': # List all the WAL files for this backup for x in self.get_required_wal_segments(): yield self.server.get_wal_full_path(x) if target in ('wal', 'full'): for wal_info in self.server.get_wal_until_next_backup( self, include_history=True): yield wal_info.fullpath(self.server) def detect_backup_id(self): """ Detect the backup ID from the name of the parent dir of the info file """ if self.filename: return os.path.basename(os.path.dirname(self.filename)) else: return None def get_basebackup_directory(self): """ Get the default filename for the backup.info file based on backup ID and server directory for base backups """ return os.path.join(self.config.basebackups_directory, self.backup_id) def get_data_directory(self, tablespace_oid=None): """ Get path to the backup data dir according with the backup version If tablespace_oid is passed, build the path to the tablespace base directory, according with the backup version :param str tablespace_oid: the oid of a valid tablespace """ # Check if a tablespace oid is passed and if is a valid oid if tablespace_oid is not None and ( self.tablespaces is None or all(str(tablespace_oid) != str(tablespace.oid) for tablespace in self.tablespaces)): raise ValueError("Invalid tablespace OID %s" % tablespace_oid) # Build the requested path according to backup_version value path = [self.get_basebackup_directory()] # Check te version of the backup if self.backup_version == 2: # If an oid has been provided, we are looking for a tablespace if tablespace_oid is not None: # Append the oid to the basedir of the backup path.append(str(tablespace_oid)) else: # Looking for the data dir path.append('data') else: # Backup v1, use pgdata as base path.append('pgdata') # If a oid has been provided, we are looking for a tablespace. 
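            # (illustrative example: with backup_version 1 and oid 16384
            #  this resolves to <backup_dir>/pgdata/pg_tblspc/16384)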
if tablespace_oid is not None: # Append the path to pg_tblspc/oid folder inside pgdata path.extend(('pg_tblspc', str(tablespace_oid))) # Return the built path return os.path.join(*path) def get_filename(self): """ Get the default filename for the backup.info file based on backup ID and server directory for base backups """ return os.path.join(self.get_basebackup_directory(), 'backup.info') def set_attribute(self, key, value): """ Set a value for a given key """ setattr(self, key, value) def save(self, filename=None, file_object=None): if not file_object: # Make sure the containing directory exists filename = filename or self.filename dir_name = os.path.dirname(filename) if not os.path.exists(dir_name): os.makedirs(dir_name) super(BackupInfo, self).save(filename=filename, file_object=file_object) def to_dict(self): """ Return the backup_info content as a simple dictionary :return dict: """ result = dict(self.items()) result.update(backup_id=self.backup_id, server_name=self.server_name, mode=self.mode, tablespaces=self.tablespaces) return result def to_json(self): """ Return an equivalent dictionary that uses only json-supported types """ data = self.to_dict() # Convert fields which need special types not supported by json if data.get('tablespaces') is not None: data['tablespaces'] = [list(item) for item in data['tablespaces']] if data.get('begin_time') is not None: data['begin_time'] = data['begin_time'].ctime() if data.get('end_time') is not None: data['end_time'] = data['end_time'].ctime() return data @classmethod def from_json(cls, server, json_backup_info): """ Factory method that builds a BackupInfo object from a json dictionary :param barman.Server server: the server related to the Backup :param dict json_backup_info: the data set containing values from json """ data = dict(json_backup_info) # Convert fields which need special types not supported by json if data.get('tablespaces') is not None: data['tablespaces'] = [Tablespace._make(item) for item in data['tablespaces']] if data.get('begin_time') is not None: data['begin_time'] = load_datetime_tz(data['begin_time']) if data.get('end_time') is not None: data['end_time'] = load_datetime_tz(data['end_time']) # Instantiate a BackupInfo object using the converted fields return cls(server, **data) barman-1.5.1/barman/lockfile.py0000644000076500000240000001450612621123425015661 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module is the lock manager for Barman """ import errno import fcntl import os class LockFileException(Exception): """ LockFile Exception base class """ pass class LockFileBusy(LockFileException): """ Raised when a lock file is not free """ pass class LockFilePermissionDenied(LockFileException): """ Raised when a lock file is not accessible """ pass class LockFile(object): """ Ensures that there is only one process which is running against a specified LockFile. 
It supports the Context Manager interface, allowing the use in with statements. with LockFile('file.lock') as locked: if not locked: print "failed" else: You can also use exceptions on failures try: with LockFile('file.lock', True): except LockFileBusy, e, file: print "failed to lock %s" % file """ def __init__(self, filename, raise_if_fail=True, wait=False): self.filename = os.path.abspath(filename) self.fd = None self.raise_if_fail = raise_if_fail self.wait = wait def acquire(self, raise_if_fail=None, wait=None): """ Creates and holds on to the lock file. When raise_if_fail, a LockFileBusy is raised if the lock is held by someone else and a LockFilePermissionDenied is raised when the user executing barman have insufficient rights for the creation of a LockFile. Returns True if lock has been successfully acquired, False if it is not. :param bool raise_if_fail: If True raise an exception on failure :param bool wait: If True issue a blocking request :returns bool: whether the lock has been acquired """ if self.fd: return True fd = None # method arguments take precedence on class parameters raise_if_fail = raise_if_fail \ if raise_if_fail is not None else self.raise_if_fail wait = wait if wait is not None else self.wait try: fd = os.open(self.filename, os.O_TRUNC | os.O_CREAT | os.O_RDWR, 0600) flags = fcntl.LOCK_EX if not wait: flags |= fcntl.LOCK_NB fcntl.flock(fd, flags) os.write(fd, ("%s\n" % os.getpid()).encode('ascii')) self.fd = fd return True except (OSError, IOError), e: if fd: os.close(fd) # let's not leak file descriptors if raise_if_fail: if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK): raise LockFileBusy(self.filename) elif e.errno == errno.EACCES: raise LockFilePermissionDenied(self.filename) else: raise else: return False def release(self): """ Releases the lock. If the lock is not held by the current process it does nothing. """ if not self.fd: return try: fcntl.flock(self.fd, fcntl.LOCK_UN) os.close(self.fd) except (OSError, IOError): pass self.fd = None def __del__(self): """ Avoid stale lock files. """ self.release() # Contextmanager interface def __enter__(self): return self.acquire() def __exit__(self, exception_type, value, traceback): self.release() class GlobalCronLock(LockFile): """ This lock protects cron from multiple executions. Creates a global '.cron.lock' lock file under the given lock_directory. """ def __init__(self, lock_directory): super(GlobalCronLock, self).__init__( os.path.join(lock_directory, '.cron.lock'), raise_if_fail=True) class ServerBackupLock(LockFile): """ This lock protects a server from multiple executions of backup command Creates a '.-backup.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerBackupLock, self).__init__( os.path.join(lock_directory, '.%s-backup.lock' % server_name), raise_if_fail=True) class ServerCronLock(LockFile): """ This lock protects a server from multiple executions of cron command Creates a '.-cron.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerCronLock, self).__init__( os.path.join(lock_directory, '.%s-cron.lock' % server_name), raise_if_fail=True, wait=False) class ServerXLOGDBLock(LockFile): """ This lock protects a server's xlogdb access Creates a '.-xlogdb.lock' lock file under the given lock_directory for the named SERVER. 
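    (Illustrative: for a server named "main" the resulting file is
    '.main-xlogdb.lock'. Unlike the other per-server locks, this one
    waits for the lock to be released instead of failing immediately.)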
""" def __init__(self, lock_directory, server_name): super(ServerXLOGDBLock, self).__init__( os.path.join(lock_directory, '.%s-xlogdb.lock' % server_name), raise_if_fail=True, wait=True) class ServerWalArchiveLock(LockFile): """ This lock protects a server from multiple executions of wal-archive command Creates a '.-archive-wal.lock' lock file under the given lock_directory for the named SERVER. """ def __init__(self, lock_directory, server_name): super(ServerWalArchiveLock, self).__init__( os.path.join(lock_directory, '.%s-archive-wal.lock' % server_name), raise_if_fail=True, wait=False) barman-1.5.1/barman/output.py0000644000076500000240000006777012621123447015450 0ustar mnenciastaff# Copyright (C) 2013-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module control how the output of Barman will be rendered """ import inspect import logging import sys from barman.infofile import BackupInfo from barman.utils import pretty_size __all__ = [ 'error_occurred', 'debug', 'info', 'warning', 'error', 'exception', 'result', 'close_and_exit', 'close', 'set_output_writer', 'AVAILABLE_WRITERS', 'DEFAULT_WRITER', 'ConsoleOutputWriter', 'NagiosOutputWriter', ] #: True if error or exception methods have been called error_occurred = False #: Exit code if error occurred error_exit_code = 1 def _format_message(message, args): """ Format a message using the args list. The result will be equivalent to message % args If args list contains a dictionary as its only element the result will be message % args[0] :param str message: the template string to be formatted :param tuple args: a list of arguments :return: the formatted message :rtype: str """ if len(args) == 1 and isinstance(args[0], dict): return message % args[0] elif len(args) > 0: return message % args else: return message def _put(level, message, *args, **kwargs): """ Send the message with all the remaining positional arguments to the configured output manager with the right output level. The message will be sent also to the logger unless explicitly disabled with log=False No checks are performed on level parameter as this method is meant to be called only by this module. 
    If level == 'exception' the stack trace will also be logged

    :param str level:
    :param str message: the template string to be formatted
    :param tuple args: all remaining arguments are passed to the log formatter
    :key bool log: whether to log the message
    :key bool is_error: treat this message as an error
    """
    # handle keyword-only parameters
    log = kwargs.pop('log', True)
    is_error = kwargs.pop('is_error', False)
    if len(kwargs):
        raise TypeError('%s() got an unexpected keyword argument %r'
                        % (inspect.stack()[1][3], kwargs.popitem()[0]))
    if is_error:
        global error_occurred
        error_occurred = True
        _writer.error_occurred()
    # dispatch the call to the output handler
    getattr(_writer, level)(message, *args)
    # log the message as originating from caller's caller module
    if log:
        exc_info = False
        if level == 'exception':
            level = 'error'
            exc_info = True
        frm = inspect.stack()[2]
        mod = inspect.getmodule(frm[0])
        logger = logging.getLogger(mod.__name__)
        log_level = logging.getLevelName(level.upper())
        logger.log(log_level, message, *args, **{'exc_info': exc_info})


def _dispatch(obj, prefix, name, *args, **kwargs):
    """
    Dispatch the call to the %(prefix)s_%(name)s method of the obj object

    :param obj: the target object
    :param str prefix: prefix of the method to be called
    :param str name: name of the method to be called
    :param tuple args: all remaining positional arguments will be sent
        to the target
    :param dict kwargs: all remaining keyword arguments will be sent
        to the target
    :return: the result of the invoked method
    :raise ValueError: if the target method is not present
    """
    method_name = "%s_%s" % (prefix, name)
    handler = getattr(obj, method_name, None)
    if callable(handler):
        return handler(*args, **kwargs)
    else:
        raise ValueError("The object %r does not have the %r method" % (
            obj, method_name))


def is_quiet():
    """
    Return the protected _quiet attribute of the active OutputWriter
    instance

    :return bool: the _quiet parameter value
    """
    return _writer.is_quiet()


def is_debug():
    """
    Return the protected _debug attribute of the active OutputWriter
    instance

    :return bool: the _debug parameter value
    """
    return _writer.is_debug()


def debug(message, *args, **kwargs):
    """
    Output a message with severity 'DEBUG'

    :key bool log: whether to log the message
    """
    _put('debug', message, *args, **kwargs)


def info(message, *args, **kwargs):
    """
    Output a message with severity 'INFO'

    :key bool log: whether to log the message
    """
    _put('info', message, *args, **kwargs)


def warning(message, *args, **kwargs):
    """
    Output a message with severity 'WARNING'

    :key bool log: whether to log the message
    """
    _put('warning', message, *args, **kwargs)


def error(message, *args, **kwargs):
    """
    Output a message with severity 'ERROR'.
    Also records that an error has occurred unless the ignore parameter
    is True.
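    Illustrative calls::

        error("Backup %s failed", backup_id)        # sets the exit status
        error("non fatal condition", ignore=True)   # message only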
:key bool ignore: avoid setting an error exit status (default False) :key bool log: whether to log the message """ # ignore is a keyword-only parameter ignore = kwargs.pop('ignore', False) if not ignore: kwargs.setdefault('is_error', True) _put('error', message, *args, **kwargs) def exception(message, *args, **kwargs): """ Output a message with severity 'EXCEPTION' If raise_exception parameter doesn't evaluate to false raise and exception: - if raise_exception is callable raise the result of raise_exception() - if raise_exception is an exception raise it - else raise the last exception again :key bool ignore: avoid setting an error exit status :key raise_exception: raise an exception after the message has been processed :key bool log: whether to log the message """ # ignore and raise_exception are keyword-only parameters ignore = kwargs.pop('ignore', False) # noinspection PyNoneFunctionAssignment raise_exception = kwargs.pop('raise_exception', None) if not ignore: kwargs.setdefault('is_error', True) _put('exception', message, *args, **kwargs) if raise_exception: if callable(raise_exception): # noinspection PyCallingNonCallable raise raise_exception(message) elif isinstance(raise_exception, BaseException): raise raise_exception else: raise def init(command, *args, **kwargs): """ Initialize the output writer for a given command. :param str command: name of the command are being executed :param tuple args: all remaining positional arguments will be sent to the output processor :param dict kwargs: all keyword arguments will be sent to the output processor """ try: _dispatch(_writer, 'init', command, *args, **kwargs) except ValueError: exception('The %s writer does not support the "%s" command', _writer.__class__.__name__, command) close_and_exit() def result(command, *args, **kwargs): """ Output the result of an operation. :param str command: name of the command are being executed :param tuple args: all remaining positional arguments will be sent to the output processor :param dict kwargs: all keyword arguments will be sent to the output processor """ try: _dispatch(_writer, 'result', command, *args, **kwargs) except ValueError: exception('The %s writer does not support the "%s" command', _writer.__class__.__name__, command) close_and_exit() def close_and_exit(): """ Close the output writer and terminate the program. If an error has been emitted the program will report a non zero return value. """ close() if error_occurred: sys.exit(error_exit_code) else: sys.exit(0) def close(): """ Close the output writer. """ _writer.close() def set_output_writer(new_writer, *args, **kwargs): """ Replace the current output writer with a new one. The new_writer parameter can be a symbolic name or an OutputWriter object :param new_writer: the OutputWriter name or the actual OutputWriter :type: string or an OutputWriter :param tuple args: all remaining positional arguments will be passed to the OutputWriter constructor :param dict kwargs: all remaining keyword arguments will be passed to the OutputWriter constructor """ global _writer _writer.close() if new_writer in AVAILABLE_WRITERS: _writer = AVAILABLE_WRITERS[new_writer](*args, **kwargs) else: _writer = new_writer class ConsoleOutputWriter(object): def __init__(self, debug=False, quiet=False): """ Default output writer that output everything on console. 
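        A minimal usage sketch (methods defined below in this class)::

            writer = ConsoleOutputWriter(debug=True)
            writer.info("hello from %s", "barman")    # standard output
            writer.error("something went wrong")      # standard error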
:param bool debug: print debug messages on standard error
        :param bool quiet: don't print info messages
        """
        self._debug = debug
        self._quiet = quiet

        #: Used in check command to hold the check results
        self.result_check_list = []

        #: Used in status command to hold the status results
        self.result_status_list = []

        #: The minimal flag. If set the command must output a single list
        #: of values.
        self.minimal = False

        #: The server is active
        self.active = True

    def _out(self, message, args):
        """
        Print a message on standard output
        """
        print >> sys.stdout, _format_message(message, args)

    def _err(self, message, args):
        """
        Print a message on standard error
        """
        print >> sys.stderr, _format_message(message, args)

    def is_quiet(self):
        """
        Access the quiet property of the OutputWriter instance

        :return bool: if the writer is quiet or not
        """
        return self._quiet

    def is_debug(self):
        """
        Access the debug property of the OutputWriter instance

        :return bool: if the writer is in debug mode or not
        """
        return self._debug

    def debug(self, message, *args):
        """
        Emit a debug message, but only when debug mode is enabled.
        """
        if self._debug:
            self._err('DEBUG: %s' % message, args)

    def info(self, message, *args):
        """
        Normal messages are sent to standard output
        """
        if not self._quiet:
            self._out(message, args)

    def warning(self, message, *args):
        """
        Warning messages are sent to standard error
        """
        self._err('WARNING: %s' % message, args)

    def error(self, message, *args):
        """
        Error messages are sent to standard error
        """
        self._err('ERROR: %s' % message, args)

    def exception(self, message, *args):
        """
        Exception messages are sent to standard error
        """
        self._err('EXCEPTION: %s' % message, args)

    def error_occurred(self):
        """
        Called immediately before any message method when the originating
        call has is_error=True
        """

    def close(self):
        """
        Close the output channel.

        Nothing to do for console.
        """

    def result_backup(self, backup_info):
        """
        Render the result of a backup.

        Nothing to do for console.
        """
        # TODO: evaluate to display something useful here

    def result_recovery(self, results):
        """
        Render the result of a recovery.
        """
        if len(results['changes']) > 0:
            self.info("")
            self.info("IMPORTANT")
            self.info("These settings have been modified to prevent "
                      "data loss")
            self.info("")
            for assertion in results['changes']:
                self.info("%s line %s: %s = %s",
                          assertion.filename,
                          assertion.line,
                          assertion.key,
                          assertion.value)
        if len(results['warnings']) > 0:
            self.info("")
            self.info("WARNING")
            self.info("You are required to review the following options"
                      " as potentially dangerous")
            self.info("")
            for assertion in results['warnings']:
                self.info("%s line %s: %s = %s",
                          assertion.filename,
                          assertion.line,
                          assertion.key,
                          assertion.value)
        if results['delete_barman_xlog']:
            self.info("")
            self.info("After the recovery, please remember to remove the "
                      "\"barman_xlog\" directory")
            self.info("inside the PostgreSQL data directory.")
        if results['get_wal']:
            self.info("")
            self.info("WARNING: 'get-wal' is in the specified "
                      "'recovery_options'.")
            self.info("Before you start up the PostgreSQL server, please "
                      "review the recovery.conf file")
            self.info("inside the target directory. Make sure that "
                      "'restore_command' can be executed by "
                      "the PostgreSQL user.")
        self.info("")
        self.info("Your PostgreSQL server has been successfully "
                  "prepared for recovery!")

    def _record_check(self, server_name, check, status, hint):
        """
        Record the check line in the result_check_list attribute

        This method is for subclass use

        :param str server_name: the server being checked
        :param str check: the check name
        :param bool status: True if succeeded
        :param str,None hint: hint to print if not None
        """
        self.result_check_list.append(dict(
            server_name=server_name, check=check, status=status, hint=hint))
        if not status and self.active:
            global error_occurred
            error_occurred = True

    def init_check(self, server_name, active):
        """
        Init the check command

        :param str server_name: the server being checked
        :param boolean active: True if the server is active
        """
        self.info("Server %s:" % server_name)
        self.active = active

    def result_check(self, server_name, check, status, hint=None):
        """
        Record the result of a server check and output it as INFO

        :param str server_name: the server being checked
        :param str check: the check name
        :param bool status: True if succeeded
        :param str,None hint: hint to print if not None
        """
        self._record_check(server_name, check, status, hint)
        if hint:
            self.info("\t%s: %s (%s)" %
                      (check, 'OK' if status else 'FAILED', hint))
        else:
            self.info("\t%s: %s" % (check, 'OK' if status else 'FAILED'))

    def init_list_backup(self, server_name, minimal=False):
        """
        Init the list-backup command

        :param str server_name: the server whose backups we are listing
        :param bool minimal: if true output only a list of backup ids
        """
        self.minimal = minimal

    def result_list_backup(self, backup_info, backup_size, wal_size,
                           retention_status):
        """
        Output a single backup in the list-backup command

        :param BackupInfo backup_info: backup we are displaying
        :param backup_size: size of base backup (with the required WAL files)
        :param wal_size: size of WAL files belonging to this backup
            (without the required WAL files)
        :param retention_status: retention policy status
        """
        # If minimal is set only output the backup id
        if self.minimal:
            self.info(backup_info.backup_id)
            return
        out_list = [
            "%s %s - " % (backup_info.server_name, backup_info.backup_id)]
        if backup_info.status == BackupInfo.DONE:
            end_time = backup_info.end_time.ctime()
            out_list.append('%s - Size: %s - WAL Size: %s' %
                            (end_time,
                             pretty_size(backup_size),
                             pretty_size(wal_size)))
            if backup_info.tablespaces:
                tablespaces = [("%s:%s" % (tablespace.name,
                                           tablespace.location))
                               for tablespace in backup_info.tablespaces]
                out_list.append(' (tablespaces: %s)' %
                                ', '.join(tablespaces))
            if retention_status:
                out_list.append(' - %s' % retention_status)
        else:
            out_list.append(backup_info.status)
        self.info(''.join(out_list))

    def result_show_backup(self, backup_ext_info):
        """
        Output all available information about a backup in the show-backup
        command

        The argument has to be the result of a Server.get_backup_ext_info()
        call

        :param dict backup_ext_info: a dictionary containing the info to
            display
        """
        data = dict(backup_ext_info)
        self.info("Backup %s:", data['backup_id'])
        self.info(" Server Name : %s", data['server_name'])
        self.info(" Status : %s", data['status'])
        if data['status'] == BackupInfo.DONE:
            self.info(" PostgreSQL Version : %s", data['version'])
            self.info(" PGDATA directory : %s", data['pgdata'])
            if data['tablespaces']:
                self.info(" Tablespaces:")
                for item in data['tablespaces']:
                    self.info(" %s: %s (oid: %s)",
                              item.name, item.location, item.oid)
            self.info("")
            self.info(" Base backup information:")
self.info(" Disk usage : %s (%s with WALs)", pretty_size(data['size']), pretty_size(data['size'] + data[ 'wal_size'])) if data['deduplicated_size'] is not None and data['size'] > 0: deduplication_ratio = 1 - (float(data['deduplicated_size']) / data['size']) self.info(" Incremental size : %s (-%s)", pretty_size(data['deduplicated_size']), '{percent:.2%}'.format(percent=deduplication_ratio) ) self.info(" Timeline : %s", data['timeline']) self.info(" Begin WAL : %s", data['begin_wal']) self.info(" End WAL : %s", data['end_wal']) self.info(" WAL number : %s", data['wal_num']) # Output WAL compression ratio for basebackup WAL files if data['wal_compression_ratio'] > 0: self.info(" WAL compression ratio: %s", '{percent:.2%}'.format( percent=data['wal_compression_ratio'])) self.info(" Begin time : %s", data['begin_time']) self.info(" End time : %s", data['end_time']) self.info(" Begin Offset : %s", data['begin_offset']) self.info(" End Offset : %s", data['end_offset']) self.info(" Begin XLOG : %s", data['begin_xlog']) self.info(" End XLOG : %s", data['end_xlog']) self.info("") self.info(" WAL information:") self.info(" No of files : %s", data['wal_until_next_num']) self.info(" Disk usage : %s", pretty_size(data['wal_until_next_size'])) # Output WAL rate if data['wals_per_second'] > 0: self.info(" WAL rate : %0.2f/hour", data['wals_per_second'] * 3600) # Output WAL compression ratio for archived WAL files if data['wal_until_next_compression_ratio'] > 0: self.info(" Compression ratio : %s", '{percent:.2%}'.format( percent=data['wal_until_next_compression_ratio'])) self.info(" Last available : %s", data['wal_last']) self.info("") self.info(" Catalog information:") self.info(" Retention Policy : %s", data['retention_policy_status'] or 'not enforced') self.info(" Previous Backup : %s", data.setdefault('previous_backup_id', 'not available') or '- (this is the oldest base backup)') self.info(" Next Backup : %s", data.setdefault('next_backup_id', 'not available') or '- (this is the latest base backup)') else: if data['error']: self.info(" Error: : %s", data['error']) def init_status(self, server_name): """ Init the status command :param str server_name: the server we are start listing """ self.info("Server %s:", server_name) def result_status(self, server_name, status, description, message): """ Record a result line of a server status command and output it as INFO :param str server_name: the server is being checked :param str status: the returned status code :param str description: the returned status description :param str,object message: status message. 
It will be converted to str """ message = str(message) self.result_status_list.append(dict( server_name=server_name, status=status, description=description, message=message)) self.info("\t%s: %s", description, message) def init_list_server(self, server_name, minimal=False): """ Init the list-server command :param str server_name: the server we are start listing """ self.minimal = minimal def result_list_server(self, server_name, description=None): """ Output a result line of a list-server command :param str server_name: the server is being checked :param str,None description: server description if applicable """ if self.minimal or not description: self.info("%s", server_name) else: self.info("%s - %s", server_name, description) def init_show_server(self, server_name): """ Init the show-server command output method :param str server_name: the server we are displaying """ self.info("Server %s:" % server_name) def result_show_server(self, server_name, server_info): """ Output the results of the show-server command :param str server_name: the server we are displaying :param dict server_info: a dictionary containing the info to display """ for status, message in sorted(server_info.items()): self.info("\t%s: %s", status, message) class NagiosOutputWriter(ConsoleOutputWriter): """ Nagios output writer. This writer doesn't output anything to console. On close it writes a nagios-plugin compatible status """ def _out(self, message, args): """ Do not print anything on standard output """ def _err(self, message, args): """ Do not print anything on standard error """ def close(self): """ Display the result of a check run as expected by Nagios. Also set the exit code as 2 (CRITICAL) in case of errors """ global error_occurred, error_exit_code # List of all servers that have been checked servers = [] # List of servers reporting issues issues = [] for item in self.result_check_list: # Keep track of all the checked servers if item['server_name'] not in servers: servers.append(item['server_name']) # Keep track of the servers with issues if not item['status'] and item['server_name'] not in issues: issues.append(item['server_name']) # Global error (detected at configuration level) if len(issues) == 0 and error_occurred: print "BARMAN CRITICAL - Global configuration errors" error_exit_code = 2 return if len(issues) > 0: fail_summary = [] details = [] for server in issues: # Join all the issues for a server. Output format is in the # form: # " FAILED: , ... " # All strings will be concatenated into the $SERVICEOUTPUT$ # macro of the Nagios output server_fail = "%s FAILED: %s" % ( server, ", ".join([ item['check'] for item in self.result_check_list if item['server_name'] == server and not item['status'] ])) fail_summary.append(server_fail) # Prepare an array with the detailed output for # the $LONGSERVICEOUTPUT$ macro of the Nagios output # line format: # .: FAILED # .: FAILED (Hint if present) # : FAILED # ..... 
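            # For example (hypothetical server and check names, with a
            # made-up hint), a single server "main" failing its "ssh"
            # check would be reported as:
            #
            #   BARMAN CRITICAL - server main has issues * main FAILED: ssh
            #   main.ssh: FAILED (Connection refused)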
for issue in self.result_check_list: if issue['server_name'] == server and not issue['status']: fail_detail = "%s.%s: FAILED" % (server, issue['check']) if issue['hint']: fail_detail += " (%s)" % issue['hint'] details.append(fail_detail) # Append the summary of failures to the first line of the output # using * as delimiter if len(servers) == 1: print "BARMAN CRITICAL - server %s has issues * %s" % \ (servers[0], " * ".join(fail_summary)) else: print "BARMAN CRITICAL - %d server out of %d have issues * " \ "%s" % (len(issues), len(servers), " * ".join(fail_summary)) # add the detailed list to the output for issue in details: print issue error_exit_code = 2 else: # No issues, all good! # Display the output message for a single server check if len(servers) == 1: print "BARMAN OK - Ready to serve the Espresso backup " \ "for %s" % \ (servers[0]) else: # Display the output message for several servers, using # '*' as delimiter print "BARMAN OK - Ready to serve the Espresso backup " \ "for %d server(s) * %s" % \ (len(servers), " * ".join([server for server in servers])) #: This dictionary acts as a registry of available OutputWriters AVAILABLE_WRITERS = { 'console': ConsoleOutputWriter, # nagios is not registered as it isn't a general purpose output writer # 'nagios': NagiosOutputWriter, } #: The default OutputWriter DEFAULT_WRITER = 'console' #: the current active writer. Initialized according DEFAULT_WRITER on load _writer = AVAILABLE_WRITERS[DEFAULT_WRITER]() barman-1.5.1/barman/recovery_executor.py0000644000076500000240000012117612621406267017657 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . 
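# A typical invocation, sketched for illustration only (in Barman the cli
# and server layers build these arguments; the destination path and ssh
# command below are made up):
#
#   executor = RecoveryExecutor(server.backup_manager)
#   executor.recover(backup_info, dest='/var/lib/pgsql/data',
#                    tablespaces=None, target_tli=None, target_time=None,
#                    target_xid=None, target_name=None, exclusive=False,
#                    remote_command='ssh postgres@db1')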
""" This module contains the methods necessary to perform a recovery """ from io import StringIO import logging import os import re import shutil import tempfile import time import collections import dateutil.parser import dateutil.tz import socket from barman.config import RecoveryOptions from barman import xlog, output from barman.command_wrappers import DataTransferFailure, \ CommandFailedException, RsyncPgData, Rsync from barman.fs import FsOperationFailed, UnixRemoteCommand, UnixLocalCommand from barman.infofile import BackupInfo from barman.utils import mkpath # generic logger for this module _logger = logging.getLogger(__name__) # regexp matching a single value in Postgres configuration file PG_CONF_SETTING_RE = re.compile(r"^\s*([^\s=]+)\s*=?\s*(.*)$") # create a namedtuple object called Assertion with 'filename', 'line', 'key' and # 'value' as properties Assertion = collections.namedtuple('Assertion', 'filename line key value') # noinspection PyMethodMayBeStatic class RecoveryExecutor(object): """ Class responsible of recovery operations """ # Potentially dangerous options list, which need to be revised by the user # after a recovery DANGEROUS_OPTIONS = ['data_directory', 'config_file', 'hba_file', 'ident_file', 'external_pid_file', 'ssl_cert_file', 'ssl_key_file', 'ssl_ca_file', 'ssl_crl_file', 'unix_socket_directory', 'include', 'include_dir', 'include_if_exists'] # List of options that, if present, need to be forced to a specific value # during recovery, to avoid data losses MANGLE_OPTIONS = {'archive_command': 'false'} def __init__(self, backup_manager): """ Constructor :param barman.backup.BackupManager backup_manager: the BackupManager owner of the executor """ self.backup_manager = backup_manager self.server = backup_manager.server self.config = backup_manager.config def pg_config_mangle(self, filename, settings, backup_filename=None): """ This method modifies the given PostgreSQL configuration file, commenting out the given settings, and adding the ones generated by Barman. If backup_filename is passed, performs a backup copy first. :param filename: the PostgreSQL configuration file :param settings: dictionary of settings to be mangled :param backup_filename: config file backup copy. Default is None. """ if backup_filename: shutil.copy2(filename, backup_filename) with open(filename) as f: content = f.readlines() mangled = [] with open(filename, 'w') as f: for l_number, line in enumerate(content): rm = PG_CONF_SETTING_RE.match(line) if rm: key = rm.group(1) if key in settings: f.write("#BARMAN# %s" % line) # TODO is it useful to handle none values? changes = "%s = %s\n" % (key, settings[key]) f.write(changes) mangled.append( Assertion._make([ os.path.basename(f.name), l_number, key, settings[key]])) continue f.write(line) return mangled def pg_config_detect_possible_issues(self, filename): """ This method looks for any possible issue with PostgreSQL location options such as data_directory, config_file, etc. It returns a dictionary with the dangerous options that have been found. 
:param filename: the Postgres configuration file """ clashes = [] with open(filename) as f: content = f.readlines() # Read line by line and identify dangerous options for l_number, line in enumerate(content): rm = PG_CONF_SETTING_RE.match(line) if rm: key = rm.group(1) if key in self.DANGEROUS_OPTIONS: clashes.append( Assertion._make([ os.path.basename(f.name), l_number, key, rm.group(2)])) return clashes def map_temporary_config_files(self, recovery_info, backup_info, remote_command): """ Map configuration files, by filling the 'temporary_configuration_files' array, depending on remote or local recovery. This array will be used by the subsequent methods of the class. :param dict recovery_info: Dictionary containing all the recovery params :param barman.infofile.BackupInfo backup_info: a backup representation :param str remote_command: ssh command for remote recovery """ for conf_file in recovery_info['configuration_files']: if remote_command: # If the recovery is remote, copy the postgresql.conf # file in a temp dir # Otherwise we can modify the postgresql.conf file # in the destination directory. conf_file_path = os.path.join( recovery_info['tempdir'], conf_file) shutil.copy2( os.path.join(backup_info.get_data_directory(), conf_file), conf_file_path) # If is a remote recovery the conf files are inside a temporary dir else: # Otherwise use the local destination path. conf_file_path = os.path.join(recovery_info['destination_path'], conf_file) recovery_info['temporary_configuration_files'].append( conf_file_path) def analyse_temporary_config_files(self, recovery_info): """ Analyse temporary configuration files and identify dangerous options Mark all the dangerous options for the user to review. This procedure also changes harmful options such as 'archive_command'. :param dict recovery_info: dictionary holding all recovery parameters """ results = recovery_info['results'] # Check for dangerous options inside every config file for conf_file in recovery_info['temporary_configuration_files']: # Identify and comment out dangerous options, replacing them with # the appropriate values results['changes'] += self.pg_config_mangle( conf_file, self.MANGLE_OPTIONS, "%s.origin" % conf_file) # Identify dangerous options and warn users about their presence results['warnings'] += self.pg_config_detect_possible_issues( conf_file) def copy_temporary_config_files(self, dest, remote_command, recovery_info): """ Copy modified configuration files using rsync in case of remote recovery :param str dest: destination directory of the recovery :param str remote_command: ssh command for remote connection :param dict recovery_info: Dictionary containing all the recovery params """ if remote_command: # If this is a remote recovery, rsync the modified files from the # temporary local directory to the remote destination directory. file_list = [] for conf_file in recovery_info['configuration_files']: file_list.append('%s' % conf_file) file_list.append('%s.origin' % conf_file) try: recovery_info['rsync'].from_file_list(file_list, recovery_info['tempdir'], ':%s' % dest) except CommandFailedException, e: output.exception( 'remote copy of configuration files failed: %s', e) output.close_and_exit() def prepare_tablespaces(self, backup_info, cmd, dest, tablespaces): """ Prepare the directory structure for required tablespaces, taking care of tablespaces relocation, if requested. 
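
        For example (illustrative name and path), passing
        tablespaces={'tbs1': '/srv/tablespaces/tbs1'} relocates the
        tablespace named 'tbs1' to that directory on the destination,
        and pg_tblspc/<oid> is recreated as a symbolic link to it.
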
:param barman.infofile.BackupInfo backup_info: backup representation :param barman.fs.UnixLocalCommand cmd: Object for filesystem interaction :param str dest: destination dir for the recovery :param dict tablespaces: dict of all the tablespaces and their location """ tblspc_dir = os.path.join(dest, 'pg_tblspc') try: # check for pg_tblspc dir into recovery destination folder. # if it does not exists, create it cmd.create_dir_if_not_exists(tblspc_dir) except FsOperationFailed, e: output.exception("unable to initialise tablespace directory " "'%s': %s", tblspc_dir, e) output.close_and_exit() for item in backup_info.tablespaces: # build the filename of the link under pg_tblspc directory pg_tblspc_file = os.path.join(tblspc_dir, str(item.oid)) # by default a tablespace goes in the same location where # it was on the source server when the backup was taken location = item.location # if a relocation has been requested for this tablespace, # use the target directory provided by the user if tablespaces and item.name in tablespaces: location = tablespaces[item.name] try: # remove the current link in pg_tblspc, if it exists # (raise an exception if it is a directory) cmd.delete_if_exists(pg_tblspc_file) # create tablespace location, if does not exist # (raise an exception if it is not possible) cmd.create_dir_if_not_exists(location) # check for write permissions on destination directory cmd.check_write_permission(location) # create symlink between tablespace and recovery folder cmd.create_symbolic_link(location, pg_tblspc_file) except FsOperationFailed, e: output.exception("unable to prepare '%s' tablespace " "(destination '%s'): %s", item.name, location, e) output.close_and_exit() output.info("\t%s, %s, %s", item.oid, item.name, location) def retrieve_safe_horizon(self, recovery_info, backup_info, dest): """ Retrieve the safe_horizon for smart copy If the target directory contains a previous recovery, it is safe to pick the least of the two backup "begin times" (the one we are recovering now and the one previously recovered in the target directory). Set the value in the given recovery_info dictionary. :param dict recovery_info: Dictionary containing all the recovery params :param barman.infofile.BackupInfo backup_info: a backup representation :param str dest: recovery destination directory """ # noinspection PyBroadException try: backup_begin_time = backup_info.begin_time # Retrieve previously recovered backup metadata (if available) dest_info_txt = recovery_info['cmd'].get_file_content( os.path.join(dest, '.barman-recover.info')) dest_info = BackupInfo( self.server, info_file=StringIO(dest_info_txt)) dest_begin_time = dest_info.begin_time # Pick the earlier begin time. Both are tz-aware timestamps because # BackupInfo class ensure it safe_horizon = min(backup_begin_time, dest_begin_time) output.info("Using safe horizon time for smart rsync copy: %s", safe_horizon) except FsOperationFailed, e: # Setting safe_horizon to None will effectively disable # the time-based part of smart_copy method. However it is still # faster than running all the transfers with checksum enabled. 
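            # (Worked example with made-up timestamps: restoring a backup
            # begun at 2015-11-10 09:00+00 into a directory holding a
            # previous recovery whose backup began at 2015-11-09 22:00+00
            # gives safe_horizon = 2015-11-09 22:00+00: older files can be
            # trusted on timestamps alone, newer ones are checksummed.)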
# # FsOperationFailed means the .barman-recover.info is not available # on destination directory safe_horizon = None _logger.warning('Unable to retrieve safe horizon time ' 'for smart rsync copy: %s', e) except Exception, e: # Same as above, but something failed decoding .barman-recover.info # or comparing times, so log the full traceback safe_horizon = None _logger.exception('Error retrieving safe horizon time ' 'for smart rsync copy: %s', e) recovery_info['safe_horizon'] = safe_horizon def generate_recovery_conf(self, recovery_info, backup_info, dest, exclusive, remote_command, target_name, target_time, target_tli, target_xid): """ Generate a recovery.conf file for PITR containing all the required configurations :param dict recovery_info: Dictionary containing all the recovery params :param barman.infofile.BackupInfo backup_info: representation of a backup :param str dest: destination directory of the recovery :param boolean exclusive: exclusive backup or concurrent :param str remote_command: ssh command for remote connection :param str target_name: recovery target name for PITR :param str target_time: recovery target time for PITR :param str target_tli: recovery target timeline for PITR :param str target_xid: recovery target transaction id for PITR """ if remote_command: recovery = open(os.path.join(recovery_info['tempdir'], 'recovery.conf'), 'w') else: recovery = open(os.path.join(dest, 'recovery.conf'), 'w') # If GET_WAL has been set, use the get-wal command to retrieve the # required wal files. Otherwise use the unix command "cp" to copy # them from the barman_xlog directory if recovery_info['get_wal']: # We need to guess the right way to execute the "barman" # command on the Barman server. # If remote recovery we use the machine FQDN and the barman_user # setting to build an ssh command. # If local recovery, we use barman directly, assuming # the postgres process will be executed with the barman user. # It has to be reviewed by the user in any case. 
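        # For example (hypothetical user and host), a remote recovery of
        # the server 'main' ends up with a line such as:
        #
        #   restore_command = 'ssh "barman@backup.example.com" barman get-wal main %f > %p'
        #
        # while without get-wal the fallback below is simply:
        #
        #   restore_command = 'cp barman_xlog/%f %p'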
if remote_command: fqdn = socket.getfqdn() barman_command = 'ssh "%s@%s" barman' % ( self.config.config.user, fqdn) else: barman_command = 'barman' print >> recovery,\ "restore_command = '%s get-wal %s %%f > %%p'" % ( barman_command, self.config.name) recovery_info['results']['get_wal'] = True else: print >> recovery, "restore_command = 'cp barman_xlog/%f %p'" if backup_info.version >= 80400 and \ not recovery_info['get_wal']: print >> recovery, "recovery_end_command = 'rm -fr barman_xlog'" if target_time: print >> recovery, "recovery_target_time = '%s'" % target_time if target_tli: print >> recovery, "recovery_target_timeline = %s" % target_tli if target_xid: print >> recovery, "recovery_target_xid = '%s'" % target_xid if target_name: print >> recovery, "recovery_target_name = '%s'" % target_name if (target_xid or target_time) and exclusive: print >> recovery, "recovery_target_inclusive = '%s'" % ( not exclusive) recovery.close() if remote_command: # Uses plain rsync (without exclusions) to ship recovery.conf plain_rsync = Rsync( ssh=remote_command, bwlimit=self.config.bandwidth_limit, network_compression=self.config.network_compression) try: plain_rsync.from_file_list(['recovery.conf'], recovery_info['tempdir'], ':%s' % dest) except CommandFailedException, e: output.exception( 'remote copy of recovery.conf failed: %s', e) output.close_and_exit() def generate_archive_status(self, recovery_info, remote_command, required_xlog_files): """ Populate the archive_status directory :param dict recovery_info: Dictionary containing all the recovery params :param str remote_command: ssh command for remote connection :param tuple required_xlog_files: list of required WAL segments """ if remote_command: status_dir = recovery_info['tempdir'] else: status_dir = os.path.join(recovery_info['wal_dest'], 'archive_status') mkpath(status_dir) for wal_info in required_xlog_files: with open(os.path.join(status_dir, "%s.done" % wal_info.name), 'a') as f: f.write('') if remote_command: try: recovery_info['rsync']('%s/' % status_dir, ':%s' % os.path.join( recovery_info['wal_dest'], 'archive_status')) except CommandFailedException as e: output.exception( "unable to populate pg_xlog/archive_status" "directory: %s", e) output.close_and_exit() def setup(self, backup_info, remote_command, dest): """ Prepare the recovery_info dictionary for the recovery, as well as temporary working directory :param barman.infofile.BackupInfo backup_info: representation of a backup :param str remote_command: ssh command for remote connection :return dict: recovery_info dictionary, holding the basic values for a recovery """ recovery_info = { 'cmd': None, 'recovery_dest': 'local', 'rsync': None, 'configuration_files': [], 'destination_path': dest, 'temporary_configuration_files': [], 'tempdir': tempfile.mkdtemp(prefix='barman_recovery-'), 'is_pitr': False, 'wal_dest': os.path.join(dest, 'pg_xlog'), 'get_wal': RecoveryOptions.GET_WAL in self.config.recovery_options, } # A map that will keep track of the results of the recovery. 
# Used for output generation results = { 'changes': [], 'warnings': [], 'delete_barman_xlog': False, 'get_wal': False, } recovery_info['results'] = results # Set up a list of configuration files recovery_info['configuration_files'].append('postgresql.conf') if backup_info.version >= 90400: recovery_info['configuration_files'].append('postgresql.auto.conf') # Handle remote recovery options if remote_command: recovery_info['recovery_dest'] = 'remote' recovery_info['rsync'] = RsyncPgData( ssh=remote_command, bwlimit=self.config.bandwidth_limit, network_compression=self.config.network_compression) try: # create a UnixRemoteCommand obj if is a remote recovery recovery_info['cmd'] = UnixRemoteCommand(remote_command) except FsOperationFailed: output.error( "Unable to connect to the target host using the command " "'%s'", remote_command) output.close_and_exit() else: # if is a local recovery create a UnixLocalCommand recovery_info['cmd'] = UnixLocalCommand() return recovery_info def teardown(self, recovery_info): """ Cleanup operations for a recovery :param dict recovery_info: dictionary holding the basic values for a recovery """ # Remove the temporary directory (created in the setup method) shutil.rmtree(recovery_info['tempdir']) def set_pitr_targets(self, recovery_info, backup_info, dest, target_name, target_time, target_tli, target_xid): """ Set PITR targets - as specified by the user :param dict recovery_info: Dictionary containing all the recovery params :param barman.infofile.BackupInfo backup_info: representation of a backup :param str dest: destination directory of the recovery :param str|None target_name: recovery target name for PITR :param str|None target_time: recovery target time for PITR :param str|None target_tli: recovery target timeline for PITR :param str|None target_xid: recovery target transaction id for PITR """ target_epoch = None target_datetime = None if (target_time or target_xid or (target_tli and target_tli != backup_info.timeline) or target_name or recovery_info['get_wal']): recovery_info['is_pitr'] = True targets = {} if target_time: # noinspection PyBroadException try: target_datetime = dateutil.parser.parse(target_time) except ValueError as e: output.exception( "unable to parse the target time parameter %r: %s", target_time, e) output.close_and_exit() except Exception: # this should not happen, but there is a known bug in # dateutil.parser.parse() implementation # ref: https://bugs.launchpad.net/dateutil/+bug/1247643 output.exception( "unable to parse the target time parameter %r", target_time) output.close_and_exit() target_epoch = ( time.mktime(target_datetime.timetuple()) + (target_datetime.microsecond / 1000000.)) targets['time'] = str(target_datetime) if target_xid: targets['xid'] = str(target_xid) if target_tli and target_tli != backup_info.timeline: targets['timeline'] = str(target_tli) if target_name: targets['name'] = str(target_name) output.info( "Doing PITR. 
Recovery target %s", (", ".join(["%s: %r" % (k, v) for k, v in targets.items()]))) recovery_info['wal_dest'] = os.path.join(dest, 'barman_xlog') # With a PostgreSQL version older than 8.4, it is the user's # responsibility to delete the "barman_xlog" directory as the # restore_command option in recovery.conf is not supported if backup_info.version < 80400 and \ not recovery_info['get_wal']: recovery_info['results']['delete_barman_xlog'] = True recovery_info['target_epoch'] = target_epoch recovery_info['target_datetime'] = target_datetime def recover(self, backup_info, dest, tablespaces, target_tli, target_time, target_xid, target_name, exclusive, remote_command): """ Performs a recovery of a backup :param barman.infofile.BackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None target_tli: the target timeline :param str|None target_time: the target time :param str|None target_xid: the target xid :param str|None target_name: the target name created previously with pg_create_restore_point() function call :param bool exclusive: whether the recovery is exclusive or not :param str|None remote_command: The remote command to recover the base backup, in case of remote backup. """ # Run the cron to be sure the wal catalog is up to date # Prepare a map that contains all the objects required for a recovery recovery_info = self.setup(backup_info, remote_command, dest) output.info("Starting %s restore for server %s using backup %s", recovery_info['recovery_dest'], self.server.config.name, backup_info.backup_id) output.info("Destination directory: %s", dest) # Set targets for PITR self.set_pitr_targets(recovery_info, backup_info, dest, target_name, target_time, target_tli, target_xid) # Retrieve the safe_horizon for smart copy self.retrieve_safe_horizon(recovery_info, backup_info, dest) # check destination directory. If doesn't exist create it try: recovery_info['cmd'].create_dir_if_not_exists(dest) except FsOperationFailed, e: output.exception("unable to initialise destination directory " "'%s': %s", dest, e) output.close_and_exit() # Initialize tablespace directories if backup_info.tablespaces: self.prepare_tablespaces(backup_info, recovery_info['cmd'], dest, tablespaces) # Copy the base backup output.info("Copying the base backup.") try: # perform the backup copy, honoring the retry option if set self.backup_manager.retry_backup_copy( self.basebackup_copy, backup_info, dest, tablespaces, remote_command, recovery_info['safe_horizon']) except DataTransferFailure, e: output.exception("Failure copying base backup: %s", e) output.close_and_exit() # Copy the backup.info file in the destination as ".barman-recover.info" if remote_command: try: recovery_info['rsync'](backup_info.filename, ':%s/.barman-recover.info' % dest) except CommandFailedException, e: output.exception( 'copy of recovery metadata file failed: %s', e) output.close_and_exit() else: backup_info.save(os.path.join(dest, '.barman-recover.info')) # Restore the WAL segments. If GET_WAL option is set, skip this phase # as they will be retrieved using the wal-get command. 
if not recovery_info['get_wal']: output.info("Copying required WAL segments.") try: # Retrieve a list of required log files required_xlog_files = tuple( self.server.get_required_xlog_files( backup_info, target_tli, recovery_info['target_epoch'])) # Restore WAL segments into the wal_dest directory self.xlog_copy(required_xlog_files, recovery_info['wal_dest'], remote_command) except DataTransferFailure as e: output.exception("Failure copying WAL files: %s", e) output.close_and_exit() except xlog.BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" "HINT: Please run \"barman rebuild-xlogdb %s\" " "to solve this issue" % str(e), self.config.name) output.close_and_exit() # If WAL files are put directly in the pg_xlog directory, # avoid shipping of just recovered files # by creating the corresponding archive status file if not recovery_info['is_pitr']: output.info("Generating archive status files") self.generate_archive_status(recovery_info, remote_command, required_xlog_files) # Generate recovery.conf file (only if needed by PITR) if recovery_info['is_pitr']: output.info("Generating recovery.conf") self.generate_recovery_conf(recovery_info, backup_info, dest, exclusive, remote_command, target_name, target_time, target_tli, target_xid) # Create archive_status directory if necessary archive_status_dir = os.path.join(dest, 'pg_xlog', 'archive_status') try: recovery_info['cmd'].create_dir_if_not_exists(archive_status_dir) except FsOperationFailed, e: output.exception("unable to create the archive_status directory " "'%s': %s", archive_status_dir, e) output.close_and_exit() # As last step, analyse configuration files in order to spot # harmful options. Barman performs automatic conversion of # some options as well as notifying users of their existence. # # This operation is performed in three steps: # 1) mapping # 2) analysis # 3) copy output.info("Identify dangerous settings in destination directory.") self.map_temporary_config_files(recovery_info, backup_info, remote_command) self.analyse_temporary_config_files(recovery_info) self.copy_temporary_config_files(dest, remote_command, recovery_info) # Cleanup operations self.teardown(recovery_info) return recovery_info def basebackup_copy(self, backup_info, dest, tablespaces=None, remote_command=None, safe_horizon=None): """ Perform the actual copy of the base backup for recovery purposes :param barman.infofile.BackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. 
:param datetime.datetime|None safe_horizon: anything after this time has to be checked with checksum """ # Dictionary for paths to be excluded from rsync exclude_and_protect = [] # Set a ':' prefix to remote destinations dest_prefix = '' if remote_command: dest_prefix = ':' # Copy tablespaces applying bwlimit when necessary if backup_info.tablespaces: tablespaces_bw_limit = self.config.tablespace_bandwidth_limit # Copy a tablespace at a time for tablespace in backup_info.tablespaces: # Apply bandwidth limit if requested bwlimit = self.config.bandwidth_limit if tablespaces_bw_limit and \ tablespace.name in tablespaces_bw_limit: bwlimit = tablespaces_bw_limit[tablespace.name] # By default a tablespace goes in the same location where # it was on the source server when the backup was taken location = tablespace.location # If a relocation has been requested for this tablespace # use the user provided target directory if tablespaces and tablespace.name in tablespaces: location = tablespaces[tablespace.name] # If the tablespace location is inside the data directory, # exclude and protect it from being deleted during # the data directory copy if location.startswith(dest): exclude_and_protect.append(location[len(dest):]) # Exclude and protect the tablespace from being deleted during # the data directory copy exclude_and_protect.append("/pg_tblspc/%s" % tablespace.oid) # Copy the tablespace using smart copy tb_rsync = RsyncPgData( ssh=remote_command, bwlimit=bwlimit, network_compression=self.config.network_compression, check=True) try: tb_rsync.smart_copy( '%s/' % backup_info.get_data_directory(tablespace.oid), dest_prefix + location, safe_horizon) except CommandFailedException, e: msg = "data transfer failure on directory '%s'" % location raise DataTransferFailure.from_rsync_error(e, msg) # Copy the pgdata directory rsync = RsyncPgData( ssh=remote_command, bwlimit=self.config.bandwidth_limit, exclude_and_protect=exclude_and_protect, network_compression=self.config.network_compression) try: rsync.smart_copy( '%s/' % backup_info.get_data_directory(), dest_prefix + dest, safe_horizon) except CommandFailedException, e: msg = "data transfer failure on directory '%s'" % dest raise DataTransferFailure.from_rsync_error(e, msg) # TODO: Manage different location for configuration files # TODO: that were not within the data directory def xlog_copy(self, required_xlog_files, wal_dest, remote_command): """ Restore WAL segments :param required_xlog_files: list of all required WAL files :param wal_dest: the destination directory for xlog recover :param remote_command: default None. The remote command to recover the xlog, in case of remote backup. 
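
        For example (an illustrative scenario): when the archive holds
        gzip-compressed segments and the destination is remote, segments
        are first decompressed into a temporary spool directory and then
        shipped in per-hash-directory batches with rsync; for a local
        destination they are decompressed straight into wal_dest.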
""" # List of required WAL files partitioned by containing directory xlogs = collections.defaultdict(list) # Map of every compressor used with any WAL file in the archive, # to be used during this recovery compressors = {} compression_manager = self.backup_manager.compression_manager # Fill xlogs and compressors maps from required_xlog_files for wal_info in required_xlog_files: hashdir = xlog.hash_dir(wal_info.name) xlogs[hashdir].append(wal_info) # If a compressor is required, make sure it exists in the cache if wal_info.compression is not None and \ wal_info.compression not in compressors: compressors[wal_info.compression] = \ compression_manager.get_compressor( compression=wal_info.compression) rsync = RsyncPgData( ssh=remote_command, bwlimit=self.config.bandwidth_limit, network_compression=self.config.network_compression) # If compression is used and this is a remote recovery, we need a # temporary directory where to spool uncompressed files, # otherwise we either decompress every WAL file in the local # destination, or we ship the uncompressed file remotely if compressors: if remote_command: # Decompress to a temporary spool directory wal_decompression_dest = tempfile.mkdtemp(prefix='barman_xlog-') else: # Decompress directly to the destination directory wal_decompression_dest = wal_dest # Make sure wal_decompression_dest exists mkpath(wal_decompression_dest) else: # If no compression wal_decompression_dest = None if remote_command: # If remote recovery tell rsync to copy them remotely # add ':' prefix to mark it as remote # add '/' suffix to ensure it is a directory wal_dest = ':%s/' % wal_dest total_wals = sum(map(len, xlogs.values())) partial_count = 0 for prefix in sorted(xlogs): batch_len = len(xlogs[prefix]) partial_count += batch_len source_dir = os.path.join(self.config.wals_directory, prefix) _logger.info( "Starting copy of %s WAL files %s/%s from %s to %s", batch_len, partial_count, total_wals, xlogs[prefix][0], xlogs[prefix][-1]) # If at least one compressed file has been found, activate # compression check and decompression for each WAL files if compressors: for segment in xlogs[prefix]: dst_file = os.path.join(wal_decompression_dest, segment.name) if segment.compression is not None: compressors[segment.compression].decompress( os.path.join(source_dir, segment.name), dst_file) else: shutil.copy2(os.path.join(source_dir, segment.name), dst_file) if remote_command: try: # Transfer the WAL files rsync.from_file_list( list(segment.name for segment in xlogs[prefix]), wal_decompression_dest, wal_dest) except CommandFailedException as e: msg = "data transfer failure while copying WAL files " \ "to directory '%s'" % (wal_dest[1:],) raise DataTransferFailure.from_rsync_error(e, msg) # Cleanup files after the transfer for segment in xlogs[prefix]: file_name = os.path.join(wal_decompression_dest, segment.name) try: os.unlink(file_name) except OSError as e: output.warning( "Error removing temporary file '%s': %s", file_name, e) else: try: rsync.from_file_list( list(segment.name for segment in xlogs[prefix]), "%s/" % os.path.join(self.config.wals_directory, prefix), wal_dest) except CommandFailedException as e: msg = "data transfer failure while copying WAL files " \ "to directory '%s'" % (wal_dest[1:],) raise DataTransferFailure.from_rsync_error(e, msg) _logger.info("Finished copying %s WAL files.", total_wals) # Remove local decompression target directory if different from the # destination directory (it happens when compression is in use during a # remote recovery if 
wal_decompression_dest and wal_decompression_dest != wal_dest: shutil.rmtree(wal_decompression_dest) barman-1.5.1/barman/retention_policies.py0000644000076500000240000003417412621123447017776 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module defines backup retention policies. A backup retention policy in Barman is a user-defined policy for determining how long backups and archived logs (WAL segments) need to be retained for media recovery. You can define a retention policy in terms of backup redundancy or a recovery window. Barman retains the periodical backups required to satisfy the current retention policy, and any archived WAL files required for complete recovery of those backups. """ from abc import ABCMeta, abstractmethod from datetime import datetime, timedelta import re import logging from barman.infofile import BackupInfo from dateutil import tz _logger = logging.getLogger(__name__) class RetentionPolicy(object): """Abstract base class for retention policies""" __metaclass__ = ABCMeta def __init__(self, mode, unit, value, context, server): """Constructor of the retention policy base class""" self.mode = mode self.unit = unit self.value = int(value) self.context = context self.server = server self._first_backup = None self._first_wal = None def report(self, source=None, context=None): """Report obsolete/valid objects according to the retention policy""" if context is None: context = self.context # Overrides the list of available backups if source is None: source = self.server.get_available_backups(BackupInfo.STATUS_NOT_EMPTY) if context == 'BASE': return self._backup_report(source) elif context == 'WAL': return self._wal_report() else: raise ValueError('Invalid context %s', context) def backup_status(self, backup_id): """Report the status of a backup according to the retention policy""" source = self.server.get_available_backups(BackupInfo.STATUS_NOT_EMPTY) if self.context == 'BASE': return self._backup_report(source)[backup_id] else: return BackupInfo.NONE def first_backup(self): """Returns the first valid backup according to retention policies""" if not self._first_backup: self.report(context='BASE') return self._first_backup def first_wal(self): """Returns the first valid WAL according to retention policies""" if not self._first_wal: self.report(context='WAL') return self._first_wal @abstractmethod def __str__(self): """String representation""" pass @abstractmethod def debug(self): """Debug information""" pass @abstractmethod def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" pass @abstractmethod def _wal_report(self): """Report obsolete/valid WALs according to the retention policy""" pass @classmethod def create(cls, server, option, value): """ If given option and value from the configuration file match, creates the retention policy object for the given server """ # using 
@abstractclassmethod from python3 would be better here
        raise NotImplementedError(
            'The class %s must override the create() class method',
            cls.__name__)

    def to_json(self):
        """
        Output representation of the obj for JSON serialization
        """
        return "%s %s %s" % (self.mode, self.value, self.unit)


class RedundancyRetentionPolicy(RetentionPolicy):
    """
    Retention policy based on redundancy, the setting that determines
    how many periodical backups to keep. A redundancy-based retention
    policy is contrasted with a retention policy that uses a recovery
    window.
    """
    _re = re.compile(r'^\s*redundancy\s+(\d+)\s*$', re.IGNORECASE)

    def __init__(self, context, value, server):
        super(RedundancyRetentionPolicy, self
              ).__init__('redundancy', 'b', value, 'BASE', server)
        assert (value >= 0)

    def __str__(self):
        return "REDUNDANCY %s" % self.value

    def debug(self):
        return "Redundancy: %s (%s)" % (self.value, self.context)

    def _backup_report(self, source):
        """Report obsolete/valid backups according to the retention policy"""
        report = dict()
        backups = source
        # Normalise the redundancy value (according to minimum redundancy)
        redundancy = self.value
        if redundancy < self.server.config.minimum_redundancy:
            _logger.warning(
                "Retention policy redundancy (%s) is lower than "
                "the required minimum redundancy (%s). Enforce %s.",
                redundancy, self.server.config.minimum_redundancy,
                self.server.config.minimum_redundancy)
            redundancy = self.server.config.minimum_redundancy
        # Map the latest 'redundancy' DONE backups as VALID
        # The remaining DONE backups are classified as OBSOLETE
        # Non DONE backups are classified as NONE
        # NOTE: reverse key orders (simulate reverse chronology)
        i = 0
        for bid in sorted(backups.iterkeys(), reverse=True):
            if backups[bid].status == BackupInfo.DONE:
                if i < redundancy:
                    report[bid] = BackupInfo.VALID
                    self._first_backup = bid
                else:
                    report[bid] = BackupInfo.OBSOLETE
                i = i + 1
            else:
                report[bid] = BackupInfo.NONE
        return report

    def _wal_report(self):
        """Report obsolete/valid WALs according to the retention policy"""
        pass

    @classmethod
    def create(cls, server, context, optval):
        # Detect Redundancy retention type
        mtch = cls._re.match(optval)
        if not mtch:
            return None
        value = int(mtch.groups()[0])
        return cls(context, value, server)


class RecoveryWindowRetentionPolicy(RetentionPolicy):
    """
    Retention policy based on recovery window. The DBA specifies a period
    of time and Barman ensures retention of backups and archived WAL files
    required for point-in-time recovery to any time during the recovery
    window. The interval always ends with the current time and extends
    back in time for the number of days specified by the user. For
    example, if the retention policy is set for a recovery window of
    seven days, and the current time is 9:30 AM on Friday, Barman retains
    the backups required to allow point-in-time recovery back to 9:30 AM
    on the previous Friday.
    """
    _re = re.compile(
        r"""
        ^\s*
        recovery\s+window\s+of\s+   # recovery window of
        (\d+)\s+(day|month|week)s?
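        # e.g. (illustrative) this pattern accepts configuration values
        # such as "recovery window of 2 WEEKS", case-insensitively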
# N (day|month|week) with optional 's' \s*$ """, re.IGNORECASE | re.VERBOSE) _kw = {'d': 'DAYS', 'm': 'MONTHS', 'w': 'WEEKS'} def __init__(self, context, value, unit, server): super(RecoveryWindowRetentionPolicy, self ).__init__('window', unit, value, context, server) assert (value >= 0) assert (unit == 'd' or unit == 'm' or unit == 'w') assert (context == 'WAL' or context == 'BASE') # Calculates the time delta if unit == 'd': self.timedelta = timedelta(days=self.value) elif unit == 'w': self.timedelta = timedelta(weeks=self.value) elif unit == 'm': self.timedelta = timedelta(days=(31 * self.value)) def __str__(self): return "RECOVERY WINDOW OF %s %s" % (self.value, self._kw[self.unit]) def debug(self): return "Recovery Window: %s %s: %s (%s)" % ( self.value, self.unit, self.context, self._point_of_recoverability()) def _point_of_recoverability(self): """ Based on the current time and the window, calculate the point of recoverability, which will be then used to define the first backup or the first WAL """ return datetime.now(tz.tzlocal()) - self.timedelta def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" report = dict() backups = source # Map as VALID all DONE backups having end time lower than # the point of recoverability. The older ones # are classified as OBSOLETE. # Non DONE backups are classified as NONE found = False valid = 0 # NOTE: reverse key orders (simulate reverse chronology) for bid in sorted(backups.iterkeys(), reverse=True): # We are interested in DONE backups only if backups[bid].status == BackupInfo.DONE: if found: # Check minimum redundancy requirements if valid < self.server.config.minimum_redundancy: _logger.warning( "Keeping obsolete backup %s for server %s " "(older than %s) " "due to minimum redundancy requirements (%s)", bid, self.server.config.name, self._point_of_recoverability(), self.server.config.minimum_redundancy) # We mark the backup as potentially obsolete # as we must respect minimum redundancy requirements report[bid] = BackupInfo.POTENTIALLY_OBSOLETE self._first_backup = bid valid = valid + 1 else: # We mark this backup as obsolete # (older than the first valid one) _logger.info( "Reporting backup %s for server %s as OBSOLETE " "(older than %s)", bid, self.server.config.name, self._point_of_recoverability()) report[bid] = BackupInfo.OBSOLETE else: _logger.debug( "Reporting backup %s for server %s as VALID " "(newer than %s)", bid, self.server.config.name, self._point_of_recoverability()) # Backup within the recovery window report[bid] = BackupInfo.VALID self._first_backup = bid valid = valid + 1 # TODO: Currently we use the backup local end time # We need to make this more accurate if backups[bid].end_time < self._point_of_recoverability(): found = True else: report[bid] = BackupInfo.NONE return report def _wal_report(self): """Report obsolete/valid WALs according to the retention policy""" pass @classmethod def create(cls, server, context, optval): # Detect Recovery Window retention type match = cls._re.match(optval) if not match: return None value = int(match.groups()[0]) unit = match.groups()[1][0].lower() return cls(context, value, unit, server) class SimpleWALRetentionPolicy(RetentionPolicy): """Simple retention policy for WAL files (identical to the main one)""" _re = re.compile(r'^\s*main\s*$', re.IGNORECASE) def __init__(self, context, policy, server): super(SimpleWALRetentionPolicy, self ).__init__('simple-wal', policy.unit, policy.value, context, server) # The referred policy must be of 
type 'BASE' assert (self.context == 'WAL' and policy.context == 'BASE') self.policy = policy def __str__(self): return "MAIN" def debug(self): return "Simple WAL Retention Policy (%s)" % self.policy def _backup_report(self, source): """Report obsolete/valid backups according to the retention policy""" pass def _wal_report(self): """Report obsolete/valid backups according to the retention policy""" self.policy.report(context='WAL') def first_wal(self): """Returns the first valid WAL according to retention policies""" return self.policy.first_wal() @classmethod def create(cls, server, context, optval): # Detect Redundancy retention type match = cls._re.match(optval) if not match: return None return cls(context, server.config.retention_policy, server) class RetentionPolicyFactory(object): """Factory for retention policy objects""" # Available retention policy types policy_classes = [ RedundancyRetentionPolicy, RecoveryWindowRetentionPolicy, SimpleWALRetentionPolicy ] @classmethod def create(cls, server, option, value): """ Based on the given option and value from the configuration file, creates the appropriate retention policy object for the given server """ if option == 'wal_retention_policy': context = 'WAL' elif option == 'retention_policy': context = 'BASE' else: raise ValueError('Unknown option for retention policy: %s' % option) # Look for the matching rule for policy_class in cls.policy_classes: policy = policy_class.create(server, context, value) if policy: return policy raise ValueError('Cannot parse option %s: %s' % (option, value)) barman-1.5.1/barman/server.py0000644000076500000240000016322612621362541015407 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module represents a Server. Barman is able to manage multiple servers. """ from collections import namedtuple from contextlib import contextmanager import logging import os import shutil import sys from tempfile import NamedTemporaryFile import psycopg2 from psycopg2.extras import RealDictCursor import subprocess from barman import output from barman.backup import BackupManager from barman.compression import identify_compression from barman.infofile import BackupInfo, UnknownBackupIdException, Tablespace, \ WalFileInfo from barman.lockfile import LockFileBusy, LockFilePermissionDenied, \ ServerBackupLock, ServerCronLock, ServerXLOGDBLock, ServerWalArchiveLock from barman.retention_policies import RetentionPolicyFactory from barman.utils import human_readable_timedelta import barman.xlog as xlog _logger = logging.getLogger(__name__) class ConninfoException(Exception): """ Error parsing conninfo parameter """ class PostgresConnectionError(Exception): """ Error connecting to PostgreSQL server. """ class CheckStrategy(object): """ This strategy for the 'check' collects the results of every check and does not print any message. 
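
    A usage sketch (illustrative; the check name is made up):

        strategy = CheckStrategy()
        strategy.result('main', 'ssh', False)
        # 'ssh' is not in the ignore list, so the failure is blocking
        assert strategy.has_error
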
This basic class is also responsible for immediately logging any performed check with an error in case of check failure and a debug message in case of success. """ # create a namedtuple object called CheckResult to manage check results CheckResult = namedtuple('CheckResult', 'server_name check status') # Default list used as a filter to identify non-critical checks NON_CRITICAL_CHECKS = ['minimum redundancy requirements', 'backup maximum age'] def __init__(self, ignore_checks=NON_CRITICAL_CHECKS): """ Silent Strategy constructor :param list ignore_checks: list of checks that can be ignored """ self.ignore_list = ignore_checks self.check_result = [] self.has_error = False def result(self, server_name, check, status, hint=None): """ Store the result of a check (with no output). Log any check result (error or debug level). :param str server_name: the server is being checked :param str check: the check name :param bool status: True if succeeded :param str,None hint: hint to print if not None: """ if not status: # If the name of the check is not in the filter list, # treat it as a blocking error, then notify the error # and change the status of the strategy if check not in self.ignore_list: self.has_error = True _logger.error( "Check '%s' failed for server '%s'" % (check, server_name)) else: # otherwise simply log the error (as info) _logger.info( "Ignoring failed check '%s' for server '%s'" % (check, server_name)) else: _logger.debug( "Check '%s' succeeded for server '%s'" % (check, server_name)) # Store the result and does not output anything result = self.CheckResult(server_name, check, status) self.check_result.append(result) class CheckOutputStrategy(CheckStrategy): """ This strategy for the 'check' command immediately sends the result of a check to the designated output channel. This class derives from the basic CheckStrategy, reuses the same logic and adds output messages. """ def __init__(self): """ Output Strategy constructor """ super(CheckOutputStrategy, self).__init__(ignore_checks=()) def result(self, server_name, check, status, hint=None): """ Output Strategy constructor :param str server_name: the server being checked :param str check: the check name :param bool status: True if succeeded :param str,None hint: hint to print if not None: """ # Call the basic method super(CheckOutputStrategy, self).result( server_name, check, status, hint) # Send result to output output.result('check', server_name, check, status, hint) class Server(object): """ This class represents the PostgreSQL server to backup. """ XLOG_DB = "xlog.db" # the strategy for the management of the results of the various checks __default_check_strategy = CheckOutputStrategy() def __init__(self, config): """ Server constructor. 
:param barman.config.ServerConfig config: the server configuration """ self.config = config self._conn = None self.server_txt_version = None self.server_version = None if self.config.conninfo is None: raise ConninfoException( 'Missing conninfo parameter in barman configuration ' 'for server %s' % config.name) self.backup_manager = BackupManager(self) self.configuration_files = None self.enforce_retention_policies = False # Set bandwidth_limit if self.config.bandwidth_limit: try: self.config.bandwidth_limit = int(self.config.bandwidth_limit) except ValueError: _logger.warning('Invalid bandwidth_limit "%s" for server "%s" ' '(fallback to "0")' % ( self.config.bandwidth_limit, self.config.name)) self.config.bandwidth_limit = None # set tablespace_bandwidth_limit if self.config.tablespace_bandwidth_limit: rules = {} for rule in self.config.tablespace_bandwidth_limit.split(): try: key, value = rule.split(':', 1) value = int(value) if value != self.config.bandwidth_limit: rules[key] = value except ValueError: _logger.warning( "Invalid tablespace_bandwidth_limit rule '%s'" % rule) if len(rules) > 0: self.config.tablespace_bandwidth_limit = rules else: self.config.tablespace_bandwidth_limit = None # Set minimum redundancy (default 0) if self.config.minimum_redundancy.isdigit(): self.config.minimum_redundancy = int(self.config.minimum_redundancy) if self.config.minimum_redundancy < 0: _logger.warning('Negative value of minimum_redundancy "%s" ' 'for server "%s" (fallback to "0")' % ( self.config.minimum_redundancy, self.config.name)) self.config.minimum_redundancy = 0 else: _logger.warning('Invalid minimum_redundancy "%s" for server "%s" ' '(fallback to "0")' % ( self.config.minimum_redundancy, self.config.name)) self.config.minimum_redundancy = 0 # Initialise retention policies self._init_retention_policies() def _init_retention_policies(self): # Set retention policy mode if self.config.retention_policy_mode != 'auto': _logger.warning( 'Unsupported retention_policy_mode "%s" for server "%s" ' '(fallback to "auto")' % ( self.config.retention_policy_mode, self.config.name)) self.config.retention_policy_mode = 'auto' # If retention_policy is present, enforce them if self.config.retention_policy: # Check wal_retention_policy if self.config.wal_retention_policy != 'main': _logger.warning( 'Unsupported wal_retention_policy value "%s" ' 'for server "%s" (fallback to "main")' % ( self.config.wal_retention_policy, self.config.name)) self.config.wal_retention_policy = 'main' # Create retention policy objects try: rp = RetentionPolicyFactory.create( self, 'retention_policy', self.config.retention_policy) # Reassign the configuration value (we keep it in one place) self.config.retention_policy = rp _logger.debug('Retention policy for server %s: %s' % ( self.config.name, self.config.retention_policy)) try: rp = RetentionPolicyFactory.create( self, 'wal_retention_policy', self.config.wal_retention_policy) # Reassign the configuration value (we keep it in one place) self.config.wal_retention_policy = rp _logger.debug( 'WAL retention policy for server %s: %s' % ( self.config.name, self.config.wal_retention_policy)) except ValueError: _logger.exception( 'Invalid wal_retention_policy setting "%s" ' 'for server "%s" (fallback to "main")' % ( self.config.wal_retention_policy, self.config.name)) rp = RetentionPolicyFactory.create( self, 'wal_retention_policy', 'main') self.config.wal_retention_policy = rp self.enforce_retention_policies = True except ValueError: _logger.exception( 'Invalid retention_policy setting 
"%s" for server "%s"' % ( self.config.retention_policy, self.config.name)) def check(self, check_strategy=__default_check_strategy): """ Implements the 'server check' command and makes sure SSH and PostgreSQL connections work properly. It checks also that backup directories exist (and if not, it creates them). :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Check postgres configuration self.check_postgres(check_strategy) # Check barman directories from barman configuration self.check_directories(check_strategy) # Check retention policies self.check_retention_policy_settings(check_strategy) # Check for backup validity self.check_backup_validity(check_strategy) # Executes the backup manager set of checks self.backup_manager.check(check_strategy) def check_postgres(self, check_strategy): """ Checks PostgreSQL connection :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # Take the status of the remote server try: remote_status = self.get_remote_status() except PostgresConnectionError: remote_status = None if remote_status is not None and remote_status['server_txt_version']: check_strategy.result(self.config.name, 'PostgreSQL', True) else: check_strategy.result(self.config.name, 'PostgreSQL', False) return # Check archive_mode parameter: must be on if remote_status['archive_mode'] == 'on': check_strategy.result(self.config.name, 'archive_mode', True) else: check_strategy.result(self.config.name, 'archive_mode', False, "please set it to 'on'") # Check wal_level parameter: must be different from 'minimal' # the parameter has been introduced in postgres >= 9.0 if 'wal_level' in remote_status: if remote_status['wal_level'] != 'minimal': check_strategy.result( self.config.name, 'wal_level', True) else: check_strategy.result( self.config.name, 'wal_level', False, "please set it to a higher level than 'minimal'") if remote_status['archive_command'] and \ remote_status['archive_command'] != '(disabled)': check_strategy.result(self.config.name, 'archive_command', True) # Report if the archiving process works without issues. 
# Skip if the archive_command check fails # It can be None if PostgreSQL is older than 9.4 if remote_status.get('is_archiving') is not None: check_strategy.result(self.config.name, 'continuous archiving', remote_status['is_archiving']) else: check_strategy.result(self.config.name, 'archive_command', False, 'please set it accordingly to documentation') def _make_directories(self): """ Make backup directories in case they do not exist """ for key in self.config.KEYS: if key.endswith('_directory') and hasattr(self.config, key): val = getattr(self.config, key) if val is not None and not os.path.isdir(val): # noinspection PyTypeChecker os.makedirs(val) def check_directories(self, check_strategy): """ Checks backup directories and creates them if they do not exist :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ if self.config.disabled: check_strategy.result(self.config.name, 'directories', False) for conflict_paths in self.config.msg_list: output.info("\t%s" % conflict_paths) else: try: self._make_directories() except OSError, e: check_strategy.result(self.config.name, 'directories', False, "%s: %s" % (e.filename, e.strerror)) else: check_strategy.result(self.config.name, 'directories', True) def check_retention_policy_settings(self, check_strategy): """ Checks retention policy setting :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ if self.config.retention_policy and not self.enforce_retention_policies: check_strategy.result(self.config.name, 'retention policy settings', False, 'see log') else: check_strategy.result(self.config.name, 'retention policy settings', True) def check_backup_validity(self, check_strategy): """ Check if backup validity requirements are satisfied :param CheckStrategy check_strategy: the strategy for the management of the results of the various checks """ # first check: check backup maximum age if self.config.last_backup_maximum_age is not None: # get maximum age information backup_age = self.backup_manager.validate_last_backup_maximum_age( self.config.last_backup_maximum_age) # format the output check_strategy.result( self.config.name, 'backup maximum age', backup_age[0], "interval provided: %s, latest backup age: %s" % ( human_readable_timedelta( self.config.last_backup_maximum_age), backup_age[1])) else: # last_backup_maximum_age provided by the user check_strategy.result( self.config.name, 'backup maximum age', True, "no last_backup_maximum_age provided") def status_postgres(self): """ Status of PostgreSQL server """ remote_status = self.get_remote_status() if remote_status['server_txt_version']: output.result('status', self.config.name, "pg_version", "PostgreSQL version", remote_status['server_txt_version']) else: output.result('status', self.config.name, "pg_version", "PostgreSQL version", "FAILED trying to get PostgreSQL version") return if remote_status['pgespresso_installed']: output.result('status', self.config.name, 'pgespresso', 'pgespresso extension', "Available") else: output.result('status', self.config.name, 'pgespresso', 'pgespresso extension', "Not available") if remote_status['data_directory']: output.result('status', self.config.name, "data_directory", "PostgreSQL Data directory", remote_status['data_directory']) output.result('status', self.config.name, "archive_command", "PostgreSQL 'archive_command' setting", remote_status['archive_command'] or "FAILED (please set it accordingly to documentation)") last_wal = 
remote_status.get('last_archived_wal') # If PostgreSQL is >= 9.4 we have the last_archived_time if last_wal and remote_status.get('last_archived_time'): last_wal += ", at %s" % ( remote_status['last_archived_time'].ctime()) output.result('status', self.config.name, "last_archived_wal", "Last archived WAL", last_wal or "No WAL segment shipped yet") if remote_status['current_xlog']: output.result('status', self.config.name, "current_xlog", "Current WAL segment", remote_status['current_xlog']) # Set output for WAL archive failures (PostgreSQL >= 9.4) if remote_status.get('failed_count') is not None: remote_fail = str(remote_status['failed_count']) if int(remote_status['failed_count']) > 0: remote_fail += " (%s at %s)" % ( remote_status['last_failed_wal'], remote_status['last_failed_time'].ctime()) output.result('status', self.config.name, 'failed_count', 'Failures of WAL archiver', remote_fail) # Add hourly archive rate if available (PostgreSQL >= 9.4) and > 0 if remote_status.get('current_archived_wals_per_second'): output.result( 'status', self.config.name, 'server_archived_wals_per_hour', 'Server WAL archiving rate', '%0.2f/hour' % ( 3600 * remote_status['current_archived_wals_per_second'])) def status_retention_policies(self): """ Status of retention policies enforcement """ if self.enforce_retention_policies: output.result('status', self.config.name, "retention_policies", "Retention policies", "enforced " "(mode: %s, retention: %s, WAL retention: %s)" % ( self.config.retention_policy_mode, self.config.retention_policy, self.config.wal_retention_policy)) else: output.result('status', self.config.name, "retention_policies", "Retention policies", "not enforced") def status(self): """ Implements the 'server-status' command. """ if self.config.description: output.result('status', self.config.name, "description", "Description", self.config.description) output.result('status', self.config.name, "active", "Active", self.config.active) output.result('status', self.config.name, "disabled", "Disabled", self.config.disabled) self.status_postgres() self.status_retention_policies() # Executes the backup manager status info method self.backup_manager.status() def pgespresso_installed(self): """ Returns true if pgexpresso extension is available """ try: with self.pg_connect() as conn: # pg_extension is only available from Postgres 9.1+ if self.server_version < 90100: return False cur = conn.cursor() cur.execute("select count(*) from pg_extension " "where extname = 'pgespresso'") q_result = cur.fetchone()[0] if q_result > 0: return True else: return False except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error retrieving pgespresso information: %s", e) return False def get_pg_stat_archiver(self): """ This method gathers statistics from pg_stat_archiver (postgres 9.4+ or greater required) :return dict|None: a dictionary containing Postgres statistics from pg_stat_archiver or None """ try: with self.pg_connect() as conn: # pg_stat_archiver is only available from Postgres 9.4+ if self.server_version < 90400: return None cur = conn.cursor(cursor_factory=RealDictCursor) # Select from pg_stat_archiver statistics view, # retrieving statistics about WAL archiver process activity, # also evaluating if the server is archiving without issues # and the archived WALs per second rate cur.execute( "SELECT *, current_setting('archive_mode')::BOOLEAN " "AND (last_failed_wal IS NULL " "OR last_failed_wal <= last_archived_wal) " "AS is_archiving, " "CAST (archived_count AS NUMERIC) " "/ EXTRACT (EPOCH 
FROM age(now(), stats_reset)) " "AS current_archived_wals_per_second " "FROM pg_stat_archiver") q_result = cur.fetchone() if q_result: return q_result else: return None except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error retrieving pg_stat_archive data: %s", e) return None def pg_is_in_recovery(self): """ Returns true if PostgreSQL server is in recovery mode """ try: with self.pg_connect() as conn: # pg_is_in_recovery is only available from Postgres 9.0+ if self.server_version < 90000: return False cur = conn.cursor() cur.execute("select pg_is_in_recovery()") q_result = cur.fetchone()[0] if q_result: return True else: return False except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error calling pg_is_in_recovery() function: %s", e) return None def get_remote_status(self): """ Get the status of the remote server :return dict[str, None]: result of the server status query """ # PostgreSQL settings to get from the server pg_settings = [ 'archive_mode', 'archive_command', 'data_directory'] pg_query_keys = [ 'server_txt_version', 'current_xlog', 'pgespresso_installed'] # Initialise the result dictionary setting all the values to None result = dict.fromkeys(pg_settings + pg_query_keys, None) try: with self.pg_connect() as conn: # check for wal_level only if the version is >= 9.0 if self.server_version >= 90000: pg_settings.append('wal_level') for name in pg_settings: result[name] = self.get_pg_setting(name) try: cur = conn.cursor() cur.execute("SELECT version()") result['server_txt_version'] = cur.fetchone()[0].split()[1] except psycopg2.Error, e: _logger.debug( "Error retrieving PostgreSQL version: %s", e) result['pgespresso_installed'] = self.pgespresso_installed() try: if not self.pg_is_in_recovery(): cur = conn.cursor() cur.execute( 'SELECT pg_xlogfile_name(' 'pg_current_xlog_location())') result['current_xlog'] = cur.fetchone()[0] except psycopg2.Error, e: _logger.debug("Error retrieving current xlog: %s", e) result.update(self.get_pg_configuration_files()) # Add pg_stat_archiver statistics if the view is supported pg_stat_archiver = self.get_pg_stat_archiver() if pg_stat_archiver is not None: result.update(pg_stat_archiver) # Merge additional status defined by the BackupManager result.update(self.backup_manager.get_remote_status()) except (PostgresConnectionError, psycopg2.Error) as e: _logger.warn("Error retrieving PostgreSQL status: %s", e) return result def show(self): """ Shows the server configuration """ # Populate result map with all the required keys result = dict([ (key, getattr(self.config, key)) for key in self.config.KEYS ]) remote_status = self.get_remote_status() result.update(remote_status) # Backup maximum age section if self.config.last_backup_maximum_age is not None: age = self.backup_manager.validate_last_backup_maximum_age( self.config.last_backup_maximum_age) # If latest backup is between the limits of the # last_backup_maximum_age configuration, display how old is # the latest backup. if age[0]: msg = "%s (latest backup: %s )" % \ (human_readable_timedelta( self.config.last_backup_maximum_age), age[1]) else: # If latest backup is outside the limits of the # last_backup_maximum_age configuration (or the configuration # value is none), warn the user. msg = "%s (WARNING! 
latest backup is %s old)" % \ (human_readable_timedelta( self.config.last_backup_maximum_age), age[1]) result['last_backup_maximum_age'] = msg else: result['last_backup_maximum_age'] = "None" output.result('show_server', self.config.name, result) @contextmanager def pg_connect(self): """ A generic function to connect to Postgres using Psycopg2 """ myconn = self._conn is None if myconn: try: self._conn = psycopg2.connect(self.config.conninfo) self.server_version = self._conn.server_version if (self.server_version >= 90000 and 'application_name=' not in self.config.conninfo): cur = self._conn.cursor() cur.execute('SET application_name TO barman') cur.close() # If psycopg2 fails to connect to the host, # raise the appropriate exception except psycopg2.DatabaseError as e: raise PostgresConnectionError( "Cannot connect to postgres: %s" % e) try: yield self._conn finally: if myconn: self._conn.close() self._conn = None def get_pg_setting(self, name): """ Get a postgres setting with a given name :param name: a parameter name """ try: with self.pg_connect() as conn: cur = conn.cursor() cur.execute('SHOW "%s"' % name.replace('"', '""')) return cur.fetchone()[0] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error retrieving PostgreSQL setting '%s': %s", name.replace('"', '""'), e) return None def get_pg_tablespaces(self): """ Returns a list of tablespaces or None if not present """ try: with self.pg_connect() as conn: cur = conn.cursor() if self.server_version >= 90200: cur.execute( "SELECT spcname, oid, " "pg_tablespace_location(oid) AS spclocation " "FROM pg_tablespace " "WHERE pg_tablespace_location(oid) != ''") else: cur.execute( "SELECT spcname, oid, spclocation " "FROM pg_tablespace WHERE spclocation != ''") # Generate a list of tablespace objects return [Tablespace._make(item) for item in cur.fetchall()] except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error retrieving PostgreSQL tablespaces: %s", e) return None def get_pg_configuration_files(self): """ Get postgres configuration files or an empty dictionary in case of error """ if self.configuration_files: return self.configuration_files try: with self.pg_connect() as conn: cur = conn.cursor() cur.execute("SELECT name, setting FROM pg_settings " "WHERE name IN (" "'config_file', 'hba_file', 'ident_file')") self.configuration_files = {} for cname, cpath in cur.fetchall(): self.configuration_files[cname] = cpath # Retrieve additional configuration files cur.execute("SELECT DISTINCT sourcefile AS included_file " "FROM pg_settings " "WHERE sourcefile IS NOT NULL " "AND sourcefile NOT IN " "(SELECT setting FROM pg_settings " "WHERE name = 'config_file') " "ORDER BY 1") included_files = [included_file for included_file, in cur.fetchall()] if len(included_files) > 0: self.configuration_files['included_files'] = included_files return self.configuration_files except (PostgresConnectionError, psycopg2.Error) as e: _logger.debug("Error retrieving PostgreSQL configuration files " "location: %s", e) return {} def delete_backup(self, backup): """Deletes a backup :param backup: the backup to delete """ try: # Lock acquisition: if you can acquire a ServerBackupLock # it means that no backup process is running on that server, # so there is no need to check the backup status. # Simply proceed with the normal delete process. 
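# To summarise (annotation only; the logic is implemented by the
# code below):
#
#     lock acquired -> no backup is running: delete is safe
#     lock busy     -> delete is safe unless the target backup is the
#                      first in the catalogue and its status is
#                      STARTED or EMPTY (i.e. the running backup)
#     lock denied   -> permission problem: do not delete
#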
server_backup_lock = ServerBackupLock( self.config.barman_lock_directory, self.config.name) server_backup_lock.acquire(server_backup_lock.raise_if_fail, server_backup_lock.wait) server_backup_lock.release() return self.backup_manager.delete_backup(backup) except LockFileBusy: # Otherwise if the lockfile is busy, a backup process is actually # running on that server. To be sure that it's safe # to delete the backup, we must check its status and its position # in the catalogue. # If it is the first and it is STARTED or EMPTY, we are trying to # remove a running backup. This operation must be forbidden. # Otherwise, normally delete the backup. first_backup = self.get_first_backup(BackupInfo.STATUS_ALL) if backup.backup_id == first_backup.backup_id \ and backup.status in (BackupInfo.STARTED, BackupInfo.EMPTY): output.error("Cannot delete a running backup (%s %s)" % (self.config.name, backup.backup_id)) else: return self.backup_manager.delete_backup(backup) except LockFilePermissionDenied, e: # We cannot access the lockfile. # Exit without removing the backup. output.error("Permission denied, unable to access '%s'" % e) def backup(self): """ Performs a backup for the server """ try: # Default strategy for check in backup is CheckStrategy # This strategy does not print any output - it only logs checks strategy = CheckStrategy() self.check(strategy) if strategy.has_error: output.error("Impossible to start the backup. Check the log " "for more details, or run 'barman check %s'" % self.config.name) return # check required backup directories exist self._make_directories() except OSError, e: output.error('failed to create %s directory: %s', e.filename, e.strerror) return try: # lock acquisition and backup execution with ServerBackupLock(self.config.barman_lock_directory, self.config.name): self.backup_manager.backup() # Archive incoming WALs and update WAL catalogue through cron self.archive_wal(verbose=False) except LockFileBusy: output.error("Another backup process is running") except LockFilePermissionDenied, e: output.error("Permission denied, unable to access '%s'" % e) def get_available_backups(self, status_filter=BackupManager.DEFAULT_STATUS_FILTER): """Get a list of available backups param: status_filter: the status of backups to return, default to BackupManager.DEFAULT_STATUS_FILTER """ return self.backup_manager.get_available_backups(status_filter) def get_last_backup(self, status_filter=BackupManager.DEFAULT_STATUS_FILTER): """ Get the last backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. The status of the backup returned """ return self.backup_manager.get_last_backup(status_filter) def get_first_backup(self, status_filter=BackupManager.DEFAULT_STATUS_FILTER): """ Get the first backup (if any) in the catalog :param status_filter: default DEFAULT_STATUS_FILTER. 
The status of the backup returned """ return self.backup_manager.get_first_backup(status_filter) def list_backups(self): """ Lists all the available backups for the server """ retention_status = self.report_backups() backups = self.get_available_backups(BackupInfo.STATUS_ALL) for key in sorted(backups.iterkeys(), reverse=True): backup = backups[key] backup_size = backup.size or 0 wal_size = 0 rstatus = None if backup.status == BackupInfo.DONE: try: wal_info = self.get_wal_info(backup) backup_size += wal_info['wal_size'] wal_size = wal_info['wal_until_next_size'] except xlog.BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" "HINT: Please run \"barman rebuild-xlogdb %s\" " "to solve this issue", str(e), self.config.name) if self.enforce_retention_policies and \ retention_status[backup.backup_id] != BackupInfo.VALID: rstatus = retention_status[backup.backup_id] output.result('list_backup', backup, backup_size, wal_size, rstatus) def get_backup(self, backup_id): """ Return the backup information for the given backup id. If the backup_id is None or backup.info file doesn't exists, it returns None. :param str|None backup_id: the ID of the backup to return :rtype: BackupInfo|None """ return self.backup_manager.get_backup(backup_id) def get_previous_backup(self, backup_id): """ Get the previous backup (if any) from the catalog :param backup_id: the backup id from which return the previous """ return self.backup_manager.get_previous_backup(backup_id) def get_next_backup(self, backup_id): """ Get the next backup (if any) from the catalog :param backup_id: the backup id from which return the next """ return self.backup_manager.get_next_backup(backup_id) def get_required_xlog_files(self, backup, target_tli=None, target_time=None, target_xid=None): """ Get the xlog files required for a recovery """ begin = backup.begin_wal end = backup.end_wal # If timeline isn't specified, assume it is the same timeline # of the backup if not target_tli: target_tli, _, _ = xlog.decode_segment_name(end) with self.xlogdb() as fxlogdb: for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) # Handle .history files: add all of them to the output, # regardless of their age if xlog.is_history_file(wal_info.name): yield wal_info continue if wal_info.name < begin: continue tli, _, _ = xlog.decode_segment_name(wal_info.name) if tli > target_tli: continue yield wal_info if wal_info.name > end: end = wal_info.name if target_time and target_time < wal_info.time: break # return all the remaining history files for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) if xlog.is_history_file(wal_info.name): yield wal_info # TODO: merge with the previous def get_wal_until_next_backup(self, backup, include_history=False): """ Get the xlog files between backup and the next :param BackupInfo backup: a backup object, the starting point to retrieve WALs :param bool include_history: option for the inclusion of include_history files into the output """ begin = backup.begin_wal next_end = None if self.get_next_backup(backup.backup_id): next_end = self.get_next_backup(backup.backup_id).end_wal backup_tli, _, _ = xlog.decode_segment_name(begin) with self.xlogdb() as fxlogdb: for line in fxlogdb: wal_info = WalFileInfo.from_xlogdb_line(line) # Handle .history files: add all of them to the output, # regardless of their age, if requested (the 'include_history' # parameter is True) if xlog.is_history_file(wal_info.name): if include_history: yield wal_info continue if wal_info.name < begin: continue tli, _, 
_ = xlog.decode_segment_name(wal_info.name) if tli > backup_tli: continue if not xlog.is_wal_file(wal_info.name): continue if next_end and wal_info.name > next_end: break yield wal_info def get_wal_full_path(self, wal_name): """ Build the full path of a WAL for a server given the name :param wal_name: WAL file name """ # Build the path which contains the file hash_dir = os.path.join(self.config.wals_directory, xlog.hash_dir(wal_name)) # Build the WAL file full path full_path = os.path.join(hash_dir, wal_name) return full_path def get_wal_info(self, backup_info): """ Returns information about WALs for the given backup :param BackupInfo backup_info: the target backup """ begin = backup_info.begin_wal end = backup_info.end_wal # counters wal_info = dict.fromkeys( ('wal_num', 'wal_size', 'wal_until_next_num', 'wal_until_next_size', 'wal_until_next_compression_ratio', 'wal_compression_ratio'), 0) # First WAL (always equal to begin_wal) and Last WAL names and ts wal_info['wal_first'] = None wal_info['wal_first_timestamp'] = None wal_info['wal_last'] = None wal_info['wal_last_timestamp'] = None # WAL rate (default 0.0 per second) wal_info['wals_per_second'] = 0.0 for item in self.get_wal_until_next_backup(backup_info): if item.name == begin: wal_info['wal_first'] = item.name wal_info['wal_first_timestamp'] = item.time if item.name <= end: wal_info['wal_num'] += 1 wal_info['wal_size'] += item.size else: wal_info['wal_until_next_num'] += 1 wal_info['wal_until_next_size'] += item.size wal_info['wal_last'] = item.name wal_info['wal_last_timestamp'] = item.time # Calculate statistics only for complete backups # If the cron is not running for any reason, the required # WAL files could be missing if wal_info['wal_first'] and wal_info['wal_last']: # Estimate WAL ratio # Calculate the difference between the timestamps of # the first WAL (begin of backup) and the last WAL # associated to the current backup wal_info['wal_total_seconds'] = ( wal_info['wal_last_timestamp'] - wal_info['wal_first_timestamp']) if wal_info['wal_total_seconds'] > 0: wal_info['wals_per_second'] = ( float(wal_info['wal_num'] + wal_info['wal_until_next_num']) / wal_info['wal_total_seconds']) # evaluation of compression ratio for basebackup WAL files wal_info['wal_theoretical_size'] = \ wal_info['wal_num'] * float(xlog.XLOG_SEG_SIZE) try: wal_info['wal_compression_ratio'] = 1 - ( wal_info['wal_size'] / wal_info['wal_theoretical_size']) except ZeroDivisionError: wal_info['wal_compression_ratio'] = 0.0 # evaluation of compression ratio of WAL files wal_info['wal_until_next_theoretical_size'] = \ wal_info['wal_until_next_num'] * float(xlog.XLOG_SEG_SIZE) try: wal_info['wal_until_next_compression_ratio'] = 1 - ( wal_info['wal_until_next_size'] / wal_info['wal_until_next_theoretical_size']) except ZeroDivisionError: wal_info['wal_until_next_compression_ratio'] = 0.0 return wal_info def recover(self, backup_info, dest, tablespaces=None, target_tli=None, target_time=None, target_xid=None, target_name=None, exclusive=False, remote_command=None): """ Performs a recovery of a backup :param barman.infofile.BackupInfo backup_info: the backup to recover :param str dest: the destination directory :param dict[str,str]|None tablespaces: a tablespace name -> location map (for relocation) :param str|None target_tli: the target timeline :param str|None target_time: the target time :param str|None target_xid: the target xid :param str|None target_name: the target name created previously with pg_create_restore_point() function call :param bool exclusive: 
whether the recovery is exclusive or not :param str|None remote_command: default None. The remote command to recover the base backup, in case of remote backup. """ return self.backup_manager.recover( backup_info, dest, tablespaces, target_tli, target_time, target_xid, target_name, exclusive, remote_command) def get_wal(self, wal_name, compression=None, output_directory=None): """ Retrieve a WAL file from the archive :param str wal_name: id of the WAL file to find into the WAL archive :param str|None compression: compression format for the output :param str|None output_directory: directory where to deposit the WAL file """ # Get the WAL file full path wal_file = self.get_wal_full_path(wal_name) # Check for file existence if not os.path.exists(wal_file): output.error("WAL file '%s' not found in server '%s'", wal_name, self.config.name) return # If an output directory was provided write the file inside it # otherwise we use standard output if output_directory is not None: destination_path = os.path.join(output_directory, wal_name) try: destination = open(destination_path, 'w') output.info("Writing WAL '%s' for server '%s' into '%s' file", wal_name, self.config.name, destination_path) except IOError as e: output.error("Unable to open '%s' file: %s" % destination_path, e) return else: destination = sys.stdout # Get a decompressor for the file (None if not compressed) wal_compressor = self.backup_manager.compression_manager \ .get_compressor(compression=identify_compression(wal_file)) # Get a compressor for the output (None if not compressed) # Here we need to handle explicitly the None value because we don't # want it ot fallback to the configured compression if compression is not None: out_compressor = self.backup_manager.compression_manager\ .get_compressor(compression=compression) else: out_compressor = None # Initially our source is the stored WAL file and we do not have # any temporary file source_file = wal_file uncompressed_file = None compressed_file = None # If the required compression is different from the source we # decompress/compress it into the required format (getattr is # used here to gracefully handle None objects) if getattr(wal_compressor, 'compression', None) != \ getattr(out_compressor, 'compression', None): # If source is compressed, decompress it into a temporary file if wal_compressor is not None: uncompressed_file = NamedTemporaryFile( dir=self.config.wals_directory, prefix='.%s.' % wal_name, suffix='.uncompressed') # decompress wal file wal_compressor.decompress(source_file, uncompressed_file.name) source_file = uncompressed_file.name # If output compression is required compress the source # into a temporary file if out_compressor is not None: compressed_file = NamedTemporaryFile( dir=self.config.wals_directory, prefix='.%s.' 
% wal_name, suffix='.compressed') out_compressor.compress(source_file, compressed_file.name) source_file = compressed_file.name # Copy the prepared source file to destination with open(source_file) as input_file: shutil.copyfileobj(input_file, destination) # Remove temp files if uncompressed_file is not None: uncompressed_file.close() if compressed_file is not None: compressed_file.close() def cron(self, wals=True, retention_policies=True): """ Maintenance operations :param bool wals: WAL archive maintenance :param bool retention_policies: retention policy maintenance """ try: # Actually this is the highest level of locking in the cron, # this stops the execution of multiple cron on the same server with ServerCronLock(self.config.barman_lock_directory, self.config.name): # Standard maintenance (WAL archive) if wals: try: # Try to acquire ServerWalArchiveLock, if the lock is # available, no other processes are running on this # server. # There is no possible race condition here because # we are protected by ServerCronLock. with ServerWalArchiveLock( self.config.barman_lock_directory, self.config.name): # Output and release the lock immediately output.info("Starting WAL archiving for server %s", self.config.name) except LockFileBusy: # Another archive process is running for the server, # warn the user and skip to the next server. output.info( "Another archive-wal process is already running " "on server %s. Skipping to the next server" % self.config.name) return command = ['barman', '-q', 'archive-wal', self.config.name] _logger.debug("Starting subprocess for WAL archiving") subprocess.Popen(command, preexec_fn=os.setsid) # Retention policies execution if retention_policies: self.backup_manager.cron_retention_policy() except LockFileBusy: output.info("Another cron process is already running on server %s. " "Skipping to the next server" % self.config.name) except LockFilePermissionDenied, e: output.error("Permission denied, unable to access '%s'" % e) except (OSError, IOError), e: output.error("%s", e) def archive_wal(self, verbose=True): """ Perform the WAL archiving operations. Usually run as a subprocess of the barman cron command, but can be executed manually using the barman archive-wal command :param bool verbose: if False, produce output only if there is at least one file """ try: # Take care of the archive lock. # Only one archive job per server is admitted with ServerWalArchiveLock(self.config.barman_lock_directory, self.config.name): self.backup_manager.archive_wal(verbose) except LockFileBusy: # If another process is running for this server, # warn the user and skip to the next server output.info("Another archive-wal process is already running " "on server %s. Skipping to the next server" % self.config.name) @contextmanager def xlogdb(self, mode='r'): """ Context manager to access the xlogdb file. This method uses locking to make sure only one process is accessing the database at a time. The database file will be created if it does not exist.
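A read-only access sketch (illustrative; each line can be decoded with the same WalFileInfo.from_xlogdb_line() helper used elsewhere in this module):

    with server.xlogdb() as fxlogdb:
        for line in fxlogdb:
            wal_info = WalFileInfo.from_xlogdb_line(line)
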
Usage example: with server.xlogdb('w') as file: file.write(new_line) :param str mode: open the file with the required mode (default read-only) """ if not os.path.exists(self.config.wals_directory): os.makedirs(self.config.wals_directory) xlogdb = os.path.join(self.config.wals_directory, self.XLOG_DB) with ServerXLOGDBLock(self.config.barman_lock_directory, self.config.name): # If the file doesn't exist and it is required to read it, # we open it in a+ mode, to be sure it will be created if not os.path.exists(xlogdb) and mode.startswith('r'): if '+' not in mode: mode = "a%s+" % mode[1:] else: mode = "a%s" % mode[1:] with open(xlogdb, mode) as f: # execute the block nested in the with statement try: yield f finally: # we are exiting the context # if file is writable (mode contains w, a or +) # make sure the data is written to disk # http://docs.python.org/2/library/os.html#os.fsync if any((c in 'wa+') for c in f.mode): f.flush() os.fsync(f.fileno()) def report_backups(self): if not self.enforce_retention_policies: return dict() else: return self.config.retention_policy.report() def rebuild_xlogdb(self): """ Rebuild the whole xlog database guessing it from the archive content. """ return self.backup_manager.rebuild_xlogdb() def get_backup_ext_info(self, backup_info): """ Return a dictionary containing all available information about a backup The result is equivalent to the sum of information from * BackupInfo object * the Server.get_wal_info() return value * the context in the catalog (if available) * the retention policy status :param backup_info: the target backup :rtype: dict: all information about a backup """ backup_ext_info = backup_info.to_dict() if backup_info.status == BackupInfo.DONE: try: previous_backup = self.backup_manager.get_previous_backup( backup_ext_info['backup_id']) next_backup = self.backup_manager.get_next_backup( backup_ext_info['backup_id']) if previous_backup: backup_ext_info[ 'previous_backup_id'] = previous_backup.backup_id else: backup_ext_info['previous_backup_id'] = None if next_backup: backup_ext_info['next_backup_id'] = next_backup.backup_id else: backup_ext_info['next_backup_id'] = None except UnknownBackupIdException: # no next_backup_id and previous_backup_id items # means "Not available" pass backup_ext_info.update(self.get_wal_info(backup_info)) if self.enforce_retention_policies: policy = self.config.retention_policy backup_ext_info['retention_policy_status'] = \ policy.backup_status(backup_info.backup_id) else: backup_ext_info['retention_policy_status'] = None return backup_ext_info def show_backup(self, backup_info): """ Output all available information about a backup :param backup_info: the target backup """ try: backup_ext_info = self.get_backup_ext_info(backup_info) output.result('show_backup', backup_ext_info) except xlog.BadXlogSegmentName as e: output.error( "invalid xlog segment name %r\n" "HINT: Please run \"barman rebuild-xlogdb %s\" " "to solve this issue", str(e), self.config.name) output.close_and_exit() barman-1.5.1/barman/utils.py0000644000076500000240000001714212621123447015234 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version.
# # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains utility functions used in Barman. """ import datetime import errno import grp import json import logging import logging.handlers import os import pwd _logger = logging.getLogger(__name__) def drop_privileges(user): """ Change the system user of the current python process. It will only work if called as root or as the target user. :param string user: target user :raise KeyError: if the target user doesn't exists :raise OSError: when the user change fails """ pw = pwd.getpwnam(user) if pw.pw_uid == os.getuid(): return groups = [e.gr_gid for e in grp.getgrall() if pw.pw_name in e.gr_mem] groups.append(pw.pw_gid) os.setgroups(groups) os.setgid(pw.pw_gid) os.setuid(pw.pw_uid) os.setegid(pw.pw_gid) os.seteuid(pw.pw_uid) os.environ['HOME'] = pw.pw_dir def mkpath(directory): """ Recursively create a target directory. If the path already exists it does nothing. :param str directory: directory to be created """ if not os.path.isdir(directory): os.makedirs(directory) def configure_logging( log_file, log_level=logging.INFO, log_format="%(asctime)s %(name)s %(levelname)s: %(message)s"): """ Configure the logging module :param str,None log_file: target file path. If None use standard error. :param int log_level: min log level to be reported in log file. Default to INFO :param str log_format: format string used for a log line. Default to "%(asctime)s %(name)s %(levelname)s: %(message)s" """ warn = None handler = logging.StreamHandler() if log_file: log_file = os.path.abspath(log_file) log_dir = os.path.dirname(log_file) try: mkpath(log_dir) handler = logging.handlers.WatchedFileHandler(log_file) except (OSError, IOError): # fallback to standard error warn = "Failed opening the requested log file. " \ "Using standard error instead." formatter = logging.Formatter(log_format) handler.setFormatter(formatter) logging.root.addHandler(handler) if warn: # this will be always displayed because the default level is WARNING _logger.warn(warn) logging.root.setLevel(log_level) def parse_log_level(log_level): """ Convert a log level to its int representation as required by logging module. 
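For example (illustrative values, derived from the implementation below):

    parse_log_level('DEBUG')  # -> logging.DEBUG, i.e. 10
    parse_log_level('15')     # -> 15
    parse_log_level('bogus')  # -> None
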
:param log_level: An integer or a string :return: an integer or None if an invalid argument is provided """ try: log_level_int = int(log_level) except ValueError: log_level_int = logging.getLevelName(str(log_level).upper()) if isinstance(log_level_int, int): return log_level_int return None def pretty_size(size, unit=1024): """ This function returns a pretty representation of a size value :param int|long|float size: the number to prettify :param int unit: 1000 or 1024 (the default) :rtype: str """ suffixes = ["B"] + [i + {1000: "B", 1024: "iB"}[unit] for i in "KMGTPEZY"] if unit == 1000: suffixes[1] = 'kB' # special case kB instead of KB # cast to float to avoid losing decimals size = float(size) for suffix in suffixes: if size < unit or suffix == suffixes[-1]: if suffix == suffixes[0]: return "%d %s" % (size, suffix) else: return "%.1f %s" % (size, suffix) else: size /= unit def human_readable_timedelta(timedelta): """ Given a time interval, returns a human readable string :param timedelta: the timedelta to transform in a human readable form """ delta = abs(timedelta) # Calculate time units for the given interval time_map = { 'day': int(delta.days % 365), 'hour': int(delta.seconds / 3600), 'minute': int(delta.seconds / 60) % 60, } # Build the resulting string time_list = [] # 'Day' part if time_map['day'] > 0: if time_map['day'] == 1: time_list.append('%s day' % time_map['day']) else: time_list.append('%s days' % time_map['day']) # 'Hour' part if time_map['hour'] > 0: if time_map['hour'] == 1: time_list.append('%s hour' % time_map['hour']) else: time_list.append('%s hours' % time_map['hour']) # 'Minute' part if time_map['minute'] > 0: if time_map['minute'] == 1: time_list.append('%s minute' % time_map['minute']) else: time_list.append('%s minutes' % time_map['minute']) human = ', '.join(time_list) # If timedelta is negative append 'ago' suffix if delta != timedelta: human += " ago" return human def which(executable): """ This method is useful to find whether an executable is present in the OS PATH :param str executable: The name of the executable to find :return str|None: the path of the executable or None """ # Get the system path and split. path = os.getenv('PATH') for file_path in path.split(os.path.pathsep): file_path = os.path.join(file_path, executable) # if the file exists return the full path. if os.path.exists(file_path) and os.access(file_path, os.X_OK): return file_path # If the file is not present on the system return None return None class BarmanEncoder(json.JSONEncoder): """ Custom JSON encoder used for BackupInfo encoding This encoder supports the following types: * dates and timestamps if they have a ctime() method. * objects that implement the 'to_json' method.
* binary strings (python 3) """ def default(self, obj): # If the object implements to_json() method use it if hasattr(obj, 'to_json'): return obj.to_json() # Serialise date and datetime objects using ctime() method if hasattr(obj, 'ctime') and callable(obj.ctime): return obj.ctime() # Serialise timedelta objects using human_readable_timedelta() if isinstance(obj, datetime.timedelta): return human_readable_timedelta(obj) # Binary strings must be decoded before using them in # an unicode string if hasattr(obj, 'decode') and callable(obj.decode): return obj.decode('utf-8', 'replace') # Let the base class default method raise the TypeError return super(BarmanEncoder, self).default(obj) def fsync_dir(dir_path): """ Execute fsync on a directory ensuring it is synced to disk :param str dir_path: The directory to sync :raise OSError: If fail opening the directory """ dir_fd = os.open(dir_path, os.O_DIRECTORY) try: os.fsync(dir_fd) except OSError, e: # On some filesystem doing a fsync on a directory # raises an EINVAL error. Ignoring it is usually safe. if e.errno != errno.EINVAL: raise os.close(dir_fd) barman-1.5.1/barman/version.py0000644000076500000240000000141512621416331015552 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . ''' This module contains the current Barman version. ''' __version__ = '1.5.1' barman-1.5.1/barman/xlog.py0000644000076500000240000001357512621123425015047 0ustar mnenciastaff# Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . """ This module contains functions to retrieve information about xlog files """ import os import re # xlog file segment name parser (regular expression) _xlog_re = re.compile(r''' ^ ([\dA-Fa-f]{8}) # everything has a timeline (?: ([\dA-Fa-f]{8})([\dA-Fa-f]{8}) # segment name, if a wal file (?:\.[\dA-Fa-f]{8}\.backup)? 
# and optional offset, if a backup label | \.history # or only .history, if a history file ) $ ''', re.VERBOSE) # xlog location parser for concurrent backup (regular expression) _location_re = re.compile(r'^([\dA-F]+)/([\dA-F]+)$') # Taken from xlog_internal.h from PostgreSQL sources XLOG_SEG_SIZE = 1 << 24 XLOG_SEG_PER_FILE = 0xffffffff // XLOG_SEG_SIZE XLOG_FILE_SIZE = XLOG_SEG_SIZE * XLOG_SEG_PER_FILE class BadXlogSegmentName(Exception): """ Exception for a bad xlog name """ pass def is_any_xlog_file(path): """ Return True if the xlog is either a WAL segment, a .backup file or a .history file, False otherwise. It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.match(os.path.basename(path)) if match: return True return False def is_history_file(path): """ Return True if the xlog is a .history file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith('.history'): return True return False def is_backup_file(path): """ Return True if the xlog is a .backup file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match and match.group(0).endswith('.backup'): return True return False def is_wal_file(path): """ Return True if the xlog is a regular xlog file, False otherwise It supports either a full file path or a simple file name. :param str path: the file name to test :rtype: bool """ match = _xlog_re.search(os.path.basename(path)) if match \ and not match.group(0).endswith('.backup')\ and not match.group(0).endswith('.history'): return True return False def decode_segment_name(path): """ Retrieve the timeline, log ID and segment ID from the name of a xlog segment It can handle either a full file path or a simple file name. :param str path: the file name to decode :rtype: list[int] """ name = os.path.basename(path) match = _xlog_re.match(name) if not match: raise BadXlogSegmentName(name) return [int(x, 16) if x else None for x in match.groups()] def encode_segment_name(tli, log, seg): """ Build the xlog segment name based on timeline, log ID and segment ID :param int tli: timeline number :param int log: log number :param int seg: segment number :return str: segment file name """ return "%08X%08X%08X" % (tli, log, seg) def encode_history_file_name(tli): """ Build the history file name based on timeline :return str: history file name """ return "%08X.history" % (tli,) def enumerate_segments(begin, end, version): """ Get the list of xlog segments from begin to end (included) :param str begin: begin segment name :param str end: end segment name :param int version: postgres version as an integer (e.g. 
90301 for 9.3.1) :rtype: collections.Iterable[str] """ begin_tli, begin_log, begin_seg = decode_segment_name(begin) end_tli, end_log, end_seg = decode_segment_name(end) # this method don't support timeline changes assert begin_tli == end_tli, ( "Begin segment (%s) and end segment (%s) " "must have the same timeline part" % (begin, end)) # Start from the first xlog and sequentially enumerates the segments # to the end cur_log, cur_seg = begin_log, begin_seg while cur_log < end_log or (cur_log == end_log and cur_seg <= end_seg): yield encode_segment_name(begin_tli, cur_log, cur_seg) cur_seg += 1 if cur_seg > XLOG_SEG_PER_FILE or ( version < 90300 and cur_seg == XLOG_SEG_PER_FILE): cur_seg = 0 cur_log += 1 def hash_dir(path): """ Get the directory where the xlog segment will be stored It can handle either a full file path or a simple file name. :param str|unicode path: xlog file name :return str: directory name """ tli, log, _ = decode_segment_name(path) # tli is always not None if log is not None: return "%08X%08X" % (tli, log) else: return '' def get_offset_from_location(location): """ Calculate a xlog segment offset starting from a xlog location. :param location: a complete xlog location :return int: a xlog segment offset """ match = _location_re.match(location) if match: xlo = int(match.group(2), 16) return xlo % XLOG_SEG_SIZE else: return None barman-1.5.1/barman.egg-info/0000755000076500000240000000000012621417067015213 5ustar mnenciastaffbarman-1.5.1/barman.egg-info/dependency_links.txt0000644000076500000240000000000112621417066021260 0ustar mnenciastaff barman-1.5.1/barman.egg-info/PKG-INFO0000644000076500000240000000300012621417066016300 0ustar mnenciastaffMetadata-Version: 1.0 Name: barman Version: 1.5.1 Summary: Backup and Recovery Manager for PostgreSQL Home-page: http://www.pgbarman.org/ Author: 2ndQuadrant Italia Srl Author-email: info@2ndquadrant.it License: GPL-3.0 Description: Barman (Backup and Recovery Manager) is an open source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments and help DBAs during the recovery phase. Barman's most requested features include backup catalogues, incremental backup, retention policies, remote backup and recovery, archiving and compression of WAL files and backups. Barman is written and maintained by PostgreSQL professionals 2ndQuadrant. 
Platform: Linux Platform: Mac OS X Classifier: Environment :: Console Classifier: Development Status :: 5 - Production/Stable Classifier: Topic :: System :: Archiving :: Backup Classifier: Topic :: Database Classifier: Topic :: System :: Recovery Tools Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+) Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 barman-1.5.1/barman.egg-info/requires.txt0000644000076500000240000000007512621417066017614 0ustar mnenciastaffpsycopg2 argh >= 0.21.2 python-dateutil argcomplete argparse barman-1.5.1/barman.egg-info/SOURCES.txt0000644000076500000240000000177712621417067017113 0ustar mnenciastaffAUTHORS ChangeLog INSTALL LICENSE MANIFEST.in NEWS README setup.py barman/__init__.py barman/backup.py barman/backup_executor.py barman/cli.py barman/command_wrappers.py barman/compression.py barman/config.py barman/diagnose.py barman/fs.py barman/hooks.py barman/infofile.py barman/lockfile.py barman/output.py barman/recovery_executor.py barman/retention_policies.py barman/server.py barman/utils.py barman/version.py barman/xlog.py barman.egg-info/PKG-INFO barman.egg-info/SOURCES.txt barman.egg-info/dependency_links.txt barman.egg-info/requires.txt barman.egg-info/top_level.txt bin/barman doc/barman.1 doc/barman.5 doc/barman.conf rpm/barman.spec rpm/rhel5/python-dateutil-1.4.1-remove-embedded-timezone-data.patch rpm/rhel5/python26-argcomplete.spec rpm/rhel5/python26-argh.spec rpm/rhel5/python26-dateutil.spec rpm/rhel5/python26-psycopg2.spec rpm/rhel5/setup.cfg.patch rpm/rhel6/python-argcomplete.spec rpm/rhel6/python-argh.spec rpm/rhel7/python-argh.spec scripts/barman-wal-restore scripts/barman.bash_completionbarman-1.5.1/barman.egg-info/top_level.txt0000644000076500000240000000000712621417066017741 0ustar mnenciastaffbarman barman-1.5.1/bin/0000755000076500000240000000000012621417067013031 5ustar mnenciastaffbarman-1.5.1/bin/barman0000755000076500000240000000152612602321601014206 0ustar mnenciastaff#!/usr/bin/env python # # Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see . # # PYTHON_ARGCOMPLETE_OK from barman.cli import main if __name__ == '__main__': main() else: raise NotImplementedError barman-1.5.1/ChangeLog0000644000076500000240000015714212621416550014041 0ustar mnenciastaff2015-11-13 Marco Nenciarini Update the ChangeLog file Prepared version 1.5.1 2015-11-13 Giulio Calacoci Create pg_xlog/archive_status directory at the end of recovery On PostgreSQL 8.3, the pg_xlog/archive_status directory is not automatically created, if missing, during the startup. 
To avoid errors Barman now creates that directory at the end of any recovery operation. 2015-11-13 Marco Nenciarini Converted sphinx directory README to Markdown 2015-11-10 Marco Nenciarini Pin (temporarily) pytest-catchlog to version 1.1 The pytest-catchlog release 1.2.0 broke Python 2.6 compatibility, so pin the 1.1 version until it will be restored. Ref: https://github.com/eisensheng/pytest-catchlog/pull/15 In latest release of pytest-catchlog the result of caplog.records() call is a copy of the event list so any modification of that has no global effect. Change the code that uses caplog.records() to not rely on the previous undocumented behaviour. 2015-11-05 Marco Nenciarini Fix a typo in barman-wal-restore help output 2015-11-04 Marco Nenciarini Update the ChangeLog file Avoid forcing arcfour cypher in barman-wal-restore OpenSSH 6.7 dropped many low security algorithms, and arcfour is one of them. So we stop forcing it in the script, allowing the user to define the wanted algorithm in ~/.ssh/config Ref: http://www.openssh.com/txt/release-6.7 2015-11-04 Giulio Calacoci Improve error messaging for missing config files 2015-11-04 Gabriele Bartolini Set version to 1.5.1b1 2015-10-27 Giulio Calacoci Manage 'wal_level' only for PostgreSQL >= 9.0 Really fixes #3 2015-10-26 Marco Nenciarini Fix all E731 pep8 errors Always use a def statement instead of an assignment statement that binds a lambda expression directly to a name. See: https://www.python.org/dev/peps/pep-0008/#programming-recommendations 2015-10-22 Marco Nenciarini Update the ChangeLog file 2015-10-22 Gabriele Bartolini NEWS for 1.5.1a1 Set version to 1.5.1a1 Revert version to 1.5 branch due to relevant bug fixing 2015-10-12 Giulio Calacoci Add the 'archive-wal' command The 'archive-wal' command executes the WAL maintenance operations on a given server. This command is automatically executed as a subprocess by the cron command, allowing the parallel execution of WAL archiving on different servers. 2015-10-20 Giuseppe Broccolo Avoid 'wal_level' check on PostgreSQL version < 9.0 Conditionally skip 'wal_level' check in PostgreSQL versions prior to 9.0. Fixes #3 Avoid retention policy checks during the recovery Additionally, skip any missing tablespace directory during the deletion of a backup (like we already do with missing pgdata directory) 2015-10-20 Marco Nenciarini Fix more incorrect mock assert calls in unit tests Add flake8-mock plugin in flake8 tox environment to make sure it will not happen again. Some more cleanup of testing infrastructure has been done: * Switch from pytest-capturelog (unresponsive upstream) to pytest-catchlog. * Remove the version pinning from mock as 1.3.0 supports py2.6 again. * Add flake8-copyright and flake8-string-format plugins. 2015-10-09 Giulio Calacoci Allow parallel cron execution for different servers Allow the execution of multiple 'barman cron' processes, each one handling a different server. Servers will be handled sequentially, skipping those that are already being served by another cron process. 2015-10-22 Marco Nenciarini Fix calculation of backup size In Barman 1.5.0 the internal directory structure of a backup has been changed, moving tablespaces from being inside the data directory to being inside the backup directory. The method backup_fsync_and_set_sizes() now takes that into account by running against the whole backup directory. 
Fixes #5 2015-10-12 Marco Nenciarini Fix some incorrect mock assert calls in unit tests 2015-10-06 Giulio Calacoci Support for mixed compression types in WAL files (#61) Decompress each WAL file using the specific algorithm reported in the XLOG archive (xlogdb). Improve management of xlog.db errors Better management of errors during the decoding of the name of an xlog segment. Added a hint that suggests to run "barman rebuild-xlogdb" to fix corruptions in the xlogdb file. 2015-10-02 Gabriele Bartolini Started version 1.6.0a1 2015-09-25 Marco Nenciarini Update the ChangeLog file Update Copyright notice 2015-09-25 Gabriele Bartolini Remove obsolete section from tutorial Removed a section in the tutorial regarding the 'delete' command, as pointed out by Ian Barwick. 2015-09-25 Marco Nenciarini Enable flake8 in every tox run 2015-09-24 Gabriele Bartolini Prepared version 1.5.0 for final release 2015-09-24 Marco Nenciarini Update tox.ini to allow specifying a target test Also add .cache directory, which is created by latest tox, to .gitignore 2015-09-24 Giulio Calacoci Changed import of call() method. Now is compatible with all versions of py.test libraries 2015-09-16 Giulio Calacoci Support for non-critical checks during backup Add a filter for non-critical checks during backup operations. This patch fixes a bug that prevented users from taking a new backup when minimum redundancy or smelly backup checks failed. 2015-09-15 Marco Nenciarini Update the ChangeLog file 2015-09-15 Gabriele Bartolini Prepared for 1.5.0 alpha 1 2015-09-09 Gabriele Bartolini Converted man pages to Markdown 2015-09-08 Gianni Ciolli Some small fixes to the tutorial 2015-09-01 Francesco Canovai Updated spec file for RPM building 2015-09-04 Giuseppe Broccolo Add barman-wal-restore script Add a simple bash script to be used as `restore_command` in a standby scenario, as fallback method. The script uses ssh to connect to the Barman server and requests the required WAL file for recovery. The script checks that destination path is not a directory. 2015-08-27 Gabriele Bartolini Convert tutorial to Markdown 2015-08-28 Giulio Calacoci Manage options without '=' in PostgreSQL configuration files 2015-08-18 Gabriele Bartolini Allow 'pre' retry hook scripts to stop the command Add EXIT_ABORT_STOP (63) and EXIT_ABORT_CONTINUE (62) exit codes to control how a retry hook script is aborted. Termination of a retry hook script with EXIT_ABORT_CONTINUE informs Barman to continue with its operations. By terminating a retry hook script with EXIT_ABORT_STOP, users request Barman to stop its main operation (i.e. backup or WAL archival). EXIT_ABORT_CONTINUE is implemented by every retry hook script. EXIT_ABORT_STOP is currently implemented only with 'pre_backup_retry_script' and 'pre_archive_retry_script'. EXIT_ABORT_STOP is currently ignored by 'post_backup_retry_script' and 'post_archive_retry_script', and its behaviour in these cases is identical to EXIT_ABORT_CONTINUE. 2015-08-21 Marco Nenciarini Add flake8 tox environment 2015-08-18 Gabriele Bartolini Documentation for 'barman_lock_directory' 2015-08-18 Giulio Calacoci Analyse include directives for PostgreSQL Check if PostgreSQL administrators take advantage of include directives and specify additional files. Include directives (include, include_if_exists, include_dir) are not supported in Barman for files that reside outside PGDATA. During backup, warn users and list the files that require manual backup. 
During recovery, warn users about the presence of include directives in PostgreSQL configuration. 2015-08-18 Gabriele Bartolini Workaround for rsync on SUSE Linux Many thanks to Christoph Moench-Tegeder for his workaround proposal. This fixes the outstanding issue on SUSE Linux - previously raised by issues #13 and #26. See also: https://bugzilla.opensuse.org/show_bug.cgi?id=898513 2015-08-17 Gabriele Bartolini Added copy_method option (fixed to rsync) 2015-08-06 Giulio Calacoci Support for 'retry' hook scripts for backup and WAL archiving A 'retry' hook script is a special kind of hook scripts that Barman tries to run indefinitely until it either returns a SUCCESS (0) or an ABORT (63) exit code. Safe hook scripts are executed immediately before (pre) and after (post) the command execution. Standard hook scripts are executed immediately before (pre) and after (post) the retry hook scripts. This patch adds support for pre/post retry hook scripts for backup and WAL archiving. The following four global/server configuration options have been added: * pre_backup_retry_script: executed before a backup * post_backup_retry_script: executed after a backup * pre_archive_retry_script: executed before a WAL archive operation * post_archive_retry_script: executed after a WAL archive operation By default, no retry hook script is executed. Environment variables are identical to the equivalent standard hook script. 2015-08-12 Gabriele Bartolini Updated AUTHORS file 2014-05-29 Giulio Calacoci Support for the 'get-wal' command The 'barman get-wal' command allows users to fetch any WAL file from the archive of a specific server in Barman. 2015-08-21 Marco Nenciarini Add simple Travis CI integration 2015-08-11 Gabriele Bartolini Set version 1.5.0a1 2015-07-30 Giulio Calacoci Preserve Timeline history files. Fixes #70. Added check for the management of history files during the removal of a backup or during normal maintenance operations (cron). 2015-08-06 Giulio Calacoci Forbid the delete of a running backup Block the execution of a barman delete command on a backup that is in progress. Use locks to ensure that there are no running backups on that server. Otherwise check for the position of the backup to be deleted in the catalogue and its status: if it is the first backup and its status is STARTED or EMPTY, the backup is running and delete is forbidden. 2015-08-03 Giulio Calacoci Follow symlinks when checking directory paths Check conflicting paths in configuration files using canonical paths and following symlinks when necessary. 2015-07-13 Francesco Canovai Pin mock==1.0.1 for testing. In version 1.1.x mock has changed some behaviours and is currently incompatible with our unit tests. 2015-07-08 Stefano Bianucci Execute check() before starting a backup Execute the full suite of tests from barman check command before starting a backup. Skip the execution of a backup in case check fails. Add a strategy for managing the results of the checks. Now every check is properly logged as error (failure) or debug (success). 2015-07-03 Giulio Calacoci Support code documentation using Sphinx Generate code documentation using Sphinx autodocs 2015-07-13 Giulio Calacoci Modified test for correct management of timezone 2015-07-10 Marco Nenciarini Properly support BackupInfo serialization in JSON format. 2015-07-01 Giulio Calacoci Improved management of configuration in tests Improved and simplified the management of configurations in tests. 
Added a method for the creation of dictionaries containing all the keys that are usually present in a configuration. Updated tests accordingly. 2015-06-18 Giulio Calacoci Second part of the backup_executor module's refactor Refactored and streamlined the executor module. Introduced a specific class for physical backup with Rsync through Ssh, as well as a strategy pattern for the management of exclusive and concurrent backups (for backups from a standby). 2015-06-23 Gabriele Bartolini Standard error management for server commands Standardised the process of managing errors with server commands in barman/cli.py. By default, inactive servers are skipped (without error) as well as temporarily disabled servers (with error). No distinction is made between calling a command with one server as target or with a list of them (including 'all'). Exceptions are check (no server is skipped, errors are returned only for active servers), cron (no error is ever returned), list-server and diagnose (both managing active/disabled servers with no errors). Inactive servers are the ones with 'active' option set to False. Disabled servers are the ones with internal directory conflicts (e.g. WALs directory = base backup directory). 2015-06-23 Gabriele Bartolini Asciidoc support for man pages and tutorial 2015-06-16 Giulio Calacoci Fixed error in WAL rate calculation. Solved an error during the evaluation of the WAL rate for a backup. Added two basic unit tests. 2015-06-12 Stefano Bianucci Add check for wal_level For better usability, warn users about setting a proper value for wal_level setting in PostgreSQL. 2015-05-14 Stefano Bianucci Add check for conflicting paths in Barman's configuration Added controls for path-clash during the creation of Barman servers. If there are conflicting paths, Barman will disable those servers containing errors. If a potentially destructive command like "backup" is issued on a server containing conflicts, Barman exits with an error message. 2015-05-21 Giulio Calacoci Complete refactor of the 'recover' command The main 'recover' command has been completely refactored, through the creation of a separate module, called 'recovery_executor'. The RecoveryExecutor class now embodies both local and remote operations, laying the road for future improvements. This patch also fixes #68, by disabling dangerous settings in postgresql.auto.conf (available from PostgreSQL 9.4). Basic unit tests have been added. 2015-05-19 Giulio Calacoci Rename class Server to ServerConfig in barman.config module Previously both barman.config and barman.server modules had a Server class. The former has now been renamed to ServerConfig, hence removing the ambiguity. 2015-05-21 Marco Nenciarini Fix incompatibility with tox version >= 2 2015-05-08 Giulio Calacoci Make sure that even an EMPTY backup has a server set. 2015-05-07 Giulio Calacoci Improve xlog.db integrity (Closes: #67) * Execute flush() and fsync() after writing a line in xlog.db * Execute fsync() on incoming directory after every WAL is archived 2015-04-13 Giulio Calacoci Remove unused 'server' argument from WalFile.from_xlogdb_line() This also fix regression in 'barman delete' command introduced with commit 7ac8fe9c41fd7e5636f370abdc92ca785057263e. 2015-04-13 Giulio Calacoci Fix exception during error handling in barman recovery (Closes: #65) 2015-03-24 Giulio Calacoci Improved cache management of backups Streamlined the management of the cache of the backups. It is now possible to register and remove a backup from the cache. 
The cache structure has been simplified and now it is a simple dictionary of backups. Status filters are applied by the get_available_backups() method. Managed registration and removal of a backup in the cache during backup operations. 2015-03-02 Giulio Calacoci Create backup_executor module and refactor backup. Extract all the methods relative to the execution of a backup into a dedicated object. The RsyncBackupExecutor encapsulates the operation of backing up a remote server using rsync and ssh to copy files. 2015-03-27 Marco Nenciarini Improved management of xlogb file The xlogdb file is now correctly fsynced when updated. Also, the rebuild-xlogdb command now operates on a temporary new file, which overwrites the main one when finished. 2015-03-27 Stefano Bianucci Add "active" configuration option for a server It is now possible to temporarily disable a server through the 'active' configuration option. Defined at server level as a boolean, when set to False the server is ignored by main operational commands such as cron, backup and recover. By default, it is set to True. 2015-03-20 Giulio Calacoci Modified Barman version following PEP 440 rules. Changed Barman version from 1.4.1-alpha1 to 1.4.1a1 following PEP 440 rules. Adapted RPM build scripts to produce packages with the correct names. 2015-03-17 Giulio Calacoci Fix computation of WAL production ratio The WAL file ratio reported in the 'show-backup' command output was wrong because it was considering only the number of WALS produced during the backup instead of the number of WALs produced until next backup. 2015-02-25 Giulio Calacoci Fix for WAL archival stop working if first backup is EMPTY In some rare cases, if an empty backup has left by a failure during a backup, the cron could start trashing WAL files, as if there is no available backups. Closes: #64 2015-03-06 Gabriele Bartolini Added 'barman_lock_directory' global option Barman now allows to specify the default directory for locks through the global option called 'barman_lock_directory', by default set to 'barman_home'. Lock files will be created inside this directory. Names of lock files have been slightly changed. However, this won't affect users of Barman, unless you are relying on their locations and paths. This patch introduces four classes for managing lock files: GlobalCronLock, ServerBackupLock, ServerCronLock and ServerXLOGDBLock. 2015-02-03 Giulio Calacoci New backup directory structure Inside the backup directory the 'pgdata' has been renamed to 'data'. Tablespaces, if present, are stored into individual directories alongside the 'data' directory. During backup and recovery operations, tablespaces are copied individually using a separate rsync command. 2015-02-12 Giulio Calacoci Improve backup delete command Improve robustness and error reporting of backup delete command 2015-02-10 Stefano Bianucci Add unit tests for dateutil module compatibility 2015-02-08 Gabriele Bartolini After a backup, limit cron activity to WAL archiving only (#62) 2015-01-28 Marco Nenciarini Clarify rpm spec comments about pre-releases 2015-01-28 Gabriele Bartolini Updated backlog (TODO list) 2015-01-23 Marco Nenciarini Update metadata in setup.py - Improve barman description - Add Python 3.4 2015-01-23 Gabriele Bartolini Started version 1.4.1-alpha.1 Update the ChangeLog file Prepared version 1.4.0 2015-01-20 Francesco Canovai Updated spec files for RHEL7 2015-01-16 Giulio Calacoci Delete basebackup dir as last action of a delete. 
Split the delete operation: remove the PGDATA directory first, then the related WAL files and, finally, the basebackup directory. 2015-01-13 Giulio Calacoci Add minimum_redundancy tests in test_retention_policy.py 2015-01-13 Gabriele Bartolini Fix calculation of deduplication ratio 2015-01-12 Gabriele Bartolini Update the ChangeLog file Prepared documentation for version 1.4.0-alpha1 2015-01-11 Gabriele Bartolini Store deduplication effects for incremental backup When incremental backup is enabled and uses hard links (reuse_backup = link), output of 'backup' command reports the effects of deduplication. The metric is stored in the backup.info file in the 'deduplicated_size' field. IMPORTANT: this metric refers to the increment in size of the current backup from the previous backup and reflects only the situation at backup time. 2015-01-10 Gabriele Bartolini Prepared version 1.4.0-alpha1 Updated copyright to 2015 2015-01-09 Marco Nenciarini Fix smart_copy of tablespaces when using bwlimit option 2015-01-07 Giulio Calacoci Add dedicated exception for PostgreSQL connection errors 2015-01-08 Giulio Calacoci Fix missing argument error in retention policies backup_status method Improve test coverage for retention_policy.py module 2015-01-07 Giulio Calacoci Remove logging of tracebacks on error during backup 2015-01-05 Gabriele Bartolini Avoid logging of tracebacks during smart copy While retrieving the list of files on destination for smart copy, log any failure as error instead of exception 2014-12-22 Giulio Calacoci Unit tests for BackupInfo object 2014-12-24 Giulio Calacoci Change the way BackupInfo objects are created for testing Merge the method build_test_backup_info and the mock_backup_info. Now we use real BackupInfo objects instead of a Mock 2011-12-07 Marco Nenciarini Incremental base backup implementation Add support for reuse_backup global/server option, accepting three possible values: * off: no incremental backup support (default) * copy: uses the last available backup as source (preventing unmodified files from being copied) * link: same as copy but uses hard links on destination, if the filesystem on the backup server allows it (reducing the occupied space) Add support for command line '--reuse-backup' option (default: link). 2014-12-24 Gabriele Bartolini Allow last_archived_wal to be any xlog file Correctly show any xlog file as last_archived_wal for pre-pg_stat_archiver cases. Improve testing and docstrings for barman/xlog.py module. 2014-12-09 Giulio Calacoci Improve robustness of ssh_command and conninfo options 2014-12-18 Giulio Calacoci pg_stat_archiver support for PostgreSQL 9.4 Integrate pg_stat_archiver with PostgreSQL 9.4 servers for the barman check, show-server and status commands. 2014-11-28 Giulio Calacoci Improve robustness of retention policy unit tests 2014-12-16 Giulio Calacoci Fixes retention policies delete bug (#58) The method responsible for deleting obsolete backups during retention policy enforcement will no longer raise 'NoneType object is not iterable'. This prevents barman from terminating abruptly. 2014-11-28 Giulio Calacoci Pass list of available backups to retention policy code 2014-12-02 Giulio Calacoci Include history files in WAL management 2014-12-04 Giulio Calacoci Added a util method to find an executable in the system path.
If rsync is not present on the system, a proper error message is displayed to the user when a command using rsync is issued 2014-12-09 Giulio Calacoci Changed the behaviour when pg_ident.conf is missing from an error to a warning 2014-10-22 Marco Nenciarini Remove code to replace output stream when quiet Previously the '-q' option was handled by replacing the standard output stream with one which throws away everything it gets. Now it is not needed anymore because we have a proper output module. 2014-09-26 Giulio Calacoci Remove all remaining output done by yield Migrate all the remaining parts that used yield to produce output to the new output module. 2014-10-07 Marco Nenciarini Ignore fsync EINVAL errors on directories (#55) On some filesystems doing an fsync on a directory raises an EINVAL error. Ignoring it is usually safe. 2014-09-23 Giulio Calacoci Modified output module to access protected properties: quiet and debug 2014-09-10 Marco Nenciarini Fix bash autocompleter Minor changes: * Some code formatting adjustments Move cron retention policy management to a separate method 2014-09-05 Marco Nenciarini Fix dates in rpm changelog 2014-09-03 Giulio Calacoci Calculate backup WAL statistics only if the WALs are already processed 2014-09-02 Giulio Calacoci Change default LockFile behaviour to raise if acquisition fails 2014-09-01 Gabriele Bartolini Invoke WAL maintenance after a successful backup * At the end of the 'barman backup' command, maintenance operations are automatically started for successful backups (equivalent to manually executing a 'barman cron' command, just for that server) * Trashing of useless WALs (part of 'barman cron') has been changed as follows: * in case of one or more backups, delete WAL files older than the start WAL of the first backup * otherwise, trash WAL files in case of exclusive backup server (that is, not concurrent) 2014-09-03 Marco Nenciarini Remove redundant server argument from HookScriptRunner.env_from_wal_info() 2014-08-27 Gabriele Bartolini Add relpath() and fullpath() methods in WalInfoFile * Remove 'full_path' attribute in WalInfoFile * Add 'relpath()' method to WalInfoFile, which returns the relative path of a WAL file within the 'wals_directory' directory * Add 'fullpath()' method to WalInfoFile, which returns the full path of a WAL file within a server installation (requires a server object) 2014-08-23 Gabriele Bartolini Updated version in .spec file 2014-08-20 Marco Nenciarini Add build_config_from_dicts to testing_helpers module Make Config.Server, WalFileInfo and BackupInfo objects json encodable 2014-08-20 Gabriele Bartolini Added unit to JSON representation of a retention policy Started version 1.3.4-alpha.1 2014-08-18 Gabriele Bartolini Update the ChangeLog file Fixed typo in release date 2014-08-13 Gabriele Bartolini Prepared version 1.3.3 2014-08-12 Marco Nenciarini Add a unit test for the Server.get_wal_full_path() method 2014-08-12 Gabriele Bartolini Refactored building of full path of a WAL file 2014-08-01 Marco Nenciarini Report which file is about to be archived before actually doing it 2014-07-25 Giulio Calacoci Remove traceback from output when Barman is interrupted by CTRL-c Avoid flushing/fsyncing read-only files Fixes: #49 EXCEPTION: [Errno 9] Bad file descriptor 2014-07-24 Giulio Calacoci Added Barman's version number to 'barman diagnose' 2014-07-22 Giulio Calacoci Move xlogdb_parse_line method into the WalFileInfo class 2014-07-23 Marco Nenciarini Cleanup output API status at the end of test_output.py 2014-07-22 Gabriele Bartolini Estimates WAL
production rate for a backup 2014-07-18 Giulio Calacoci Removed duplicate log message at the end of 'barman recover' wal segments copy Fix datetime.timedelta json serialization in 'barman diagnose' command 2014-07-17 Marco Nenciarini Update the ChangeLog file 2014-07-17 Gabriele Bartolini Prepared version 1.3.3-alpha.1 docs 2014-07-17 Marco Nenciarini Really fix "ssh" version detection in "barman diagnose" command 2014-07-16 Giulio Calacoci Add command line options for retry of backup/recover copy Implemented the --retry-times (including --no-retry) and --retry-sleep command line options for backup/recovery copy Emit warnings in case of unexpected configuration options 2014-07-14 Giulio Calacoci Reduce the verbosity of the log for "barman cron" Currently the "barman cron" command emits one log line for every WAL file that's archived (including the server name as a prefix). No log line is emitted for an empty cron run. Make recovery --target-time option more resilient to wrongly formatted values Work around a bug in dateutil.parser.parse() implementation ref: https://bugs.launchpad.net/dateutil/+bug/1247643 Improved logging for "barman recover" command Default log prefix now contains barman process ID (pid) 2014-07-16 Marco Nenciarini Fix "ssh" version detection in "barman diagnose" command 2014-07-11 Marco Nenciarini Fix wrong variable name in BackupManager.delete_wal() 2014-07-09 Giulio Calacoci Add unit test for LockFile object and server.xlogdb() call Minor changes: - converted test_xlog.py to py.test style 2014-07-11 Giulio Calacoci Make sure remote WAL destination path is a directory Add a trailing slash to the remote WAL destination path, in order to ensure it is a directory 2014-07-07 Giulio Calacoci Fix serialisation of CsvOption during "barman diagnose" command 2014-07-11 Marco Nenciarini Use a WalFileInfo object when decoding an xlogdb line Add --no-human-readable to rsync --list-only invocation In rsync >= 3.1.0 the --list-only format changed, adding digit groupings by default in the "size" field. To obtain the pre 3.1.0 behavior you need to add --no-human-readable Ref: http://ftp.samba.org/pub/rsync/src/rsync-3.1.0-NEWS 2014-07-09 Marco Nenciarini Log any hook script failure with its output at warning level 2014-07-08 Gabriele Bartolini Wraps xlogdb() code in a try/finally block 2014-06-28 Marco Nenciarini Fix wait parameter logic in LockFile class In previous versions the wait argument on the LockFile constructor was mistakenly ignored, actually preventing the usage of a waiting lock through the Context Manager interface Always use utils.mkpath() to create directories that could already exist Minor changes: - In retry_backup_copy log the exception which caused the failure 2014-06-27 Marco Nenciarini Really ignore vanished files errors in rsync smart copy routine 2014-06-27 Gabriele Bartolini Added info messages for the four phases of the new rsync smart copy Minor changes: - Fix unit tests for basebackup_retry_* config values Updated documentation for 1.3.3-alpha1 Set default for basebackup_retry_times to 0 For compatibility with previous Barman versions, set basebackup_retry_times to 0 as default value.
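The retry behaviour behind basebackup_retry_times and basebackup_retry_sleep (and the --retry-times and --retry-sleep switches above) amounts to a loop like the following sketch; copy_with_retry, perform_copy and rsync_copy are illustrative names, not Barman's actual internals:

    import time

    def copy_with_retry(perform_copy, retry_times=0, retry_sleep=10):
        # Attempt perform_copy(); after a failure, retry up to retry_times
        # more times, sleeping retry_sleep seconds between attempts. With
        # the default retry_times=0 the copy is attempted exactly once.
        attempt = 0
        while True:
            try:
                return perform_copy()
            except Exception as exc:
                if attempt >= retry_times:
                    raise  # no attempts left: propagate the failure
                attempt += 1
                print("copy failed (%s): retry %d of %d in %d seconds" %
                      (exc, attempt, retry_times, retry_sleep))
                time.sleep(retry_sleep)

    # Example: one initial attempt plus two retries, five seconds apart.
    # rsync_copy is a hypothetical callable standing in for the real copy:
    # copy_with_retry(lambda: rsync_copy(src, dst), retry_times=2, retry_sleep=5)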
2014-06-26 Giulio Calacoci Make sure timestamps are tz-aware anywhere in the code Minor changes: - Add basic unit tests for retention policies 2014-06-26 Marco Nenciarini Close all open descriptors but std{in,out,err} when spawning a child process Minor changes: - Remove some dead code - fix missing 'last_backup_maximum_age' as global option 2014-06-24 Gabriele Bartolini Display compression ratio for WALs in show-backup 2014-06-23 Giulio Calacoci Improved Nagios output for check command 2014-06-25 Giulio Calacoci Manage KeyboardInterrupt exception in 'barman backup' 2014-06-23 Gabriele Bartolini Added support for PostgreSQL 8.3 2014-06-24 Marco Nenciarini Updated rpm packaging spec to work with pre-releases Minor changes: - add rsync dependency to barman.spec file 2014-05-29 Giulio Calacoci Support for comma separated list options Added support for a new data type in configuration options: comma separated list values. The first option to be implemented is backup_options, now accepting a list of values. 2014-06-18 Marco Nenciarini Decode binary strings in command_wrapper This fixes python2.7 and python3 compatibility Minor changes: - make scripts/release.sh python3 compatible 2014-06-10 Giulio Calacoci Support for 'last_backup_max_age' This new global/server option allows administrators to set the max age of the last backup, making it easier to detect any issues with periodical backup execution. 2014-06-18 Marco Nenciarini Support for "smart" incremental recovery Avoid invoking rsync with --checksum option during recovery, while maintaining the same level of safety by splitting the copy operation in multiple steps. Barman will only use the --checksum option on files having identical time and size that have been modified after the start of the backup. This change greatly improves the speed of "incremental" recovery. Minor changes: - disable --checksum even for backup. During a backup the rsync destination directory is empty, so it is safe to go with a plain rsync - Put a ".barman-recover.info" with backup metadata inside the destination directory during recover. Use Postgres' server time for both begin_time and end_time Minor changes: - make sure exceptions during backup are logged with stacktraces - commit on disk the backup status just after issuing the PostgreSQL start_backup command Change version to 1.3.3-alpha.1 2014-06-09 Giulio Calacoci Added fsync() for backup and cron operations 2014-06-06 Marco Nenciarini Fix parsing of 'basebackup_retry_times' and 'basebackup_retry_sleep' options 2014-05-30 Giulio Calacoci Fix for #43 recovery.conf not copied on remote recovery 2014-05-08 Giulio Calacoci Retry option for base backup If a network error happens during rsync, add the ability to retry a defined number of time. Two options have been added: * basebackup_retry_times: INT (> 0, default 1) maximum number or retry before giving up * basebackup_retry_sleep: INT (> 0, default 10) wait time (seconds) before retrying, after an error 2014-05-29 Marco Nenciarini Improve robustness of backup code Improve error message about stop_backup failure 2014-04-23 Giulio Calacoci fixed missing pre/post archive parameters. 
#41 on sourceforge 2014-04-15 Marco Nenciarini Update the ChangeLog file Update unit tests to match current rsync flags 2014-04-15 Gabriele Bartolini Prepared source code for version 1.3.2 2014-04-15 Gabriele Bartolini Added checks for pg_extension (>= 9.1) and pg_is_in_recovery (>= 9.0) 2014-04-11 Marco Nenciarini Update the ChangeLog file 2014-04-10 Marco Nenciarini Always pass --checksum to rsync invocations Emit a warning if backup_options is set to an invalid value Clarify some "permission denied" errors 2014-04-08 Gabriele Bartolini Cosmetic change: Pgespresso -> pgespresso 2014-04-07 Marco Nenciarini Update RPM spec file for 1.3.1 2014-04-04 Gabriele Bartolini Prepared documentation for version 1.3.1 2014-04-04 Marco Nenciarini Fix 'barman diagnose' python3 support Improved logging and error reporting 2014-04-03 Gabriele Bartolini Fixed SourceForge bug #36: Unhandled exception for minimum redundancy 2014-04-03 Giulio Calacoci Empty strings are now treated as None in Barman configuration 2014-04-02 Marco Nenciarini Removed spurious "file not found" message in cron output Add release information to 'barman diagnose' Sort 'barman show-server' output Use a Tablespace object to carry tablespace information 2014-03-26 Giulio Calacoci Protect during recovery tablespaces inside PGDATA * When performing a recovery operation, tablespaces that will be recovered inside the new destination directory (PGDATA) are 'protected' by rsync. This avoids overwrites by rsync when copying PGDATA content. * Add debug messages to FS class 2014-03-24 Gabriele Bartolini Implementation of 'barman diagnose' command (JSON output) 2014-03-21 Gabriele Bartolini Concurrent backup using the 'pgespresso' extension * Fix bwlimit tablespaces backup (missing destination directory) * Purge unused wal files at first backup in concurrent mode * Exclusion of recovery.conf during backup 2014-03-19 Marco Nenciarini Fix unhandled exception in recover when destination dir is not writable 2014-02-19 Marco Nenciarini Make the -q command line switch work again Also demote "another cron is running" message from error to info level. 2014-02-02 Marco Nenciarini Update the ChangeLog file Update RPM spec file for release 1.3.0 Review of NEWS and AUTHORS files 2014-01-31 Gabriele Bartolini Updated files for final release 2014-01-30 Marco Nenciarini Improve error messages during remote recovery 2014-01-29 Marco Nenciarini Use fsync to avoid xlog.db file corruption (Closes #32) Add network_compression configuration option (Closes #19) When network_compression is enabled, all network transfers are done using compression (if available). 2014-01-29 Gabriele Bartolini Check directories exist before executing a backup (#14) 2014-01-28 Giulio Calacoci Reduce log verbosity during initialisation phase 2014-01-28 Gabriele Bartolini Load configuration files after logger initialisation 2014-01-21 Marco Nenciarini Avoid tablespaces inside the pgdata directory being copied twice 2014-01-09 Marco Nenciarini Generalise recovery operations (local/remote) 2014-01-28 Gabriele Bartolini Reviewed documentation of WAL archive hook scripts 2014-01-07 Marco Nenciarini Add pre_archive_script and post_archive_script hook scripts 2014-01-23 Marco Nenciarini Refactor the LockFile management class to report permission errors.
Fix 'Invalid cross-device link' error in cron when incoming is on a different filesystem (merge request #4 by Holger Hamann) 2014-01-22 Marco Nenciarini Port 'show-server' command to the new output interface 2014-01-21 Giulio Calacoci Updated copyright (2014) 2014-01-17 Marco Nenciarini Port 'status' and 'list-server' commands to the new output interface 2014-01-09 Marco Nenciarini Port the 'show-backup' command to the new output interface 2014-01-16 Giulio Calacoci Added implementation for backup command --immediate-checkpoint option and immediate_checkpoint configuration option 2014-01-08 Gabriele Bartolini Bump version number and add release notes for 1.3.0 2013-11-27 Giulio Calacoci Add unit tests for infofile and compression modules Fix some python3 compatibility bugs highlighted by the tests 2013-10-18 Marco Nenciarini Move barman._pretty_size() to barman.utils.pretty_size() 2014-01-03 Marco Nenciarini Implement BackupInfo as a FieldListFile and move it into the infofile module. 2014-01-07 Marco Nenciarini Refactor output to a dedicated module. The following commands have been ported to the new interface: * backup * check * list-backup A special NagiosOutputWriter has been added to support Nagios compatible output for the check command WARNING: this code doesn't run due to a circular dependency. The issue will be fixed in the next commit 2013-09-12 Marco Nenciarini Isolate subprocesses' stdin/stdout in command_wrappers module 2014-01-07 Marco Nenciarini Refactor hooks management 2013-09-12 Marco Nenciarini Split out logging configuration and userid enforcement from the configuration class. 2013-12-16 Gabriele Bartolini Added rebuild-xlogdb command man page 2013-11-08 Marco Nenciarini Implement the rebuild-xlogdb command. (Closes #27) 2013-11-19 Giulio Calacoci added documentation for tablespaces relocation (#22) 2013-10-30 Gabriele Bartolini Added TODO list 2013-09-05 Marco Nenciarini Update the ChangeLog file Bump version to 1.2.3 2013-08-29 Gabriele Bartolini Updated README and man page Added stub of release notes 2013-08-26 Marco Nenciarini Initial Python 3 support Update setup.py to support py.test and recent setuptools 2013-08-24 Damon Snyder 27: Addresses potential corruption of WAL xlog.db files. In barman.lockfile.release() the file is unlinked (deleted). This effectively nullifies any future attempts to lock the file by a blocking process by deleting the open file table entry upon which the flock is based. This commit removes the unlink and instead unlocks the file and then closes the file descriptor leaving the lock file and open file table entry intact. 2013-08-22 Marco Nenciarini Add support for restore target name (PostgreSQL 9.1+) 2013-08-21 Marco Nenciarini PostgreSQL version in backup.info file is an integer Make WAL sequence calculation compatible with PostgreSQL 9.3 With PostgreSQL 9.3 WAL files are written in a continuous stream, rather than skipping the last 16MB segment every 4GB, meaning WAL filenames may end in FF.
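The lock file fix described in the 2013-08-24 entry above rests on a property of flock(2): the lock lives on the open file description, so unlinking the lock file orphans the entry that waiting processes are blocked on and lets a second process "lock" a brand new file with the same name. A minimal sketch of the safe pattern follows; SimpleLockFile is an illustrative class, not Barman's LockFile API:

    import fcntl
    import os

    class SimpleLockFile(object):
        # Illustrative advisory lock built on flock(2): acquire() creates
        # the file if needed, release() unlocks WITHOUT unlinking it.

        def __init__(self, path):
            self.path = path
            self.fd = None

        def acquire(self, wait=False):
            self.fd = os.open(self.path, os.O_CREAT | os.O_RDWR, 0o600)
            flags = fcntl.LOCK_EX if wait else fcntl.LOCK_EX | fcntl.LOCK_NB
            try:
                fcntl.flock(self.fd, flags)
                return True
            except IOError:
                os.close(self.fd)
                self.fd = None
                return False

        def release(self):
            if self.fd is None:
                return
            # Never unlink here: deleting the file would orphan the open
            # file table entry other processes may be blocked on, allowing
            # two processes to hold the "same" lock at once.
            fcntl.flock(self.fd, fcntl.LOCK_UN)
            os.close(self.fd)
            self.fd = None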
2013-06-24 Marco Nenciarini Update the ChangeLog file Fix config file parser tests Bump version to 1.2.2 Fix python 2.6 compatibility Fix history in spec file 2013-06-17 Marco Nenciarini Update RPM spec file 2013-06-13 Marco Nenciarini Update the ChangeLog file Fix remote recovery with bwlimit on a tablespace 2013-06-07 Marco Nenciarini Added the "tablespace_bandwidth_limit" option 2013-06-12 Gabriele Bartolini Updated docs and man pages for 1.2.1 Prepared NEWS file for 1.2.1 release 2013-04-26 Gabriele Bartolini Added the "bandwidth_limit" global/server option which allows you to limit the I/O bandwidth (in KBPS) for backup and recovery operations Added /etc/barman/barman.conf as default location 2013-03-13 Gabriele Bartolini Removed duplicate message for previous backup in show command 2013-03-07 Gabriele Bartolini Cosmetic change in message for "all" reserved section 2013-02-08 Marco Nenciarini Avoid triggering the minimum_redundancy check on FAILED backups Add BARMAN_VERSION to hook script environment 2013-01-31 Marco Nenciarini Update the ChangeLog file Update RPM's spec files 2013-01-30 Gabriele Bartolini Finalised files for version 1.2.0 2013-01-28 Marco Nenciarini Forbid the usage of the word 'all' as a server name 2013-01-11 Gabriele Bartolini Added basic support for Nagios plugin output for check command through the --nagios option 2013-01-28 Marco Nenciarini Add @expects_obj decorator to cli function as required by the upcoming Argh 1.0 API 2013-01-11 Marco Nenciarini Migrate to the new argh API. Now barman requires argh >= 0.21.2 and argcomplete 2013-01-11 Gabriele Bartolini Prepared release notes 2012-12-18 Marco Nenciarini Fix typo in doc/barman.conf 2012-12-14 Marco Nenciarini Return failure exit code if backup command fails in any way 2012-12-14 Gabriele Bartolini Prepared copyright lines for 2013 Updated documentation and man pages Added retention policy examples in configuration file 2012-12-13 Marco Nenciarini Q/A on retention policy code 2012-12-12 Marco Nenciarini Fix configuration parser unit tests Exit with error if an invalid server name is passed in any command which takes a list of servers 2012-12-08 Gabriele Bartolini Add retention status to show-backup and list-backup commands Auto-management of retention policies for base backups Using the report() method for retention policies, enforce retention policy through cron (if policy mode is 'auto'), by deleting OBSOLETE backups. Retention status and report() method for retention policies Created the following states for retention policies: VALID, OBSOLETE, NONE and POTENTIALLY_OBSOLETE (an object which is OBSOLETE but cannot be removed automatically due to minimum_redundancy requirements). Created the report() method for the retention policy base class, which executes the _backup_report() method for base backups and the _wal_report() method for WAL retention policies (currently not enforced). The report method iterates through the DONE backups and, according to the retention policy, classifies each backup. RedundancyRetentionPolicy uses the number of backups, RecoveryWindowRetentionPolicy uses the time window and the recoverability point concept. Integrated minimum_redundancy with "barman check" Initialisation of retention policies for a server Added the _init_retention_policies() method in the Server class constructor, which integrates with the new RetentionPolicy classes and performs syntax checking. Integrated retention policies with log, 'barman check' and 'barman status'.
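The report() classification just described can be sketched for the redundancy case as follows; redundancy_report and the constant names are illustrative stand-ins, with the sole assumption that timestamp backup IDs sort chronologically:

    # Simplified model of redundancy-based classification: the newest
    # `redundancy` backups are VALID, the rest are OBSOLETE unless
    # minimum_redundancy protects them.
    VALID = 'VALID'
    OBSOLETE = 'OBSOLETE'
    POTENTIALLY_OBSOLETE = 'POTENTIALLY_OBSOLETE'

    def redundancy_report(backup_ids, redundancy, minimum_redundancy=0):
        report = {}
        # Timestamp backup IDs (e.g. 20121208T130001) sort chronologically
        for i, backup_id in enumerate(sorted(backup_ids, reverse=True)):
            if i < redundancy:
                report[backup_id] = VALID
            elif i < minimum_redundancy:
                # Obsolete by policy, but protected by minimum_redundancy
                report[backup_id] = POTENTIALLY_OBSOLETE
            else:
                report[backup_id] = OBSOLETE
        return report

    print(sorted(redundancy_report(
        ['20121201T130001', '20121205T130001', '20121208T130001'],
        redundancy=2).items()))
    # [('20121201T130001', 'OBSOLETE'), ('20121205T130001', 'VALID'),
    #  ('20121208T130001', 'VALID')]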
String representation conforms to retention syntax The string representation produces now a syntax-valid retention policy configuration string. The previous __str__ method has been renamed into debug() SimpleWALRetentionPolicy objects are now created from the server's main retention policy by the factory class. 2012-12-07 Gabriele Bartolini Add the global/server option minimum_redundancy. Check it is >= 0. Guarantees that when delete is performed (or retention policies are enforced), this is the minimum number of backups to be kept for that server. Add support for retention_policy_mode global/server option which defines the method for enforcing retention policies (currently only "auto", in future versions "manual" will be allowed) Added first stub of retention policy classes Started version 1.2.0 2012-12-04 Marco Nenciarini Fix unit config tests Update the ChangeLog file Add ssl_*_file and unix_socket_directory to dangerous options list Display tablespace's oid in show-backup output Alphabetically sort servers in all commands output Don't give up on first error in 'barman check all' command 2012-12-03 Gabriele Bartolini Added sorting of files in configuration directory 2012-11-29 Marco Nenciarini Fix regression in barman check command when configuration_files_directory is None Update rpm files to 1.1.2 release 2012-11-29 Carlo Ascani Update README 2012-11-29 Gabriele Bartolini Prepared files for release 2012-11-28 Gabriele Bartolini Add the configuration_files_directory option which allows to include multiple files from a directory 2012-11-29 Carlo Ascani Update README 2012-11-28 Marco Nenciarini Update NEWS file 2012-11-05 Gabriele Bartolini Added support for list-backup all 2012-11-04 Gabriele Bartolini Added latest/oldest for show-backup, delete, list-files and recover commands Added get_first_backup and get_last_backup functions to Server class Added application_name management for PostgreSQL >= 9.0 2012-11-13 Gabriele Bartolini Switched to version 1.1.2 Continue if a WAL file is not found during delete (bug #18) 2012-11-04 Gabriele Bartolini Includes version 90200 for tablespace new function 2012-10-16 Marco Nenciarini Update the ChangeLog file Update NEWS file and rpm package Bump version to 1.1.1 Add more information about the failing line in xlogdb_parse_line errors 2012-10-15 Marco Nenciarini Fix two bug on recover command 2012-10-12 Marco Nenciarini Update the ChangeLog file Update rpm changelog Make recover fail if an invalid tablespace relocation rule is given Remove unused imports from cli.py 2012-10-11 Gabriele Bartolini Updated version to 1.1.0 Fixes bug #12 2012-10-11 Marco Nenciarini Fail fast on recover command if the destination directory contains the ':' character (Closes: #4) Fix typo in recovery messages Report an informative message when pg_start_backup() invocation fails because an exclusive backup is already running (Closes: #8) Make current_action an attribute of BackupManager class 2012-10-08 Gabriele Bartolini Added ticket #10 to NEWS Add pg_config_detect_possible_issues function for issue #10 2012-10-04 Gabriele Bartolini Updated NEWS file with bug fixing #9 Fixes issue #9 on pg_tablespace_location() for 9.2 2012-08-31 Marco Nenciarini Add BARMAN_PREVIOUS_ID variable to hooks environment 2012-08-20 Marco Nenciarini Merge spec changes from Devrim Add BARMAN_ERROR and BARMAN_STATUS variables to hook's environment Added backup all documentation to README 2012-08-20 Gabriele Bartolini Updated release notes Set version to 1.0.1 2012-08-20 Marco Nenciarini Document 
{pre,post}_backup_script in README Document {pre,post}_backup_script in configuration man-page 2012-08-17 Marco Nenciarini Add pre/post backup hook scripts definition (Closes: #7) Add the possibility to manage hook scripts before and after a base backup. Add the global (overridden per server) configuration options called: * pre_backup_script: executed before a backup * post_backup_script: executed after a backup Use the environment to pass at least the following variabiles: * BARMAN_BACKUP_DIR: backup destination directory * BARMAN_BACKUP_ID: ID of the backup * BARMAN_CONFIGURATION: configuration file used by barman * BARMAN_PHASE: 'pre' or 'post' * BARMAN_SERVER: name of the server The script definition is passed to the shell and can return any exit code. Barman won't perform any exit code check. It will simply log the result in the log file. To test it you can try adding pre_backup_script = env | grep ^BARMAN post_backup_script = env | grep ^BARMAN in your barman config and you'll see the variables on console. 2012-08-16 Marco Nenciarini Add documentation for 'backup all' command. 2012-07-19 Gabriele Bartolini Add 'backup all' shortcut and, in general, multiple servers specification (issue #1) Add 'backup all' shortcut and, in general, multiple servers specification (issue #1) 2012-07-16 Gabriele Bartolini Fixed typo (thanks to Daymel Bonne Solís) 2012-07-06 Marco Nenciarini Initial commit barman-1.5.1/doc/0000755000076500000240000000000012621417067013026 5ustar mnenciastaffbarman-1.5.1/doc/barman.10000644000076500000240000003072412621416313014347 0ustar mnenciastaff.\" Automatically generated by Pandoc 1.15.1.1 .\" .hy .TH "BARMAN" "5" "November 16, 2015" "Barman User manuals" "Version 1.5.1" .SH NAME .PP barman \- Backup and Recovery Manager for PostgreSQL .SH SYNOPSIS .PP barman [\f[I]OPTIONS\f[]] \f[I]COMMAND\f[] .SH DESCRIPTION .PP barman is an administration tool for disaster recovery of PostgreSQL servers written in Python. barman can perform remote backups of multiple servers in business critical environments and helps DBAs during the recovery phase. .SH OPTIONS .TP .B \-v, \-\-version Show program version number and exit. .RS .RE .TP .B \-q, \-\-quiet Do not output anything. Useful for cron scripts. .RS .RE .TP .B \-h, \-\-help Show a help message and exit. .RS .RE .TP .B \-c \f[I]CONFIG\f[], \-\-config \f[I]CONFIG\f[] Use the specified configuration file. .RS .RE .SH COMMANDS .PP Important: every command has a help option .TP .B archive\-wal \f[I]SERVER_NAME\f[] Archive the incoming WAL files for \f[C]SERVER_NAME\f[], moving them in the archive, while applying compression if requested. .RS .RE .TP .B cron Perform maintenance tasks, such as enforcing retention policies or WAL files management. .RS .RE .TP .B list\-server Show all the configured servers, and their descriptions. .RS .RE .TP .B show\-server \f[I]SERVER_NAME\f[] Show information about \f[C]SERVER_NAME\f[], including: \f[C]conninfo\f[], \f[C]backup_directory\f[], \f[C]wals_directory\f[] and many more. Specify \f[C]all\f[] as \f[C]SERVER_NAME\f[] to show information about all the configured servers. .RS .RE .TP .B status \f[I]SERVER_NAME\f[] Show information about the status of a server, including: number of available backups, \f[C]archive_command\f[], \f[C]archive_status\f[] and many more. 
For example: .RS .RE .IP .nf \f[C] Server\ quagmire: \ \ Description:\ The\ Giggity\ database \ \ Passive\ node:\ False \ \ PostgreSQL\ version:\ 9.3.9 \ \ pgespresso\ extension:\ Not\ available \ \ PostgreSQL\ Data\ directory:\ /srv/postgresql/9.3/data \ \ PostgreSQL\ \[aq]archive_command\[aq]\ setting:\ rsync\ \-a\ %p\ barman\@backup:/var/lib/barman/quagmire/incoming \ \ Last\ archived\ WAL:\ 0000000100003103000000AD \ \ Current\ WAL\ segment:\ 0000000100003103000000AE \ \ Retention\ policies:\ enforced\ (mode:\ auto,\ retention:\ REDUNDANCY\ 2,\ WAL\ retention:\ MAIN) \ \ No.\ of\ available\ backups:\ 2 \ \ First\ available\ backup:\ 20150908T003001 \ \ Last\ available\ backup:\ 20150909T003001 \ \ Minimum\ redundancy\ requirements:\ satisfied\ (2/1) \f[] .fi .TP .B check \f[I]SERVER_NAME\f[] Show diagnostic information about \f[C]SERVER_NAME\f[], including: ssh connection check, PostgreSQL version, configuration and backup directories. Specify \f[C]all\f[] as \f[C]SERVER_NAME\f[] to show diagnostic information about all the configured servers. .RS .TP .B \-\-nagios Nagios plugin compatible output .RS .RE .RE .TP .B diagnose Collect diagnostic information about the server where barman is installed and all the configured servers, including: global configuration, SSH version, Python version, \f[C]rsync\f[] version, as well as current configuration and status of all servers. .RS .RE .TP .B backup \f[I]SERVER_NAME\f[] Perform a backup of \f[C]SERVER_NAME\f[] using parameters specified in the configuration file. Specify \f[C]all\f[] as \f[C]SERVER_NAME\f[] to perform a backup of all the configured servers. .RS .TP .B \-\-immediate\-checkpoint forces the initial checkpoint to be done as quickly as possible. Overrides value of the parameter \f[C]immediate_checkpoint\f[], if present in the configuration file. .RS .RE .TP .B \-\-no\-immediate\-checkpoint forces to wait for the checkpoint. Overrides value of the parameter \f[C]immediate_checkpoint\f[], if present in the configuration file. .RS .RE .TP .B \-\-reuse\-backup [INCREMENTAL_TYPE] Overrides \f[C]reuse_backup\f[] option behaviour. Possible values for \f[C]INCREMENTAL_TYPE\f[] are: .RS .IP \[bu] 2 \f[I]off\f[]: do not reuse the last available backup; .IP \[bu] 2 \f[I]copy\f[]: reuse the last available backup for a server and create a copy of the unchanged files (reduce backup time); .IP \[bu] 2 \f[I]link\f[]: reuse the last available backup for a server and create a hard link of the unchanged files (reduce backup time and space); .PP \f[C]link\f[] is the default target if \f[C]\-\-reuse\-backup\f[] is used and \f[C]INCREMENTAL_TYPE\f[] is not explicited. .RE .TP .B \-\-retry\-times Number of retries of base backup copy, after an error. Used during both backup and recovery operations. Overrides value of the parameter \f[C]basebackup_retry_times\f[], if present in the configuration file. .RS .RE .TP .B \-\-no\-retry Same as \f[C]\-\-retry\-times\ 0\f[] .RS .RE .TP .B \-\-retry\-sleep Number of seconds of wait after a failed copy, before retrying. Used during both backup and recovery operations. Overrides value of the parameter \f[C]basebackup_retry_sleep\f[], if present in the configuration file. .RS .RE .RE .TP .B list\-backup \f[I]SERVER_NAME\f[] Show available backups for \f[C]SERVER_NAME\f[]. This command is useful to retrieve a backup ID. 
For example: .RS .RE .IP .nf \f[C] servername\ 20111104T102647\ \-\ Fri\ Nov\ \ 4\ 10:26:48\ 2011\ \-\ Size:\ 17.0\ MiB\ \-\ WAL\ Size:\ 100\ B \f[] .fi .IP .nf \f[C] In\ this\ case,\ *20111104T102647*\ is\ the\ backup\ ID. \f[] .fi .TP .B show\-backup \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Show detailed information about a particular backup, identified by the server name and the backup ID. See the Backup ID shortcuts (#shortcuts) section below for available shortcuts. For example: .RS .RE .IP .nf \f[C] Backup\ 20150828T130001: \ \ Server\ Name\ \ \ \ \ \ \ \ \ \ \ \ :\ quagmire \ \ Status\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ :\ DONE \ \ PostgreSQL\ Version\ \ \ \ \ :\ 90402 \ \ PGDATA\ directory\ \ \ \ \ \ \ :\ /srv/postgresql/9.4/main/data \ \ Base\ backup\ information: \ \ \ \ Disk\ usage\ \ \ \ \ \ \ \ \ \ \ :\ 12.4\ TiB\ (12.4\ TiB\ with\ WALs) \ \ \ \ Incremental\ size\ \ \ \ \ :\ 4.9\ TiB\ (\-60.02%) \ \ \ \ Timeline\ \ \ \ \ \ \ \ \ \ \ \ \ :\ 1 \ \ \ \ Begin\ WAL\ \ \ \ \ \ \ \ \ \ \ \ :\ 0000000100000CFD000000AD \ \ \ \ End\ WAL\ \ \ \ \ \ \ \ \ \ \ \ \ \ :\ 0000000100000D0D00000008 \ \ \ \ WAL\ number\ \ \ \ \ \ \ \ \ \ \ :\ 3932 \ \ \ \ WAL\ compression\ ratio:\ 79.51% \ \ \ \ Begin\ time\ \ \ \ \ \ \ \ \ \ \ :\ 2015\-08\-28\ 13:00:01.633925+00:00 \ \ \ \ End\ time\ \ \ \ \ \ \ \ \ \ \ \ \ :\ 2015\-08\-29\ 10:27:06.522846+00:00 \ \ \ \ Begin\ Offset\ \ \ \ \ \ \ \ \ :\ 1575048 \ \ \ \ End\ Offset\ \ \ \ \ \ \ \ \ \ \ :\ 13853016 \ \ \ \ Begin\ XLOG\ \ \ \ \ \ \ \ \ \ \ :\ CFD/AD180888 \ \ \ \ End\ XLOG\ \ \ \ \ \ \ \ \ \ \ \ \ :\ D0D/8D36158 \ \ WAL\ information: \ \ \ \ No\ of\ files\ \ \ \ \ \ \ \ \ \ :\ 35039 \ \ \ \ Disk\ usage\ \ \ \ \ \ \ \ \ \ \ :\ 121.5\ GiB \ \ \ \ WAL\ rate\ \ \ \ \ \ \ \ \ \ \ \ \ :\ 275.50/hour \ \ \ \ Compression\ ratio\ \ \ \ :\ 77.81% \ \ \ \ Last\ available\ \ \ \ \ \ \ :\ 0000000100000D95000000E7 \ \ Catalog\ information: \ \ \ \ Retention\ Policy\ \ \ \ \ :\ not\ enforced \ \ \ \ Previous\ Backup\ \ \ \ \ \ :\ 20150821T130001 \ \ \ \ Next\ Backup\ \ \ \ \ \ \ \ \ \ :\ \-\ (this\ is\ the\ latest\ base\ backup) \f[] .fi .TP .B list\-files \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] List all the files in a particular backup, identified by the server name and the backup ID. See the Backup ID shortcuts (#shortcuts) section below for available shortcuts. .RS .TP .B \-\-target \f[I]TARGET_TYPE\f[] Possible values for TARGET_TYPE are: .RS .IP \[bu] 2 \f[I]data\f[]: lists just the data files; .IP \[bu] 2 \f[I]standalone\f[]: lists the base backup files, including required WAL files; .IP \[bu] 2 \f[I]wal\f[]: lists all the WAL files between the start of the base backup and the end of the log / the start of the following base backup (depending on whether the specified base backup is the most recent one available); .IP \[bu] 2 \f[I]full\f[]: same as data + wal. .PP The default value is \f[C]standalone\f[]. .RE .RE .TP .B rebuild\-xlogdb \f[I]SERVER_NAME\f[] Perform a rebuild of the WAL file metadata for \f[C]SERVER_NAME\f[] (or every server, using the \f[C]all\f[] shortcut) guessing it from the disk content. The metadata of the WAL archive is contained in the \f[C]xlog.db\f[] file, and every Barman server has its own copy. .RS .RE .TP .B recover \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] \f[I]DESTINATION_DIRECTORY\f[] Recover a backup in a given directory (local or remote, depending on the \f[C]\-\-remote\-ssh\-command\f[] option settings). See the Backup ID shortcuts (#shortcuts) section below for available shortcuts. 
.RS .TP .B \-\-target\-tli \f[I]TARGET_TLI\f[] Recover the specified timeline. .RS .RE .TP .B \-\-target\-time \f[I]TARGET_TIME\f[] Recover to the specified time. .RS .PP You can use any valid unambiguous representation (e.g: "YYYY\-MM\-DD HH:MM:SS.mmm"). .RE .TP .B \-\-target\-xid \f[I]TARGET_XID\f[] Recover to the specified transaction ID. .RS .RE .TP .B \-\-target\-name \f[I]TARGET_NAME\f[] Recover to the named restore point previously created with the \f[C]pg_create_restore_point(name)\f[] (for PostgreSQL 9.1 and above users). .RS .RE .TP .B \-\-exclusive Set target xid to be non inclusive. .RS .RE .TP .B \-\-tablespace \f[I]NAME:LOCATION\f[] Specify tablespace relocation rule. .RS .RE .TP .B \-\-remote\-ssh\-command \f[I]SSH_COMMAND\f[] This options activates remote recovery, by specifying the secure shell command to be launched on a remote host. This is the equivalent of the "ssh_command" server option in the configuration file for remote recovery. Example: \[aq]ssh postgres\@db2\[aq]. .RS .RE .TP .B \-\-retry\-times Number of retries of data copy during base backup after an error. Overrides value of the parameter \f[C]basebackup_retry_times\f[], if present in the configuration file. .RS .RE .TP .B \-\-no\-retry Same as \f[C]\-\-retry\-times\ 0\f[] .RS .RE .TP .B \-\-retry\-sleep Number of seconds of wait after a failed copy, before retrying. Overrides value of the parameter \f[C]basebackup_retry_sleep\f[], if present in the configuration file. .RS .RE .RE .TP .B get\-wal \f[I][OPTIONS]\f[] \f[I]SERVER_NAME\f[] \f[I]WAL_ID\f[] Retrieve a WAL file from the \f[C]xlog\f[] archive of a given server. By default, the requested WAL file, if found, is returned as uncompressed content to \f[C]STDOUT\f[]. The following options allow users to change this behaviour: .RS .TP .B \-o \f[I]OUTPUT_DIRECTORY\f[] destination directory where the \f[C]get\-wal\f[] will deposit the requested WAL .RS .RE .TP .B \-j output will be compressed using gzip .RS .RE .TP .B \-x output will be compressed using bzip2 .RS .RE .RE .TP .B delete \f[I]SERVER_NAME\f[] \f[I]BACKUP_ID\f[] Delete the specified backup. Backup ID shortcuts (#shortcuts) section below for available shortcuts. .RS .RE .SH BACKUP ID SHORTCUTS .PP Rather than using the timestamp backup ID, you can use any of the following shortcuts/aliases to identity a backup for a given server: .TP .B first Oldest available backup for that server, in chronological order. .RS .RE .TP .B last Latest available backup for that server, in chronological order. .RS .RE .TP .B latest same ast \f[I]last\f[]. .RS .RE .TP .B oldest same ast \f[I]first\f[]. .RS .RE .SH EXIT STATUS .TP .B 0 Success .RS .RE .TP .B Not zero Failure .RS .RE .SH SEE ALSO .PP \f[C]barman\f[] (5). .SH BUGS .PP Barman has been extensively tested, and is currently being used in several production environments. However, we cannot exclude the presence of bugs. .PP Any bug can be reported via the Sourceforge bug tracker. Along the bug submission, users can provide developers with diagnostics information obtained through the \f[C]barman\ diagnose\f[] command. 
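.PP
The resolution of the shortcuts listed in the BACKUP ID SHORTCUTS section
above can be sketched in a few lines of Python (an illustrative model only,
not Barman\[aq]s internal API; it merely assumes that timestamp backup IDs
sort chronologically):
.IP
.nf
\f[C]
def\ resolve_backup_id(available_ids,\ requested):
\ \ \ \ #\ Timestamp\ IDs\ such\ as\ 20111104T102647\ sort\ chronologically
\ \ \ \ ordered\ =\ sorted(available_ids)
\ \ \ \ if\ requested\ in\ (\[aq]first\[aq],\ \[aq]oldest\[aq]):
\ \ \ \ \ \ \ \ return\ ordered[0]
\ \ \ \ if\ requested\ in\ (\[aq]last\[aq],\ \[aq]latest\[aq]):
\ \ \ \ \ \ \ \ return\ ordered[\-1]
\ \ \ \ return\ requested\ \ #\ assume\ a\ literal\ backup\ ID\ was\ given
\f[]
.fi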
.SH AUTHORS .PP In alphabetical order: .IP \[bu] 2 Gabriele Bartolini (project leader) .IP \[bu] 2 Stefano Bianucci (developer) .IP \[bu] 2 Giuseppe Broccolo (QA/testing) .IP \[bu] 2 Giulio Calacoci (developer) .IP \[bu] 2 Francesco Canovai (QA/testing) .IP \[bu] 2 Gianni Ciolli (QA/testing) .IP \[bu] 2 Marco Nenciarini (lead developer) .PP Past contributors: .IP \[bu] 2 Carlo Ascani .SH RESOURCES .IP \[bu] 2 Homepage: <http://www.pgbarman.org/> .IP \[bu] 2 Documentation: <http://docs.pgbarman.org/> .SH COPYING .PP Barman is the exclusive property of 2ndQuadrant Italia and its code is distributed under GNU General Public License v3. .PP Copyright (C) 2011\-2015 2ndQuadrant Italia Srl \- http://www.2ndQuadrant.it/. .SH AUTHORS 2ndQuadrant Italy <http://www.2ndQuadrant.it/>. barman-1.5.1/doc/barman.50000644000076500000240000003020312621416313014353 0ustar mnenciastaff.\" Automatically generated by Pandoc 1.15.1.1 .\" .hy .TH "BARMAN" "5" "November 16, 2015" "Barman User manuals" "Version 1.5.1" .SH NAME .PP barman \- backup and recovery manager for PostgreSQL .SH CONFIGURATION FILE LOCATIONS .PP The system\-level Barman configuration file is located at .IP .nf \f[C] /etc/barman.conf \f[] .fi .PP or .IP .nf \f[C] /etc/barman/barman.conf \f[] .fi .PP and is overridden on a per\-user level by .IP .nf \f[C] $HOME/.barman.conf \f[] .fi .SH CONFIGURATION FILE SYNTAX .PP The Barman configuration file is a plain \f[C]INI\f[] file. There is a general section called \f[C][barman]\f[] and a section \f[C][servername]\f[] for each server you want to back up. Lines starting with \f[C];\f[] are comments. .SH CONFIGURATION FILE DIRECTORY .PP Barman supports the inclusion of multiple configuration files, through the \f[C]configuration_files_directory\f[] option. Included files must contain only server specifications, not global configurations. If the value of \f[C]configuration_files_directory\f[] is a directory, Barman reads all files with a \f[C]\&.conf\f[] extension in that directory. For example, if you set it to \f[C]/etc/barman.d\f[], you can specify your PostgreSQL servers by placing each section in a separate \f[C]\&.conf\f[] file inside the \f[C]/etc/barman.d\f[] folder. .SH OPTIONS .TP .B active Ignored. Server. .RS .RE .TP .B description A human\-readable description of a server. Server. .RS .RE .TP .B ssh_command Command used by Barman to log in to the Postgres server via ssh. Server. .RS .RE .TP .B conninfo Connection string used by Barman to connect to the Postgres server. Server. .RS .RE .TP .B barman_home Main data directory for Barman. Global. .RS .RE .TP .B barman_lock_directory Directory for locks. Default: \f[C]%(barman_home)s\f[]. Global. .RS .RE .TP .B backup_directory Directory where backup data for a server will be placed. Server. .RS .RE .TP .B basebackups_directory Directory where base backups will be placed. Server. .RS .RE .TP .B wals_directory Directory which contains WAL files. Server. .RS .RE .TP .B incoming_wals_directory Directory into which incoming WAL files are archived. Server. .RS .RE .TP .B lock_file Lock file for a backup in progress. Global/Server. .RS .RE .TP .B log_file Location of Barman\[aq]s log file. Global. .RS .RE .TP .B log_level Level of logging (DEBUG, INFO, WARNING, ERROR, CRITICAL). Global. .RS .RE .TP .B custom_compression_filter Compression algorithm applied to WAL files. Global/Server. .RS .RE .TP .B custom_decompression_filter Decompression algorithm applied to compressed WAL files; this must match the compression algorithm. Global/Server. .RS .RE .TP .B pre_backup_script Hook script launched before a base backup. Global/Server.
.RS .RE .TP .B pre_backup_retry_script Hook script launched before a base backup, after \[aq]pre_backup_script\[aq]. Since this is a \f[I]retry\f[] hook script, Barman will retry its execution until it returns either SUCCESS (0), ABORT_CONTINUE (62) or ABORT_STOP (63). Returning ABORT_STOP will propagate the failure to a higher level and interrupt the backup operation. Global/Server. .RS .RE .TP .B post_backup_retry_script Hook script launched after a base backup. Since this is a \f[I]retry\f[] hook script, Barman will retry its execution until it returns either SUCCESS (0), ABORT_CONTINUE (62) or ABORT_STOP (63). In a post backup scenario, ABORT_STOP currently has the same effect as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_backup_script Hook script launched after a base backup, after \[aq]post_backup_retry_script\[aq]. Global/Server. .RS .RE .TP .B pre_archive_script Hook script launched before a WAL file is archived by maintenance. Global/Server. .RS .RE .TP .B pre_archive_retry_script Hook script launched before a WAL file is archived by maintenance, after \[aq]pre_archive_script\[aq]. Since this is a \f[I]retry\f[] hook script, Barman will retry its execution until it returns either SUCCESS (0), ABORT_CONTINUE (62) or ABORT_STOP (63). Returning ABORT_STOP will propagate the failure to a higher level and interrupt the WAL archiving operation. Global/Server. .RS .RE .TP .B post_archive_retry_script Hook script launched after a WAL file is archived by maintenance. Since this is a \f[I]retry\f[] hook script, Barman will retry its execution until it returns either SUCCESS (0), ABORT_CONTINUE (62) or ABORT_STOP (63). In a post archive scenario, ABORT_STOP currently has the same effect as ABORT_CONTINUE. Global/Server. .RS .RE .TP .B post_archive_script Hook script launched after a WAL file is archived by maintenance, after \[aq]post_archive_retry_script\[aq]. Global/Server. .RS .RE .TP .B minimum_redundancy Minimum number of backups to be retained. Default 0. Global/Server. .RS .RE .TP .B retention_policy Policy for retention of periodic backups and archive logs. If left empty, retention policies are not enforced. For a redundancy based retention policy use "REDUNDANCY i" (where i is an integer > 0 that defines the number of backups to retain). For a recovery window retention policy use "RECOVERY WINDOW OF i DAYS", "RECOVERY WINDOW OF i WEEKS" or "RECOVERY WINDOW OF i MONTHS", where i is a positive integer representing the number of days, weeks or months to retain your backups. For more detailed information, refer to the official documentation. Default value is empty. Global/Server. .RS .RE .TP .B wal_retention_policy Policy for retention of archive logs (WAL files). Currently only "MAIN" is available. Global/Server. .RS .RE .TP .B retention_policy_mode Currently only "auto" is implemented. Global/Server. .RS .RE .TP .B bandwidth_limit This option allows you to specify a maximum transfer rate in kilobytes per second. A value of zero specifies no limit (default). Global/Server. .RS .RE .TP .B tablespace_bandwidth_limit This option allows you to specify a maximum transfer rate in kilobytes per second on a per\-tablespace basis, by specifying a comma separated list of TBNAME:BWLIMIT pairs. A value of zero specifies no limit (default). Global/Server. .RS .RE .TP .B immediate_checkpoint This option allows you to control the way PostgreSQL handles the checkpoint at the start of the backup.
If set to \f[C]false\f[] (default), the I/O workload for the checkpoint will be limited, according to the \f[C]checkpoint_completion_target\f[] setting on the PostgreSQL server. If set to \f[C]true\f[], an immediate checkpoint will be requested, meaning that PostgreSQL will complete the checkpoint as soon as possible. Global/Server. .RS .RE .TP .B network_compression This option allows you to enable data compression for network transfers. If set to \f[C]false\f[] (default), no compression is used. If set to \f[C]true\f[], compression is enabled, reducing network usage. Global/Server. .RS .RE .TP .B backup_options This option allows you to control the way Barman interacts with PostgreSQL for backups. If set to \f[C]exclusive_backup\f[] (default), \f[C]barman\ backup\f[] executes backup operations using the standard exclusive backup approach (technically through pg_start_backup/pg_stop_backup). If set to \f[C]concurrent_backup\f[], Barman requires the \f[C]pgespresso\f[] module to be installed on the PostgreSQL server (this allows you to perform a backup from a standby server). Global/Server. .RS .RE .TP .B last_backup_maximum_age This option identifies a time frame that must contain the latest backup. If the latest backup is older than the time frame, the \f[C]barman\ check\f[] command will report an error to the user. If empty (default), the latest backup is always considered valid. The syntax for this option is: "i (DAYS | WEEKS | MONTHS)" where i is an integer greater than zero, representing the number of days, weeks or months of the time frame. Global/Server. .RS .RE .TP .B basebackup_retry_times Number of retries of base backup copy, after an error. Used during both backup and recovery operations. Positive integer, default 0. Global/Server. .RS .RE .TP .B basebackup_retry_sleep Number of seconds to wait after a failed copy, before retrying. Used during both backup and recovery operations. Positive integer, default 30. Global/Server. .RS .RE .TP .B reuse_backup This option controls incremental backup support. Global/Server. Possible values are: * \f[C]off\f[]: disabled (default); * \f[C]copy\f[]: reuse the last available backup for a server and create a copy of the unchanged files (reduces backup time); * \f[C]link\f[]: reuse the last available backup for a server and create a hard link of the unchanged files (reduces backup time and space). Requires operating system and file system support for hard links. .RS .RE .TP .B recovery_options Options for recovery operations. Currently only supports \f[C]get\-wal\f[]. \f[C]get\-wal\f[] activates the generation of a basic \f[C]restore_command\f[] in the resulting \f[C]recovery.conf\f[] file that uses the \f[C]barman\ get\-wal\f[] command to fetch WAL files directly from Barman\[aq]s archive of WALs. Comma separated list of values, default empty. Global/Server. .RS .RE .SH HOOK SCRIPTS .PP The script definition is passed to a shell and can return any exit code.
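.PP For example, a minimal \f[C]pre_backup_script\f[] might simply log which backup is about to start, using the environment variables described below (an illustrative sketch, not part of Barman; the log file path is a placeholder): .IP .nf \f[C] #!/bin/sh #\ Sketch\ of\ a\ pre_backup_script:\ append\ one\ line\ per\ backup. echo\ "$BARMAN_PHASE\ backup\ $BARMAN_BACKUP_ID\ on\ server\ $BARMAN_SERVER"\ >>\ /var/log/barman/hooks.log exit\ 0 \f[] .fi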
.PP The shell environment will contain the following variables: .TP .B \f[C]BARMAN_CONFIGURATION\f[] configuration file used by barman .RS .RE .TP .B \f[C]BARMAN_ERROR\f[] error message, if any (only for the \[aq]post\[aq] phase) .RS .RE .TP .B \f[C]BARMAN_PHASE\f[] \[aq]pre\[aq] or \[aq]post\[aq] .RS .RE .TP .B \f[C]BARMAN_RETRY\f[] \f[C]1\f[] if it is a \f[I]retry script\f[] (from 1.5.0), \f[C]0\f[] if not .RS .RE .TP .B \f[C]BARMAN_SERVER\f[] name of the server .RS .RE .PP Variables specific to backup scripts: .TP .B \f[C]BARMAN_BACKUP_DIR\f[] backup destination directory .RS .RE .TP .B \f[C]BARMAN_BACKUP_ID\f[] ID of the backup .RS .RE .TP .B \f[C]BARMAN_PREVIOUS_ID\f[] ID of the previous backup (if present) .RS .RE .TP .B \f[C]BARMAN_STATUS\f[] status of the backup .RS .RE .TP .B \f[C]BARMAN_VERSION\f[] version of Barman .RS .RE .PP Variables specific to archive scripts: .TP .B \f[C]BARMAN_SEGMENT\f[] name of the WAL file .RS .RE .TP .B \f[C]BARMAN_FILE\f[] full path of the WAL file .RS .RE .TP .B \f[C]BARMAN_SIZE\f[] size of the WAL file .RS .RE .TP .B \f[C]BARMAN_TIMESTAMP\f[] WAL file timestamp .RS .RE .TP .B \f[C]BARMAN_COMPRESSION\f[] type of compression used for the WAL file .RS .RE .PP Only in the case of retry hook scripts is the exit code of the script checked by Barman. The output of hook scripts is simply written to the log file. .SH EXAMPLE .PP Here is an example configuration file: .IP .nf \f[C] [barman] ;\ Main\ directory barman_home\ =\ /var/lib/barman ;\ System\ user barman_user\ =\ barman ;\ Log\ location log_file\ =\ /var/log/barman/barman.log ;\ Default\ compression\ level ;compression\ =\ gzip ;\ Incremental\ backup reuse_backup\ =\ link ;\ \[aq]main\[aq]\ PostgreSQL\ Server\ configuration [main] ;\ Human\ readable\ description description\ =\ \ "Main\ PostgreSQL\ Database" ;\ SSH\ options ssh_command\ =\ ssh\ postgres\@pg ;\ PostgreSQL\ connection\ string conninfo\ =\ host=pg\ user=postgres ;\ Minimum\ number\ of\ required\ backups\ (redundancy) minimum_redundancy\ =\ 1 ;\ Retention\ policy\ (based\ on\ redundancy) retention_policy\ =\ REDUNDANCY\ 2 \f[] .fi .SH SEE ALSO .PP \f[C]barman\f[] (1). .SH AUTHORS .PP In alphabetical order: .IP \[bu] 2 Gabriele Bartolini (project leader) .IP \[bu] 2 Stefano Bianucci (developer) .IP \[bu] 2 Giuseppe Broccolo (QA/testing) .IP \[bu] 2 Giulio Calacoci (developer) .IP \[bu] 2 Francesco Canovai (QA/testing) .IP \[bu] 2 Gianni Ciolli (QA/testing) .IP \[bu] 2 Marco Nenciarini (lead developer) .PP Past contributors: .IP \[bu] 2 Carlo Ascani .SH RESOURCES .IP \[bu] 2 Homepage: <http://www.pgbarman.org/> .IP \[bu] 2 Documentation: <http://docs.pgbarman.org/> .SH COPYING .PP Barman is the exclusive property of 2ndQuadrant Italia and its code is distributed under GNU General Public License v3. .PP Copyright (C) 2011\-2015 2ndQuadrant Italia Srl \- http://www.2ndQuadrant.it/. .SH AUTHORS 2ndQuadrant Italy <http://www.2ndQuadrant.it/>.
barman-1.5.1/doc/barman.conf0000644000076500000240000000610212616416573015141 0ustar mnenciastaff; Barman, Backup and Recovery Manager for PostgreSQL ; http://www.pgbarman.org/ - http://www.2ndQuadrant.com/ ; ; Main configuration file [barman] ; Main directory barman_home = /var/lib/barman ; Locks directory - default: %(barman_home)s ;barman_lock_directory = /var/run/barman ; System user barman_user = barman ; Log location log_file = /var/log/barman/barman.log ; Default compression level: possible values are None (default), bzip2, gzip or custom ;compression = gzip ; Incremental backup support: possible values are None (default), link or copy ;reuse_backup = link ; Pre/post backup hook scripts ;pre_backup_script = env | grep ^BARMAN ;pre_backup_retry_script = env | grep ^BARMAN ;post_backup_retry_script = env | grep ^BARMAN ;post_backup_script = env | grep ^BARMAN ; Pre/post archive hook scripts ;pre_archive_script = env | grep ^BARMAN ;pre_archive_retry_script = env | grep ^BARMAN ;post_archive_retry_script = env | grep ^BARMAN ;post_archive_script = env | grep ^BARMAN ; Directory of configuration files. Place your sections in separate files with .conf extension ; For example place the 'main' server section in /etc/barman.d/main.conf ;configuration_files_directory = /etc/barman.d ; Minimum number of required backups (redundancy) - default 0 ;minimum_redundancy = 0 ; Global retention policy (REDUNDANCY or RECOVERY WINDOW) - default empty ;retention_policy = ; Global bandwidth limit in KBPS - default 0 (meaning no limit) ;bandwidth_limit = 4000 ; Immediate checkpoint for backup command - default false ;immediate_checkpoint = false ; Enable network compression for data transfers - default false ;network_compression = false ; Identify the standard behavior for backup operations: possible values are ; exclusive_backup (default), concurrent_backup ;backup_options = exclusive_backup ; Number of retries of data copy during base backup after an error - default 0 ;basebackup_retry_times = 0 ; Number of seconds of wait after a failed copy, before retrying - default 30 ;basebackup_retry_sleep = 30 ; Time frame that must contain the latest backup date. ; If the latest backup is older than the time frame, barman check ; command will report an error to the user. ; If empty, the latest backup is always considered valid. ; Syntax for this option is: "i (DAYS | WEEKS | MONTHS)" where i is an ; integer > 0 which identifies the number of days | weeks | months of ; validity of the latest backup for this check. Also known as 'smelly backup'. ;last_backup_maximum_age = ;; ; 'main' PostgreSQL Server configuration ;; [main] ;; ; Human readable description ;; description = "Main PostgreSQL Database" ;; ;; ; SSH options ;; ssh_command = ssh postgres@pg ;; ;; ; PostgreSQL connection string ;; conninfo = host=pg user=postgres ;; ;; ; Minimum number of required backups (redundancy) ;; ; minimum_redundancy = 1 ;; ;; ; Examples of retention policies ;; ;; ; Retention policy (disabled) ;; ; retention_policy = ;; ; Retention policy (based on redundancy) ;; ; retention_policy = REDUNDANCY 2 ;; ; Retention policy (based on recovery window) ;; ; retention_policy = RECOVERY WINDOW OF 4 WEEKS barman-1.5.1/INSTALL0000644000076500000240000000025312621416313013303 0ustar mnenciastaffBarman INSTALL instructions Copyright (C) 2011-2015 2ndQuadrant Italia Srl For further information, see the "Installation" section in the doc/barman-tutorial.en.md file. 
barman-1.5.1/LICENSE0000644000076500000240000010451312621123360013260 0ustar mnenciastaff GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. 
The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 
barman-1.5.1/MANIFEST.in0000644000076500000240000000040712621123360014006 0ustar mnenciastaffrecursive-include barman *.py recursive-include rpm * include doc/barman.1 doc/barman.5 doc/barman.conf include scripts/barman-wal-restore scripts/barman.bash_completion include AUTHORS NEWS ChangeLog LICENSE PKG-INFO MANIFEST.in MANIFEST setup.py INSTALL README barman-1.5.1/NEWS0000644000076500000240000003373112621416313012760 0ustar mnenciastaffBarman News - History of user-visible changes Copyright (C) 2011-2015 2ndQuadrant Italia Srl Version 1.5.1 - 16 Nov 2015 - Add support for the 'archive-wal' command which performs WAL maintenance operations on a given server - Add support for "per-server" concurrency of the 'cron' command - Improved management of xlog.db errors - Add support for mixed compression types in WAL files (SF.net#61) - Bug fixes: - Avoid retention policy checks during the recovery - Avoid 'wal_level' check on PostgreSQL version < 9.0 (#3) - Fix backup size calculation (#5) Version 1.5.0 - 28 Sep 2015 - Add support for the get-wal command which allows users to fetch any WAL file from the archive of a specific server - Add support for retry hook scripts, a special kind of hook script that Barman tries to run until it succeeds - Add active configuration option for a server to temporarily disable the server by setting it to False - Add barman_lock_directory global option to change the location of lock files (by default: 'barman_home') - Execute the full suite of checks before starting a backup, and skip the backup in case one or more checks fail - Forbid deletion of a running backup - Analyse include directives of a PostgreSQL server during backup and recover operations - Add check for conflicting paths in the configuration of Barman, both intra-server (by temporarily disabling a server) and inter-server (by refusing any command, to any server). - Add check for wal_level - Add barman-wal-restore script to be used as restore_command on a standby server, in conjunction with barman get-wal - Implement a standard and consistent policy for error management - Improved cache management of backups - Improved management of configuration in unit tests - Tutorial and man page sources have been converted to Markdown format - Add code documentation through Sphinx - Complete refactor of the code responsible for managing the backup and the recover commands - Changed internal directory structure of a backup - Introduce copy_method option (currently fixed to rsync) - Bug fixes: - Manage options without '=' in PostgreSQL configuration files - Preserve Timeline history files (Fixes: #70) - Workaround for rsync on SUSE Linux (Closes: #13 and #26) - Disables dangerous settings in postgresql.auto.conf (Closes: #68) Version 1.4.1 - 05 May 2015 * Fix for WAL archival ceasing to work if the first backup is EMPTY (Closes: #64) * Fix exception during error handling in Barman recovery (Closes: #65) * After a backup, limit cron activity to WAL archiving only (Closes: #62) * Improved robustness and error reporting of the backup delete command (Closes: #63) * Fix computation of WAL production ratio as reported in the show-backup command * Improved management of the xlog.db file, which is now correctly fsynced when updated. Also, the rebuild-xlogdb command now operates on a temporary new file, which overwrites the main one when finished.
* Add unit tests for dateutil module compatibility * Modified Barman version following PEP 440 rules and added support for tests in Python 3.4 Version 1.4.0 - 26 Jan 2015 * Incremental base backup implementation through the reuse_backup global/server option. Possible values are off (disabled, default), copy (preventing unmodified files from being transferred) and link (allowing for deduplication through hard links). * Store and show deduplication effects when using reuse_backup=link. * Added transparent support of pg_stat_archiver (PostgreSQL 9.4) in check, show-server and status commands. * Improved administration by invoking WAL maintenance at the end of a successful backup. * Changed the way unused WAL files are trashed, by differentiating between concurrent and exclusive backup cases. * Improved performance of WAL statistics calculation. * Treat a missing pg_ident.conf as a WARNING rather than an error. * Refactored output layer by removing remaining yield calls. * Check that rsync is in the system path. * Include history files in WAL management. * Improved robustness through more unit tests. * Fixed bug #55: Ignore fsync EINVAL errors on directories. * Fixed bug #58: retention policies delete. Version 1.3.3 - 21 Aug 2014 * Added "last_backup_max_age", a new global/server option that allows administrators to set the max age of the last backup in a catalogue, making it easier to detect any issues with periodical backup execution * Improved robustness of "barman backup" by introducing two global/server options: "basebackup_retry_times" and "basebackup_retry_sleep". These options allow an administrator to specify, respectively, the number of attempts for a copy operation after a failure, and the number of seconds to wait before retrying * Improved the recovery process via rsync on an existing directory (incremental recovery), by splitting the previous rsync call into several ones - invoking checksum control only when necessary * Added support for PostgreSQL 8.3 * Minor changes: + Support for comma-separated list values in configuration options + Improved backup durability by calling fsync() on backup and WAL files during "barman backup" and "barman cron" + Improved Nagios output for "barman check --nagios" + Display compression ratio for WALs in "barman show-backup" + Correctly handled keyboard interruption (CTRL-C) while performing barman backup + Improved error messages of failures regarding the stop of a backup + Wider coverage of unit tests * Bug fixes: + Copies "recovery.conf" on the remote server during "barman recover" (#45) + Correctly detect pre/post archive hook scripts (#41) Version 1.3.2 - 15 Apr 2014 * Fixed incompatibility with PostgreSQL 8.4 (Closes #40, bug introduced in version 1.3.1) Version 1.3.1 - 14 Apr 2014 * Added support for concurrent backup of PostgreSQL 9.2 and 9.3 servers that use the "pgespresso" extension. This feature is controlled by the "backup_options" configuration option (global/server) and activated when set to "concurrent_backup". Concurrent backup allows DBAs to perform full backup operations from a streaming replicated standby.
* Added the "barman diagnose" command which prints important information about the Barman system (extremely useful for support and problem solving) * Improved error messages and exception handling interface * Fixed bug in recovery of tablespaces that are created inside the PGDATA directory (bug introduced in version 1.3.0) * Fixed minor bug of unhandled -q option, for quiet mode of commands to be used in cron jobs (bug introduced in version 1.3.0) * Minor bug fixes and code refactoring Version 1.3.0 - 3 Feb 2014 * Refactored BackupInfo class for backup metadata to use the new FieldListFile class (infofile module) * Refactored output layer to use a dedicated module, in order to facilitate integration with Nagios (NagiosOutputWriter class) * Refactored subprocess handling in order to isolate stdin/stderr/ stdout channels (command_wrappers module) * Refactored hook scripts management * Extracted logging configuration and userid enforcement from the configuration class. * Support for hook scripts to be executed before and after a WAL file is archived, through the 'pre_archive_script' and 'post_archive_script' configuration options. * Implemented immediate checkpoint capability with --immediate-checkpoint command option and 'immediate_checkpoint' configuration option * Implemented network compression for remote backup and recovery through the 'network_compression' configuration option (#19) * Implemented the 'rebuild-xlogdb' command (Closes #27 and #28) * Added deduplication of tablespaces located inside the PGDATA directory * Refactored remote recovery code to work the same way local recovery does, by performing remote directory preparation (assuming the remote user has the right permissions on the remote server) * 'barman backup' now tries and create server directories before attempting to execute a full backup (#14) * Fixed bug #22: improved documentation for tablespaces relocation * Fixed bug #31: 'barman cron' checks directory permissions for lock file * Fixed bug #32: xlog.db read access during cron activities Version 1.2.3 - 5 September 2013 * Added support for PostgreSQL 9.3 * Added support for the "--target-name" recovery option, which allows to restore to a named point previously specified with pg_create_restore_point (only for PostgreSQL 9.1 and above users) * Fixed bug #27 about flock() usage with barman.lockfile (many thanks to Damon Snyder ) * Introduced Python 3 compatibility Version 1.2.2 - 24 June 2013 * Fix python 2.6 compatibility Version 1.2.1 - 17 June 2013 * Added the "bandwidth_limit" global/server option which allows to limit the I/O bandwidth (in KBPS) for backup and recovery operations * Added the "tablespace_bandwidth_limit" global/server option which allows to limit the I/O bandwidth (in KBPS) for backup and recovery operations on a per tablespace basis * Added /etc/barman/barman.conf as default location * Bug fix: avoid triggering the minimum_redundancy check on FAILED backups (thanks to Jérôme Vanandruel) Version 1.2.0 - 31 Jan 2013 * Added the "retention_policy_mode" global/server option which defines the method for enforcing retention policies (currently only "auto") * Added the "minimum_redundancy" global/server option which defines the minimum number of backups to be kept for a server * Added the "retention_policy" global/server option which defines retention policies management based on redunancy (e.g. REDUNDANCY 4) or recovery window (e.g. 
REDUNDANCY 4) or recovery window (e.g. RECOVERY WINDOW OF 3 MONTHS) * Added retention policy support to the logging infrastructure, the "check" and the "status" commands * The "check" command now integrates minimum redundancy control * Added retention policy states (valid, obsolete and potentially obsolete) to "show-backup" and "list-backup" commands * The 'all' keyword is now forbidden as server name * Added basic support for Nagios plugin output to the 'check' command through the --nagios option * Barman now requires argh >= 0.21.2 and argcomplete * Minor bug fixes Version 1.1.2 - 29 Nov 2012 * Added "configuration_files_directory" option that allows including multiple server configuration files from a directory * Support for special backup IDs: latest, last, oldest, first * Added management of multiple servers to the 'list-backup' command. 'barman list-backup all' now lists backups for all the configured servers. * Added "application_name" management for PostgreSQL >= 9.0 * Fixed bug #18: ignore missing WAL files if not found during delete Version 1.1.1 - 16 Oct 2012 * Fix regressions in recover command. Version 1.1.0 - 12 Oct 2012 * Support for hook scripts to be executed before and after a 'backup' command through the 'pre_backup_script' and 'post_backup_script' configuration options. * Added management of multiple servers to the 'backup' command. 'barman backup all' now iteratively backs up all the configured servers. * Fixed bug #9: "9.2 issue with pg_tablespace_location()" * Added a warning in recovery when file location options have been defined in the postgresql.conf file (issue #10) * Fail fast on recover command if the destination directory contains the ':' character (Closes: #4) or if an invalid tablespace relocation rule is passed * Report an informative message when pg_start_backup() invocation fails because an exclusive backup is already running (Closes: #8) Version 1.0.0 - 6 July 2012 * Backup of multiple PostgreSQL servers, with different versions. Versions from PostgreSQL 8.4+ are supported. * Support for secure remote backup (through SSH) * Management of a catalog of backups for every server, allowing users to easily create new backups, delete old ones or restore them * Compression of WAL files that can be configured on a per-server basis using compression/decompression filters, either predefined (gzip and bzip2) or custom * Support for INI configuration file with global and per-server directives. Default locations for configuration files are /etc/barman.conf or ~/.barman.conf. The '-c' option allows users to specify a different one * Simple indexing of base backups and WAL segments that does not require a local database * Maintenance mode (invoked through the 'cron' command) which performs ordinary operations such as WAL archival and compression, catalog updates, etc. * Added the 'backup' command which takes a full physical base backup of the given PostgreSQL server configured in Barman * Added the 'recover' command which performs local recovery of a given backup, allowing DBAs to specify a point in time. The 'recover' command supports relocation of both the PGDATA directory and, where applicable, the tablespaces * Added the '--remote-ssh-command' option to the 'recover' command for remote recovery of a backup.
Remote recovery does not currently support relocation of tablespaces * Added the 'list-server' command that lists all the active servers that have been configured in barman * Added the 'show-server' command that shows the relevant information for a given server, including all configuration options * Added the 'status' command which shows information about the current state of a server, including Postgres version, current transaction ID, archive command, etc. * Added the 'check' command which returns 0 if everything Barman needs is functioning correctly * Added the 'list-backup' command that lists all the available backups for a given server, including size of the base backup and total size of the related WAL segments * Added the 'show-backup' command that shows the relevant information for a given backup, including time of start, size, number of related WAL segments and their size, etc. * Added the 'delete' command which removes a backup from the catalog * Added the 'list-files' command which lists all the files for a single backup * RPM Package for RHEL 5/6 barman-1.5.1/PKG-INFO0000644000076500000240000000300012621417067013347 0ustar mnenciastaffMetadata-Version: 1.0 Name: barman Version: 1.5.1 Summary: Backup and Recovery Manager for PostgreSQL Home-page: http://www.pgbarman.org/ Author: 2ndQuadrant Italia Srl Author-email: info@2ndquadrant.it License: GPL-3.0 Description: Barman (Backup and Recovery Manager) is an open source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments and help DBAs during the recovery phase. Barman's most requested features include backup catalogues, incremental backup, retention policies, remote backup and recovery, archiving and compression of WAL files and backups. Barman is written and maintained by PostgreSQL professionals 2ndQuadrant. Platform: Linux Platform: Mac OS X Classifier: Environment :: Console Classifier: Development Status :: 5 - Production/Stable Classifier: Topic :: System :: Archiving :: Backup Classifier: Topic :: Database Classifier: Topic :: System :: Recovery Tools Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+) Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 barman-1.5.1/README0000644000076500000240000000414512621416313013136 0ustar mnenciastaff# Barman, Backup and Recovery Manager for PostgreSQL Barman (Backup and Recovery Manager) is an open source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments and to help DBAs during the recovery phase. Barman's most requested features include backup catalogues, incremental backup, retention policies, remote backup and recovery, archiving and compression of WAL files and backups. Barman is maintained by 2ndQuadrant and is distributed under GNU GPL 3. For further information, look at the "Web resources" section below. 
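The feature list above maps onto a very small day-to-day workflow. The following sketch is illustrative only: the server name 'pg', the host names and the paths are invented for this example, and the configuration options shown (all documented in the section 5 man page listed under "Web resources" below) are cut down to a minimum.

    # /etc/barman.conf -- minimal, hypothetical configuration
    [barman]
    barman_home = /var/lib/barman
    log_file = /var/log/barman/barman.log
    compression = gzip

    [pg]
    description = "Example PostgreSQL server"
    ssh_command = ssh postgres@pg.example.com
    conninfo = host=pg.example.com user=postgres

With such a configuration in place, the basic commands read:

    barman check pg        # verify SSH and PostgreSQL connectivity
    barman backup pg       # take a full base backup of server 'pg'
    barman list-backup pg  # browse the backup catalogue for 'pg'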
## Source content Here you can find a description of the files and directories distributed with Barman: * AUTHORS : development team of Barman * ChangeLog : log of changes * LICENSE : GNU GPL3 details * TODO : our wishlist for Barman * barman : sources in Python * doc : tutorial and man pages * rpm : SPEC files for RHEL distributions * tests : unit tests ## Web resources * Website : http://www.pgbarman.org/ * Documentation : http://www.pgbarman.org/documentation/ * Man page, section 1 : http://docs.pgbarman.org/barman.1.html * Man page, section 5 : http://docs.pgbarman.org/barman.5.html * Community support : http://www.pgbarman.org/support/ * Professional support : http://www.2ndquadrant.com/ * pgespresso extension : https://github.com/2ndquadrant-it/pgespresso ## Licence Copyright (C) 2011-2015 2ndQuadrant Italia Srl Barman is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Barman is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Barman. If not, see <http://www.gnu.org/licenses/>. barman-1.5.1/rpm/0000755000076500000240000000000012621417067013057 5ustar mnenciastaffbarman-1.5.1/rpm/barman.spec0000644000076500000240000001516712621416525015203 0ustar mnenciastaff%if 0%{?rhel} == 7 %global pybasever 2.7 %else %if 0%{?fedora}>=21 %global pybasever 2.7 %else %global pybasever 2.6 %endif %endif %if 0%{?rhel} == 5 %global with_python26 1 %endif %if 0%{?with_python26} %global __python_ver python26 %global __python %{_bindir}/python%{pybasever} %global __os_install_post %{__multiple_python_os_install_post} %else %global __python_ver python %endif %global main_version 1.5.1 # comment out the next line if not a pre-release (use '#%%global ...') #%%global extra_version a1 # Usually 1 - unique sequence for all pre-release versions %global package_release 1 %{!?pybasever: %define pybasever %(%{__python} -c "import sys;print(sys.version[0:3])")} %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Summary: Backup and Recovery Manager for PostgreSQL Name: barman Version: %{main_version} Release: %{?extra_version:0.}%{package_release}%{?extra_version:.%{extra_version}}%{?dist} License: GPLv3 Group: Applications/Databases Url: http://www.pgbarman.org/ Source0: %{name}-%{version}%{?extra_version:%{extra_version}}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) BuildArch: noarch Vendor: 2ndQuadrant Italia Srl Requires: python-abi = %{pybasever}, %{__python_ver}-psycopg2, %{__python_ver}-argh >= 0.21.2, %{__python_ver}-argcomplete, %{__python_ver}-dateutil Requires: /usr/sbin/useradd Requires: rsync >= 3.0.4 %description Barman (Backup and Recovery Manager) is an open source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments and help DBAs during the recovery phase.
Barman's most requested features include backup catalogues, incremental backup, retention policies, remote backup and recovery, archiving and compression of WAL files and backups. Barman is distributed under GNU GPL 3. %prep %setup -n barman-%{version}%{?extra_version:%{extra_version}} -q %build %{__python} setup.py build cat > barman.cron << EOF # m h dom mon dow user command * * * * * barman [ -x %{_bindir}/barman ] && %{_bindir}/barman -q cron EOF cat > barman.logrotate << EOF /var/log/barman/barman.log { missingok notifempty create 0600 barman barman } EOF %install %{__python} setup.py install -O1 --skip-build --root %{buildroot} mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d mkdir -p %{buildroot}%{_sysconfdir}/cron.d/ mkdir -p %{buildroot}%{_sysconfdir}/logrotate.d/ mkdir -p %{buildroot}/var/lib/barman mkdir -p %{buildroot}/var/log/barman install -pm 644 doc/barman.conf %{buildroot}%{_sysconfdir}/barman.conf install -pm 644 scripts/barman.bash_completion %{buildroot}%{_sysconfdir}/bash_completion.d/barman install -pm 644 barman.cron %{buildroot}%{_sysconfdir}/cron.d/barman install -pm 644 barman.logrotate %{buildroot}%{_sysconfdir}/logrotate.d/barman touch %{buildroot}/var/log/barman/barman.log %clean rm -rf %{buildroot} %files %defattr(-,root,root) %doc INSTALL NEWS README %{python_sitelib}/%{name}-%{version}%{?extra_version:%{extra_version}}-py%{pybasever}.egg-info %{python_sitelib}/%{name}/ %{_bindir}/%{name} %doc %{_mandir}/man1/%{name}.1.gz %doc %{_mandir}/man5/%{name}.5.gz %config(noreplace) %{_sysconfdir}/bash_completion.d/ %config(noreplace) %{_sysconfdir}/%{name}.conf %config(noreplace) %{_sysconfdir}/cron.d/%{name} %config(noreplace) %{_sysconfdir}/logrotate.d/%{name} %attr(700,barman,barman) %dir /var/lib/%{name} %attr(755,barman,barman) %dir /var/log/%{name} %attr(600,barman,barman) %ghost /var/log/%{name}/%{name}.log %pre groupadd -f -r barman >/dev/null 2>&1 || : useradd -M -n -g barman -r -d /var/lib/barman -s /bin/bash \ -c "Backup and Recovery Manager for PostgreSQL" barman >/dev/null 2>&1 || : %changelog * Mon Nov 16 2015 - Marco Nenciarini 1.5.1-1 - New release 1.5.1 * Mon Sep 28 2015 - Francesco Canovai 1.5.0-1 - New release 1.5.0 * Tue Sep 15 2015 - Francesco Canovai 1.5.0-0.1.a1 - New release 1.5.0 alpha 1 * Tue May 5 2015 - Francesco Canovai 1.4.1-1 - New release 1.4.1 * Mon Jan 26 2015 - Francesco Canovai 1.4.0-1 - New release 1.4.0 * Mon Jan 12 2015 - Francesco Canovai 1.4.0-0.1.alpha.1 - New release 1.4.0-alpha.1 * Thu Aug 21 2014 - Marco Nenciarini 1.3.3-1 - New release 1.3.3 * Tue Jun 24 2014 - Marco Nenciarini 1.3.3-0.1.alpha.1 - New release 1.3.3-alpha.1 * Tue Apr 15 2014 - Marco Nenciarini 1.3.2-1 - New release 1.3.2 * Mon Apr 14 2014 - Marco Nenciarini 1.3.1-1 - New release 1.3.1 * Mon Feb 3 2014 - Marco Nenciarini 1.3.0-1 - New release 1.3.0 * Thu Sep 5 2013 - Marco Nenciarini 1.2.3-1 - New release 1.2.3 * Mon Jun 24 2013 - Marco Nenciarini 1.2.2-1 - New release 1.2.2 * Mon Jun 17 2013 - Marco Nenciarini 1.2.1-1 - New release 1.2.1 * Thu Jan 31 2013 - Marco Nenciarini 1.2.0-1 - New release 1.2.0 - Depend on python-argh >= 0.21.2 and python-argcomplete * Thu Nov 29 2012 - Marco Nenciarini 1.1.2-1 - New release 1.1.2 * Tue Oct 16 2012 - Marco Nenciarini 1.1.1-1 - New release 1.1.1 * Fri Oct 12 2012 - Marco Nenciarini 1.1.0-1 - New release 1.1.0 - Some improvements from Devrim Gunduz * Fri Jul 6 2012 - Marco Nenciarini 1.0.0-1 - Open source release * Thu May 17 2012 - Marco Nenciarini 0.99.0-5 - Fixed exception handling and documentation * Thu May 
17 2012 - Marco Nenciarini 0.99.0-4 - Fixed documentation * Tue May 15 2012 - Marco Nenciarini 0.99.0-3 - Fixed cron job * Tue May 15 2012 - Marco Nenciarini 0.99.0-2 - Add cron job * Wed May 9 2012 - Marco Nenciarini 0.99.0-1 - Update to version 0.99.0 * Tue Dec 6 2011 - Marco Nenciarini 0.3.1-1 - Initial packaging. barman-1.5.1/rpm/rhel5/0000755000076500000240000000000012621417067014076 5ustar mnenciastaffbarman-1.5.1/rpm/rhel5/python-dateutil-1.4.1-remove-embedded-timezone-data.patch0000644000076500000240000000636712404074014026473 0ustar mnenciastaffdiff -up python-dateutil-1.4.1/dateutil/tz.py.remove-embedded-timezone-data python-dateutil-1.4.1/dateutil/tz.py --- python-dateutil-1.4.1/dateutil/tz.py.remove-embedded-timezone-data 2008-02-27 20:45:41.000000000 -0500 +++ python-dateutil-1.4.1/dateutil/tz.py 2010-07-13 14:40:30.228122861 -0400 @@ -930,9 +930,6 @@ def gettz(name=None): except OSError: pass if not tz: - from dateutil.zoneinfo import gettz - tz = gettz(name) - if not tz: for c in name: # name must have at least one offset to be a tzstr if c in "0123456789": diff -up python-dateutil-1.4.1/dateutil/zoneinfo/__init__.py.remove-embedded-timezone-data python-dateutil-1.4.1/dateutil/zoneinfo/__init__.py --- python-dateutil-1.4.1/dateutil/zoneinfo/__init__.py.remove-embedded-timezone-data 2005-12-22 13:13:50.000000000 -0500 +++ python-dateutil-1.4.1/dateutil/zoneinfo/__init__.py 2010-07-13 14:40:30.228122861 -0400 @@ -3,6 +3,10 @@ Copyright (c) 2003-2005 Gustavo Niemeye This module offers extensions to the standard python 2.3+ datetime module. + +This version of the code has been modified to remove the embedded copy +of zoneinfo-2008e.tar.gz and instead use the system data from the tzdata +package """ from dateutil.tz import tzfile from tarfile import TarFile @@ -13,49 +17,12 @@ __license__ = "PSF License" __all__ = ["setcachesize", "gettz", "rebuild"] -CACHE = [] -CACHESIZE = 10 - -class tzfile(tzfile): - def __reduce__(self): - return (gettz, (self._filename,)) - -def getzoneinfofile(): - filenames = os.listdir(os.path.join(os.path.dirname(__file__))) - filenames.sort() - filenames.reverse() - for entry in filenames: - if entry.startswith("zoneinfo") and ".tar." 
in entry: - return os.path.join(os.path.dirname(__file__), entry) - return None - -ZONEINFOFILE = getzoneinfofile() - -del getzoneinfofile - def setcachesize(size): - global CACHESIZE, CACHE - CACHESIZE = size - del CACHE[size:] + pass def gettz(name): - tzinfo = None - if ZONEINFOFILE: - for cachedname, tzinfo in CACHE: - if cachedname == name: - break - else: - tf = TarFile.open(ZONEINFOFILE) - try: - zonefile = tf.extractfile(name) - except KeyError: - tzinfo = None - else: - tzinfo = tzfile(zonefile) - tf.close() - CACHE.insert(0, (name, tzinfo)) - del CACHE[CACHESIZE:] - return tzinfo + from dateutil.tz import gettz + return gettz(name) def rebuild(filename, tag=None, format="gz"): import tempfile, shutil diff -up python-dateutil-1.4.1/MANIFEST.in.remove-embedded-timezone-data python-dateutil-1.4.1/MANIFEST.in --- python-dateutil-1.4.1/MANIFEST.in.remove-embedded-timezone-data 2010-07-13 14:42:07.974118722 -0400 +++ python-dateutil-1.4.1/MANIFEST.in 2010-07-13 14:42:14.409994960 -0400 @@ -1,4 +1,4 @@ -recursive-include dateutil *.py *.tar.* +recursive-include dateutil *.py recursive-include sandbox *.py include setup.py setup.cfg MANIFEST.in README LICENSE NEWS Makefile include test.py example.py barman-1.5.1/rpm/rhel5/python26-argcomplete.spec0000644000076500000240000000417012541044263020740 0ustar mnenciastaff# Use Python 2.6 %global pybasever 2.6 %global __python_ver 26 %global __python %{_bindir}/python%{pybasever} %global __os_install_post %{__multiple_python_os_install_post} %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Summary: Bash tab completion for argparse Name: python%{__python_ver}-argcomplete Version: 0.3.5 Release: 1%{?dist} License: ASL 2.0 Group: Development/Libraries Url: https://github.com/kislyuk/argcomplete Source0: http://pypi.python.org/packages/source/a/argcomplete/argcomplete-%{version}.tar.gz BuildRequires: python%{__python_ver}-devel,python%{__python_ver}-setuptools BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) BuildArch: noarch Requires: python-abi = %(%{__python} -c "import sys ; print sys.version[:3]") %if "%{__python_ver}" == "26" Requires: python%{__python_ver}-argparse %endif %description Argcomplete provides easy, extensible command line tab completion of arguments for your Python script. It makes two assumptions: * You're using bash as your shell * You're using argparse to manage your command line arguments/options Argcomplete is particularly useful if your program has lots of options or subparsers, and if your program can dynamically suggest completions for your argument/option values (for example, if the user is browsing resources over the network). %prep %setup -n argcomplete-%{version} -q %build %{__python} setup.py build %install %{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) %doc README.rst %{python_sitelib}/argcomplete-%{version}-py%{pybasever}.egg-info %{python_sitelib}/argcomplete/ %{_bindir}/activate-global-python-argcomplete %{_bindir}/python-argcomplete-check-easy-install-script %{_bindir}/register-python-argcomplete %changelog * Thu Jan 31 2013 - Marco Nenciarini 0.3.5-1 - Initial packaging.
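The spec file above, like the others under rpm/, exists because the stock RHEL 5 Python stack predates Barman's dependencies. A possible build sequence, assuming a standard ~/rpmbuild tree and the rpmdevtools package (both assumptions, not something this distribution sets up for you):

    # Fetch the Source0 tarball declared in the spec, then build it.
    spectool -g -R rpm/rhel5/python26-argcomplete.spec
    rpmbuild -ba rpm/rhel5/python26-argcomplete.spec
    # Once the dependency RPMs are installed, Barman itself follows:
    rpmbuild -ba rpm/barman.spec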
barman-1.5.1/rpm/rhel5/python26-argh.spec0000644000076500000240000000376212541044263017365 0ustar mnenciastaff# Use Python 2.6 %global pybasever 2.6 %global __python_ver 26 %global __python %{_bindir}/python%{pybasever} %global __os_install_post %{__multiple_python_os_install_post} %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Summary: A simple argparse wrapper Name: python%{__python_ver}-argh Version: 0.23.0 Release: 1%{?dist} License: LGPLv3 Group: Development/Libraries Url: http://bitbucket.org/neithere/argh/ Source0: http://pypi.python.org/packages/source/a/argh/argh-%{version}.tar.gz BuildRequires: python%{__python_ver}-devel,python%{__python_ver}-setuptools BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) BuildArch: noarch Requires: python-abi = %(%{__python} -c "import sys ; print sys.version[:3]") %if "%{__python_ver}" == "26" Requires: python%{__python_ver}-argparse %endif %description Argh, argparse! =============== Did you ever say "argh" trying to remember the details of optparse or argparse API? If yes, this package may be useful for you. It provides a very simple wrapper for argparse with support for hierarchical commands that can be bound to modules or classes. Argparse can do it; argh makes it easy. %prep %setup -n argh-%{version} -q %build %{__python} setup.py build %install %{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) %doc README %{python_sitelib}/argh-%{version}-py%{pybasever}.egg-info %{python_sitelib}/argh/ %changelog * Thu Jan 31 2013 - Marco Nenciarini 0.23.0-1 - Update to version 0.23.0 * Wed May 9 2012 - Marco Nenciarini 0.15.0-1 - Update to version 0.15.0 * Sat Dec 4 2011 - Marco Nenciarini 0.14.2-1 - Initial packaging. barman-1.5.1/rpm/rhel5/python26-dateutil.spec0000644000076500000240000001014412404074014020242 0ustar mnenciastaff# Use Python 2.6 %global pybasever 2.6 %global __python_ver 26 %global __python %{_bindir}/python%{pybasever} %global __os_install_post %{__multiple_python_os_install_post} %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Name: python%{__python_ver}-dateutil Version: 1.4.1 Release: 6%{?dist} Summary: Powerful extensions to the standard datetime module Group: Development/Languages License: Python URL: http://labix.org/python-dateutil Source0: http://labix.org/download/python-dateutil/python-dateutil-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) # Redirect the exposed parts of the dateutil.zoneinfo API to remove references # to the embedded copy of zoneinfo-2008e.tar.gz and instead use the system # data from the "tzdata" package (rhbz#559309): Patch0: python-dateutil-1.4.1-remove-embedded-timezone-data.patch BuildArch: noarch BuildRequires: python%{__python_ver}-devel,python%{__python_ver}-setuptools Requires: tzdata %description The dateutil module provides powerful extensions to the standard datetime module available in Python 2.3+.
%prep %setup -n python-dateutil-%{version} -q # Remove embedded copy of timezone data: %patch0 -p1 rm dateutil/zoneinfo/zoneinfo-2008e.tar.gz # Change encoding of NEWS file to UTF-8, preserving timestamp: iconv -f ISO-8859-1 -t utf8 NEWS > NEWS.utf8 && \ touch -r NEWS NEWS.utf8 && \ mv NEWS.utf8 NEWS %build %{__python} setup.py build %install rm -rf $RPM_BUILD_ROOT %{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %check %{__python} test.py %files %defattr(-,root,root,-) %doc example.py LICENSE NEWS README %{python_sitelib}/dateutil/ %{python_sitelib}/*.egg-info %changelog * Tue Jul 13 2010 David Malcolm - 1.4.1-6 - remove embedded copy of timezone data, and redirect the dateutil.zoneinfo API accordingly Resolves: rhbz#559309 - add a %%check, running the upstream selftest suite * Tue Jul 13 2010 David Malcolm - 1.4.1-5 - add requirement on tzdata Resolves: rhbz#559309 - fix encoding of the NEWS file * Mon Nov 30 2009 Dennis Gregorovic - 1.4.1-4.1 - Rebuilt for RHEL 6 * Sun Jul 26 2009 Fedora Release Engineering - 1.4.1-4 - Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild * Thu Feb 26 2009 Fedora Release Engineering - 1.4.1-3 - Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild * Fri Feb 20 2009 Jef Spaleta - 1.4.1-2 - small specfile fix * Fri Feb 20 2009 Jef Spaleta - 1.4.1-2 - New upstream version * Sat Nov 29 2008 Ignacio Vazquez-Abrams - 1.4-3 - Rebuild for Python 2.6 * Fri Aug 29 2008 Tom "spot" Callaway - 1.4-2 - fix license tag * Tue Jul 01 2008 Jef Spaleta 1.4-1 - Latest upstream release * Fri Jan 04 2008 Jef Spaleta 1.2-2 - Fix for egg-info file creation * Thu Jun 28 2007 Orion Poplawski 1.2-1 - Update to 1.2 * Mon Dec 11 2006 Jef Spaleta 1.1-5 - Fix python-devel BR, as per discussion in maintainers-list * Mon Dec 11 2006 Jef Spaleta 1.1-4 - Release bump for rebuild against python 2.5 in devel tree * Wed Jul 26 2006 Orion Poplawski 1.1-3 - Add patch to fix building on x86_64 * Wed Feb 15 2006 Orion Poplawski 1.1-2 - Rebuild for gcc/glibc changes * Thu Dec 22 2005 Orion Poplawski 1.1-1 - Update to 1.1 * Thu Jul 28 2005 Orion Poplawski 1.0-1 - Update to 1.0 * Tue Jul 05 2005 Orion Poplawski 0.9-1 - Initial Fedora Extras package barman-1.5.1/rpm/rhel5/python26-psycopg2.spec0000644000076500000240000001400012404074014020170 0ustar mnenciastaff# Use Python 2.6 %global pybasever 2.6 %global __python_ver 26 %global __python %{_bindir}/python%{pybasever} %global __os_install_post %{__multiple_python_os_install_post} %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} %define ZPsycopgDAdir %{_localstatedir}/lib/zope/Products/ZPsycopgDA %global pgmajorversion 90 %global pginstdir /usr/pgsql-9.0 %global sname psycopg2 Summary: A PostgreSQL database adapter for Python Name: python26-%{sname} Version: 2.4.5 Release: 1%{?dist} License: LGPLv3 with exceptions Group: Applications/Databases Url: http://www.psycopg.org/psycopg/ Source0: http://initd.org/psycopg/tarballs/PSYCOPG-2-4/%{sname}-%{version}.tar.gz Patch0: setup.cfg.patch BuildRequires: python%{__python_ver}-devel postgresql%{pgmajorversion}-devel BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) Requires: python-abi = %(%{__python} -c "import sys ; print sys.version[:3]") %description psycopg is a PostgreSQL 
database adapter for the Python programming language (just like pygresql and popy.) It was written from scratch with the aim of being very small and fast, and stable as a rock. The main advantages of psycopg are that it supports the full Python DBAPI-2.0 and is thread safe at level 2. %package doc Summary: Documentation for psycopg python PostgreSQL database adapter Group: Documentation Requires: %{name} = %{version}-%{release} %description doc Documentation and example files for the psycopg python PostgreSQL database adapter. %package test Summary: Tests for psycopg2 Group: Development/Libraries Requires: %{name} = %{version}-%{release} %description test Tests for psycopg2. %package zope Summary: Zope Database Adapter ZPsycopgDA Group: Applications/Databases Requires: %{name} = %{version}-%{release} zope %description zope Zope Database Adapter for PostgreSQL, called ZPsycopgDA %prep %setup -q -n psycopg2-%{version} %patch0 -p0 %build %{__python} setup.py build # Fix for wrong-file-end-of-line-encoding problem; upstream also must fix this. for i in `find doc -iname "*.html"`; do sed -i 's/\r//' $i; done for i in `find doc -iname "*.css"`; do sed -i 's/\r//' $i; done %install rm -Rf %{buildroot} mkdir -p %{buildroot}%{python_sitearch}/psycopg2 %{__python} setup.py install --no-compile --root %{buildroot} install -d %{buildroot}%{ZPsycopgDAdir} cp -pr ZPsycopgDA/* %{buildroot}%{ZPsycopgDAdir} %clean rm -rf %{buildroot} %files %defattr(-,root,root) %doc AUTHORS ChangeLog INSTALL LICENSE README %dir %{python_sitearch}/psycopg2 %{python_sitearch}/psycopg2/*.py %{python_sitearch}/psycopg2/*.pyc %{python_sitearch}/psycopg2/*.so %{python_sitearch}/psycopg2/*.pyo %{python_sitearch}/psycopg2-*.egg-info %files doc %defattr(-,root,root) %doc doc examples/ %files test %defattr(-,root,root) %{python_sitearch}/%{sname}/tests/* %files zope %defattr(-,root,root) %dir %{ZPsycopgDAdir} %{ZPsycopgDAdir}/*.py %{ZPsycopgDAdir}/*.pyo %{ZPsycopgDAdir}/*.pyc %{ZPsycopgDAdir}/dtml/* %{ZPsycopgDAdir}/icons/* %changelog * Wed May 9 2012 - Marco Nenciarini 2.4.5-1 - Update to version 2.4.5 * Mon Aug 22 2011 Devrim GUNDUZ 2.4.2-1 - Update to 2.4.2 - Add a patch for pg_config path.
- Add new subpackage: test * Tue Mar 16 2010 Devrim GUNDUZ 2.0.14-1 - Update to 2.0.14 * Mon Oct 19 2009 Devrim GUNDUZ 2.0.13-1 - Update to 2.0.13 * Mon Sep 7 2009 Devrim GUNDUZ 2.0.12-1 - Update to 2.0.12 * Tue May 26 2009 Devrim GUNDUZ 2.0.11-1 - Update to 2.0.11 * Fri Apr 24 2009 Devrim GUNDUZ 2.0.10-1 - Update to 2.0.10 * Thu Mar 2 2009 Devrim GUNDUZ 2.0.9-1 - Update to 2.0.9 * Wed Apr 30 2008 - Devrim GUNDUZ 2.0.7-1 - Update to 2.0.7 * Fri Jun 15 2007 - Devrim GUNDUZ 2.0.6-1 - Update to 2.0.6 * Sun May 06 2007 Thorsten Leemhuis - rebuilt for RHEL5 final * Wed Dec 6 2006 - Devrim GUNDUZ 2.0.5.1-4 - Rebuilt for PostgreSQL 8.2.0 * Mon Sep 11 2006 - Devrim GUNDUZ 2.0.5.1-3 - Rebuilt * Wed Sep 6 2006 - Devrim GUNDUZ 2.0.5.1-2 - Remove ghost'ing, per Python Packaging Guidelines * Mon Sep 4 2006 - Devrim GUNDUZ 2.0.5.1-1 - Update to 2.0.5.1 * Sun Aug 6 2006 - Devrim GUNDUZ 2.0.3-3 - Fixed zope package dependencies and macro definition, per bugzilla review (#199784) - Fixed zope package directory ownership, per bugzilla review (#199784) - Fixed cp usage for zope subpackage, per bugzilla review (#199784) * Mon Jul 31 2006 - Devrim GUNDUZ 2.0.3-2 - Fixed 64 bit builds - Fixed license - Added Zope subpackage - Fixed typo in doc description - Added macro for zope subpackage dir * Mon Jul 31 2006 - Devrim GUNDUZ 2.0.3-1 - Update to 2.0.3 - Fixed spec file, per bugzilla review (#199784) * Sat Jul 22 2006 - Devrim GUNDUZ 2.0.2-3 - Removed python dependency, per bugzilla review. (#199784) - Changed doc package group, per bugzilla review. (#199784) - Replaced dos2unix with sed, per guidelines and bugzilla review (#199784) - Fix changelog dates * Sat Jul 21 2006 - Devrim GUNDUZ 2.0.2-2 - Added dos2unix to buildrequires - removed python related part from package name * Fri Jul 20 2006 - Devrim GUNDUZ 2.0.2-1 - Fix rpmlint errors, including dos2unix solution - Re-engineered spec file * Fri Jan 23 2006 - Devrim GUNDUZ - First 2.0.X build * Fri Jan 23 2006 - Devrim GUNDUZ - Update to 1.2.21 * Tue Dec 06 2005 - Devrim GUNDUZ - Initial release for 1.1.20 barman-1.5.1/rpm/rhel5/setup.cfg.patch0000644000076500000240000000100112404074014016773 0ustar mnenciastaff--- setup.cfg.old 2011-08-22 12:16:18.703486005 +0300 +++ setup.cfg 2011-08-22 12:16:31.596486005 +0300 @@ -26,7 +26,7 @@ # libraries needed to build psycopg2. If pg_config is not in the path or # is installed under a different name uncomment the following option and # set it to the pg_config full path. -#pg_config= +pg_config=/usr/pgsql-9.0/bin/pg_config # If "pg_config" is not available, "include_dirs" can be used to locate # postgresql headers and libraries. 
Some extra checks on sys.platform will barman-1.5.1/rpm/rhel6/0000755000076500000240000000000012621417067014077 5ustar mnenciastaffbarman-1.5.1/rpm/rhel6/python-argcomplete.spec0000644000076500000240000000353712541044263020577 0ustar mnenciastaff%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Summary: Bash tab completion for argparse Name: python-argcomplete Version: 0.3.5 Release: 1%{?dist} License: ASL 2.0 Group: Development/Libraries Url: https://github.com/kislyuk/argcomplete Source0: http://pypi.python.org/packages/source/a/argcomplete/argcomplete-%{version}.tar.gz BuildRequires: python-devel,python-setuptools BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) BuildArch: noarch Requires: python-abi = %(%{__python} -c "import sys ; print sys.version[:3]") Requires: python-argparse %description Argcomplete provides easy, extensible command line tab completion of arguments for your Python script. It makes two assumptions: * You're using bash as your shell * You're using argparse to manage your command line arguments/options Argcomplete is particularly useful if your program has lots of options or subparsers, and if your program can dynamically suggest completions for your argument/option values (for example, if the user is browsing resources over the network). %prep %setup -n argcomplete-%{version} -q %build %{__python} setup.py build %install %{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) %doc README.rst %{python_sitelib}/argcomplete-%{version}-py2.6.egg-info %{python_sitelib}/argcomplete/ %{_bindir}/activate-global-python-argcomplete %{_bindir}/python-argcomplete-check-easy-install-script %{_bindir}/register-python-argcomplete %changelog * Thu Jan 31 2013 - Marco Nenciarini 0.3.5-1 - Initial packaging. barman-1.5.1/rpm/rhel6/python-argh.spec0000644000076500000240000000333212545213415017210 0ustar mnenciastaff%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Summary: A simple argparse wrapper Name: python-argh Version: 0.23.0 Release: 1%{?dist} License: LGPLv3 Group: Development/Libraries Url: http://bitbucket.org/neithere/argh/ Source0: http://pypi.python.org/packages/source/a/argh/argh-%{version}.tar.gz BuildRequires: python-devel, python-setuptools BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) BuildArch: noarch Requires: python-abi = %(%{__python} -c "import sys ; print sys.version[:3]") Requires: python-argparse %description Argh, argparse! =============== Did you ever say "argh" trying to remember the details of optparse or argparse API? If yes, this package may be useful for you. It provides a very simple wrapper for argparse with support for hierarchical commands that can be bound to modules or classes. Argparse can do it; argh makes it easy.
%prep %setup -n argh-%{version} -q %build %{__python} setup.py build %install %{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) %doc README %{python_sitelib}/argh-%{version}-py2.6.egg-info %{python_sitelib}/argh/ %changelog * Thu Jan 31 2013 - Marco Nenciarini 0.23.0-1 - Update to version 0.23.0 * Wed May 9 2012 - Marco Nenciarini 0.15.0-1 - Update to version 0.15.0 * Sat Dec 4 2011 - Marco Nenciarini 0.14.2-1 - Initial packaging. barman-1.5.1/rpm/rhel7/0000755000076500000240000000000012621417067014100 5ustar mnenciastaffbarman-1.5.1/rpm/rhel7/python-argh.spec0000644000076500000240000000352012545213415017210 0ustar mnenciastaff%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)")} Summary: A simple argparse wrapper Name: python-argh Version: 0.26.1 Release: 1%{?dist} License: LGPLv3 Group: Development/Libraries Url: http://bitbucket.org/neithere/argh/ Source0: http://pypi.python.org/packages/source/a/argh/argh-%{version}.tar.gz BuildRequires: python-devel, python-setuptools BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot-%(%{__id_u} -n) BuildArch: noarch Requires: python-abi = %(%{__python} -c "import sys ; print sys.version[:3]") Requires: python-argparse %description Argh, argparse! =============== Did you ever say "argh" trying to remember the details of optparse or argparse API? If yes, this package may be useful for you. It provides a very simple wrapper for argparse with support for hierarchical commands that can be bound to modules or classes. Argparse can do it; argh makes it easy. %prep %setup -n argh-%{version} -q %build %{__python} setup.py build %install %{__python} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) %doc README.rst %{python_sitelib}/argh-%{version}-py2.7.egg-info %{python_sitelib}/argh/ %changelog * Tue Jan 20 2015 - Francesco Canovai 0.26.1-1 - Update to version 0.26.1 * Thu Jan 31 2013 - Marco Nenciarini 0.23.0-1 - Update to version 0.23.0 * Wed May 9 2012 - Marco Nenciarini 0.15.0-1 - Update to version 0.15.0 * Sat Dec 3 2011 - Marco Nenciarini 0.14.2-1 - Initial packaging. barman-1.5.1/scripts/0000755000076500000240000000000012621417067013750 5ustar mnenciastaffbarman-1.5.1/scripts/barman-wal-restore0000755000076500000240000000605412621115517017400 0ustar mnenciastaff#!/bin/bash # # Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This file is part of Barman. # # Barman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Barman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Barman. If not, see <http://www.gnu.org/licenses/>. VERSION=1.0 USER='barman' usage () { echo "barman-wal-restore:" echo " This script is meant to be used as a 'restore_command' based on the get-wal feature of Barman." echo " An SSH connection will be opened to the Barman host."
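# Illustrative example (host and server names are hypothetical): on a
# standby server, recovery.conf would typically invoke this script as
# its restore_command, letting PostgreSQL substitute %f and %p:
#
#   restore_command = 'barman-wal-restore -U barman backup.example.com pg %f %p'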
echo "" echo "Usage: barman-wal-restore [-U user] " echo "" echo "Optional:" echo " -U " echo " The user used for the ssh connection to the Barman server. Defaults to 'barman'." echo "" echo "parameters:" echo " " echo " The host of the Barman server (MANDATORY)." echo " " echo " The server name configured in Barman from which WALs are taken (MANDATORY)." echo " , " echo " those two parameters have to be valued with the '%f' and '%p' keywords (MANDATORY)." echo "" echo "info:" echo " --help" echo " Display this help and exit." echo " --version" echo " Output version information and exit." echo "" exit 1 } version (){ echo " barman-wal-restore: Version $VERSION" echo "" exit 1 } exit_with_error () { echo "ERROR: $1" exit $2 } opt="$1" case "$opt" in --help ) usage ;; --version ) version ;; esac while [ $# -gt 4 ] do opt="$1" shift; # expose next argument case "$opt" in -U ) USER="$1"; shift;; --version ) version;; * ) usage;; esac done ######################################################### # check mandatory parameter ######################################################### if [ -z "$1" ] then exit_with_error "missing barman host" 2 fi if [ -z "$2" ] then exit_with_error "missing server name" 2 fi if [ -z "$3" ] then exit_with_error "missing WAL name" 2 fi if [ -z "$4" ] then exit_with_error "missing WAL full path" 2 fi BARMANHOST=$1 NODEHOST=$2 WAL_NAME=$3 WAL_DEST=$4 # # Check WAL destination is not a directory and does not exist # if [ -d $WAL_DEST ] then exit_with_error "WAL_DEST cannot be a directory" 2 fi # # EXECUTE BARMAN GET-WAL THROUGH THE SSH CONNECTION # ssh $USER@$BARMANHOST barman get-wal $NODEHOST $WAL_NAME > $WAL_DEST STATUS=$? # # Manage the exit status # if [ $STATUS -ne 0 ] then if [ $STATUS -eq 255 ] then exit_with_error "Connection problem with ssh" 3 else exit_with_error "barman-wal-restore has failed!" $STATUS fi fi barman-1.5.1/scripts/barman.bash_completion0000644000076500000240000000005512541044263020273 0ustar mnenciastaffeval "$(register-python-argcomplete barman)" barman-1.5.1/setup.cfg0000644000076500000240000000007312621417067014102 0ustar mnenciastaff[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 barman-1.5.1/setup.py0000755000076500000240000000725412621123360013774 0ustar mnenciastaff#!/usr/bin/env python # # barman - Backup and Recovery Manager for PostgreSQL # # Copyright (C) 2011-2015 2ndQuadrant Italia Srl # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Backup and Recovery Manager for PostgreSQL Barman (Backup and Recovery Manager) is an open source administration tool for disaster recovery of PostgreSQL servers written in Python. It allows your organisation to perform remote backups of multiple servers in business critical environments and help DBAs during the recovery phase. Barman's most requested features include backup catalogues, incremental backup, retention policies, remote backup and recovery, archiving and compression of WAL files and backups. 
Barman is written and maintained by PostgreSQL professionals 2ndQuadrant. """ import sys # support for running tests through setup.py # requires a recent setuptools library try: from setuptools import setup from setuptools.command.test import test as TestCommand class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = ['tests'] self.test_suite = True def run_tests(self): # import here, because outside the eggs aren't loaded import pytest errno = pytest.main(self.test_args) sys.exit(errno) cmdclass = {'test': PyTest} except ImportError: from distutils.core import setup cmdclass = {} if sys.version_info < (2, 6): raise SystemExit('ERROR: Barman needs at least Python 2.6 to work') install_requires = ['psycopg2', 'argh >= 0.21.2', 'python-dateutil', 'argcomplete'] if sys.version_info < (2, 7): install_requires.append('argparse') barman = {} with open('barman/version.py', 'r') as fversion: exec (fversion.read(), barman) setup( name='barman', version=barman['__version__'], author='2ndQuadrant Italia Srl', author_email='info@2ndquadrant.it', url='http://www.pgbarman.org/', packages=['barman', ], scripts=['bin/barman', ], data_files=[ ('share/man/man1', ['doc/barman.1']), ('share/man/man5', ['doc/barman.5']), ], license='GPL-3.0', description=__doc__.split("\n")[0], long_description="\n".join(__doc__.split("\n")[2:]), install_requires=install_requires, platforms=['Linux', 'Mac OS X'], classifiers=[ 'Environment :: Console', 'Development Status :: 5 - Production/Stable', 'Topic :: System :: Archiving :: Backup', 'Topic :: Database', 'Topic :: System :: Recovery Tools', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], tests_require=['pytest', 'mock', 'pytest-catchlog==1.1', 'pytest-timeout'], cmdclass=cmdclass, use_2to3=True, )
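As a closing note, this is how the setup.py above is typically exercised from an unpacked source tree; running inside a virtualenv is an assumption made here for isolation, not a requirement of the script itself:

    python setup.py build
    python setup.py install   # installs the 'barman' script and the man pages
    python setup.py test      # runs the pytest suite via the PyTest cmdclass above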