pgbackrest-release-2.55.1/.cirrus.yml

# Cirrus CI Build Definitions
# ----------------------------------------------------------------------------------------------------------------------------------

# Build the branch if it is integration, a pull request, or ends in -ci/-cic (-cic targets only Cirrus CI)
only_if: $CIRRUS_BRANCH == 'integration' || $CIRRUS_PR != '' || $CIRRUS_BRANCH =~ '.*-ci$' || $CIRRUS_BRANCH =~ '.*-cic$'

# No auto-cancel on integration
auto_cancellation: $CIRRUS_BRANCH != 'integration'

# Arm64 - disabled because it often takes several hours for the test to get queued in Cirrus CI
# ----------------------------------------------------------------------------------------------------------------------------------
# arm64_task:
#   arm_container:
#     image: ubuntu:20.04
#     cpu: 4
#     memory: 2G
#
#   install_script:
#     - apt-get update && apt-get install -y perl sudo locales
#     - sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen
#     - dpkg-reconfigure --frontend=noninteractive locales
#     - update-locale LANG=en_US.UTF-8
#     - adduser --disabled-password --gecos "" testuser
#     - echo '%testuser ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
#     - chown -R testuser ${CIRRUS_WORKING_DIR?}
#
#   script:
#     - su - testuser -c "${CIRRUS_WORKING_DIR?}/test/ci.pl test --sudo --no-tempfs --param=c-only --param=no-coverage"

# FreeBSD 13
# ----------------------------------------------------------------------------------------------------------------------------------
freebsd_13_task:
  freebsd_instance:
    image_family: freebsd-13-5
    cpu: 4
    memory: 4G

  install_script: pkg update && pkg upgrade -y libiconv && pkg install -y bash git postgresql-libpqxx pkgconf libxml2 gmake perl5 libyaml p5-YAML-LibYAML rsync meson

  script:
    - rsync --version
    - cd .. && perl ${CIRRUS_WORKING_DIR}/test/test.pl --make-cmd=gmake --vm-max=2 --no-coverage --no-valgrind --module=command --test=backup

  debug_script:
    - ls -lah ${CIRRUS_WORKING_DIR}

# FreeBSD 14
# ----------------------------------------------------------------------------------------------------------------------------------
freebsd_14_task:
  freebsd_instance:
    image_family: freebsd-14-2
    cpu: 4
    memory: 4G

  install_script: pkg update && pkg upgrade -y libiconv && pkg install -y bash git postgresql-libpqxx pkgconf libxml2 gmake perl5 libyaml p5-YAML-LibYAML rsync meson

  script:
    - cd .. && perl ${CIRRUS_WORKING_DIR}/test/test.pl --make-cmd=gmake --vm-max=2 --no-coverage --no-valgrind --module=command --test=backup

  debug_script:
    - ls -lah ${CIRRUS_WORKING_DIR}

# MacOS Ventura
# ----------------------------------------------------------------------------------------------------------------------------------
macos_ventura_task:
  osx_instance:
    image: ghcr.io/cirruslabs/macos-runner:sonoma

  environment:
    LDFLAGS: -L/opt/homebrew/opt/openssl@3/lib -L/opt/homebrew/opt/libpq/lib -L/opt/homebrew/opt/libxml2/lib -L/opt/homebrew/opt/libyaml/lib
    CPPFLAGS: -I/opt/homebrew/opt/openssl@3/include -I/opt/homebrew/opt/libpq/include -I/opt/homebrew/opt/libxml2/include/libxml2 -I/opt/homebrew/opt/libyaml/include
    PERL5LIB: /opt/homebrew/opt/perl5/lib/perl5
    PKG_CONFIG_PATH: /opt/homebrew/opt/libpq/lib/pkgconfig:/opt/homebrew/opt/openssl@3/lib/pkgconfig

  install_script:
    - brew install -q pkg-config openssl@3 libpq libxml2 libyaml meson

  script:
    - cd ..
    - ${CIRRUS_WORKING_DIR}/test/test.pl --vm-max=2 --no-coverage --no-valgrind --module=command --test=backup --test=info

  debug_script:
    - ls -lah ${CIRRUS_WORKING_DIR}
    - ls -lahR /opt/homebrew/opt

pgbackrest-release-2.55.1/.editorconfig

root = true

[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.{yaml,yml}]
indent_size = 2

pgbackrest-release-2.55.1/.git-blame-ignore-revs

# Reformat code with uncrustify.
d4070c90641a61fa3cb1169f3bd53067193bab4e

# Fix formatting errors.
b2202c36d9e7c4557ac37087757df80193d516b5

# Remove double spaces from comments and documentation.
1bd5530a59cd8ddbabc279802d1ede4f8fbd5314

# Remove double spaces missed in 1bd5530a.
d49907239eb37c3e4e905f97543824181a1bd406

pgbackrest-release-2.55.1/.gitattributes

# Classify all .h/c.inc files as C
*.h linguist-language=C
*.c.inc linguist-language=C

pgbackrest-release-2.55.1/.github/ISSUE_TEMPLATE.md

Please provide the following information when submitting an issue (feature requests or general comments can skip this):

1. pgBackRest version:

2. PostgreSQL version:

3. Operating system/version - if you have more than one server (for example, a database server, a repository host server, one or more standbys), please specify each:

4. Did you install pgBackRest from source or from a package?

5. Please attach the following as applicable:
   - `pgbackrest.conf` file(s)
   - `postgresql.conf` settings applicable to pgBackRest (`archive_command`, `archive_mode`, `listen_addresses`, `max_wal_senders`, `wal_level`, `port`)
   - errors in the postgresql log file before or during the time you experienced the issue
   - log file in `/var/log/pgbackrest` for the commands run (e.g. `/var/log/pgbackrest/mystanza_backup.log`)

6. Describe the issue:

pgbackrest-release-2.55.1/.github/pull_request_template.md

Please read [Submitting a Pull Request](https://github.com/pgbackrest/pgbackrest/blob/main/CONTRIBUTING.md#submitting-a-pull-request) before submitting.
pgbackrest-release-2.55.1/.github/workflows/lock-thread.yml

# Configuration documentation at https://github.com/dessant/lock-threads
name: 'lock-thread'

on:
  schedule:
    - cron: '37 7 * * 0'
  workflow_dispatch:

permissions:
  issues: write

concurrency:
  group: lock

jobs:
  action:
    runs-on: ubuntu-latest
    steps:
      - uses: dessant/lock-threads@v4
        with:
          issue-inactive-days: '90'
          issue-comment: ''
          issue-lock-reason: ''
          pr-inactive-days: '90'
          pr-comment: ''
          pr-lock-reason: ''
          log-output: true

pgbackrest-release-2.55.1/.github/workflows/symbol.out

_IO_stdin_used
__TMC_END__
__bss_start
__data_start
__dso_handle
__environ@GLIBC_2.2.5
_edata
_end
_fini
_init
_start
backtrace_alloc
backtrace_close
backtrace_create_state
backtrace_dwarf_add
backtrace_free
backtrace_full
backtrace_get_view
backtrace_initialize
backtrace_open
backtrace_pcinfo
backtrace_qsort
backtrace_release_view
backtrace_syminfo
backtrace_syminfo_to_full_callback
backtrace_syminfo_to_full_error_callback
backtrace_uncompress_lzma
backtrace_uncompress_zdebug
backtrace_uncompress_zstd
backtrace_vector_finish
backtrace_vector_grow
backtrace_vector_release
data_start
environ@GLIBC_2.2.5
main
stderr@GLIBC_2.2.5
xmlFree@LIBXML2_2.4.30

pgbackrest-release-2.55.1/.github/workflows/test.yml

name: test

on:
  push:
    branches:
      - integration
      - '**-ci'
      - '**-cig'
  pull_request:
    branches:
      - integration
      - '**-ci'
      - '**-cig'

jobs:
  test:
    runs-on: ubuntu-22.04

    strategy:
      # Let all the jobs run to completion even if one fails
      fail-fast: false

      # The first jobs should be the canaries in the coal mine, i.e. the most likely to fail if there are problems in the code. They
      # should also be a good mix of unit, integration, and documentation tests.
      #
      # In general tests should be ordered from slowest to fastest. This does not make a difference for testing a single commit, but
      # when multiple commits are being tested it is best to have the slowest jobs first so that as jobs become available they will
      # tackle the slowest tests first.
      matrix:
        include:
          # All integration tests for oldest Debian
          - param: test --vm=u20 --param=module=integration

          # All tests (without coverage or valgrind) for 32-bit
          - param: test --vm=d11 --param=no-performance --param=no-coverage --param=no-valgrind

          # Debian/Ubuntu documentation
          - param: doc --vm=u20

          # All integration tests
          - param: test --vm=u22 --param=build-package --param=module=integration

          # All unit tests with coverage, backtrace and alternate timezone
          - param: test --vm=u22 --param=c-only --param=no-valgrind --param=tz=America/New_York

          # All unit tests with valgrind (disable coverage and backtrace for performance)
          - param: test --vm=u22 --param=c-only --param=no-coverage --param=no-back-trace

          # All unit tests on the newest gcc available
          - param: test --vm=f41 --param=c-only --param=no-valgrind --param=no-coverage --param=no-performance

          # RHEL documentation
          - param: doc --vm=rh8

          # All integration tests
          - param: test --vm=rh8 --param=module=integration

    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          path: pgbackrest

      - name: Run Test
        run: cd ${HOME?} && ${GITHUB_WORKSPACE?}/pgbackrest/test/ci.pl ${{matrix.param}} --param=build-max=2

      # Output the coverage report on failure in case the failure was caused by lack of coverage. This is not ideal since the report
      # needs to be copied from the log output into an HTML file where it can be viewed, but better than nothing.
      - name: Coverage Report
        if: failure()
        run: |
          cat ${GITHUB_WORKSPACE?}/pgbackrest/test/result/coverage/coverage.html

  # Basic tests on other architectures using emulation. The emulation is so slow that running all the unit tests would be too
  # expensive, but this at least shows that the build works and some of the more complex tests run. In particular, it is good to
  # test on one big-endian architecture to be sure that checksums are correct.
  arch:
    runs-on: ubuntu-24.04

    strategy:
      matrix:
        include:
          - arch: 'ppc64le'
          - arch: 's390x'

    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          path: pgbackrest

      - name: Install
        run: |
          sudo apt-get update
          sudo DEBCONF_NONINTERACTIVE_SEEN=true DEBIAN_FRONTEND=noninteractive apt-get install -y perl sudo libxml-checker-perl libyaml-perl rsync zlib1g-dev libssl-dev libxml2-dev libpq-dev libyaml-dev pkg-config make gcc ccache meson git liblz4-dev liblz4-tool zstd libzstd-dev bzip2 libbz2-dev
          docker run --privileged --rm tonistiigi/binfmt --install all

      - name: Build VM
        run: ${GITHUB_WORKSPACE?}/pgbackrest/test/test.pl --vm-build --vm=u22 --vm-arch=${{matrix.arch}}

      - name: Run Test
        run: |
          ${GITHUB_WORKSPACE?}/pgbackrest/test/test.pl --vm=u22 --vm-arch=${{matrix.arch}} --no-valgrind --no-coverage --no-optimize --build-max=2 --module=command --test=backup
          ${GITHUB_WORKSPACE?}/pgbackrest/test/test.pl --vm=u22 --vm-arch=${{matrix.arch}} --no-valgrind --no-coverage --no-optimize --build-max=2 --module=postgres --test=interface

  # Run meson unity build to check for errors, unused functions, and externed functions
  unity:
    runs-on: ubuntu-24.04

    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          path: pgbackrest

      - name: Install
        run: |
          sudo apt-get update
          DEBCONF_NONINTERACTIVE_SEEN=true DEBIAN_FRONTEND=noninteractive sudo apt-get install -y zlib1g-dev libssl-dev libxml2-dev libpq-dev libyaml-dev pkg-config meson liblz4-dev libzstd-dev libbz2-dev

      - name: Build
        run: |
          meson setup --unity=on -Dwerror=true build ${GITHUB_WORKSPACE?}/pgbackrest
          ninja -vC build

      - name: Check
        run: |
          diff ${GITHUB_WORKSPACE?}/pgbackrest/.github/workflows/symbol.out <(nm -gj --defined-only build/src/pgbackrest)

  # Check that code is correctly formatted
  code-format:
    runs-on: ubuntu-22.04

    steps:
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          path: pgbackrest

      - name: Check
        run: |
          cd ${HOME?} && ${GITHUB_WORKSPACE?}/pgbackrest/test/ci.pl test --param=code-format-check

  codeql:
    runs-on: ubuntu-24.04

    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language:
          - cpp

    steps:
      - name: Checkout Code
        uses: actions/checkout@v4

      - name: Install Packages
        run: sudo apt-get install -y --no-install-recommends libyaml-dev libbz2-dev meson

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{matrix.language}}

      - name: Build
        run: meson setup ${HOME?}/build ${GITHUB_WORKSPACE?} && ninja -C ${HOME?}/build

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3

pgbackrest-release-2.55.1/.gitignore

**/*~
*~
*.swp
.DS_Store

pgbackrest-release-2.55.1/CODING.md

# pgBackRest <br/> Coding Standards
## Formatting with uncrustify

pgBackRest uses uncrustify to check/update the code formatting. If the `code-format` test fails in CI then reformat the code:

```
pgbackrest/test/test.pl --code-format
```

Also review the standards in the sections below. Some standards require further explanation and others are not enforced by uncrustify.

## Standards

### Indentation

Indentation is four spaces -- no tabs. Only file types that absolutely require tabs (e.g. `Makefile`) may use them.

### Line Length

With the exception of documentation code, no line of any code or test file shall exceed 132 characters. If a line break is required, then it shall be after the first function parenthesis:

```
// CORRECT - location of line break after first function parenthesis if line length is greater than 132
StringList *removeList = infoBackupDataLabelList(
    infoBackup, strNewFmt("^%s.*", strZ(strLstGet(currentBackupList, fullIdx))));

// INCORRECT
StringList *removeList = infoBackupDataLabelList(infoBackup,
    strNewFmt("^%s.*", strZ(strLstGet(currentBackupList, fullIdx))));
```

In a conditional, the break shall come after a completed condition, for example:

```
// CORRECT - location of line break after a completed conditional if line length is greater than 132
if (archiveInfoPgHistory.id != backupInfoPgHistory.id ||
    archiveInfoPgHistory.systemId != backupInfoPgHistory.systemId ||
    archiveInfoPgHistory.version != backupInfoPgHistory.version)

// INCORRECT
if (archiveInfoPgHistory.id != backupInfoPgHistory.id || archiveInfoPgHistory.systemId !=
    backupInfoPgHistory.systemId ||
    archiveInfoPgHistory.version != backupInfoPgHistory.version)
```

### Function Comments

Comments for `extern` functions should be included in the `.h` file. Comments for `static` functions and implementation-specific notes for `extern` functions (i.e., not of interest to the general user) should be included in the `.c` file.

### Inline Comment

Inline comments shall start at character 69 and must not exceed the line length of 132. For example:

```
typedef struct InlineCommentExample
{
    const String *comment;                                          // Inline comment example
    const String *longComment;                                      // Inline comment example that exceeds 132 characters should
                                                                    // then go to next line but this should be avoided
} InlineCommentExample;
```

### Naming

#### Variables

Variable names use camel case with the first letter lower-case.

- `stanzaName` - the name of the stanza
- `nameIdx` - loop variable for iterating through a list of names

Variable names should be descriptive. Avoid `i`, `j`, etc.

#### Types

Type names use camel case with the first letter upper case:

`typedef struct MemContext <...>`

`typedef enum {<...>} ErrorState;`

#### Constants

**#define Constants**

`#define` constants should be all caps with `_` separators.

```c
#define MY_CONSTANT                                                 "STRING"
```

The value should be aligned at column 69 whenever possible.

This type of constant should mostly be used for strings. Use enums whenever possible for integer constants.

**String Constants**

String constants can be declared using the `STRING_STATIC()` macro for local strings and `STRING_EXTERN()` for strings that will be externed for use in other modules.

Externed strings should be declared in the header file as:

```c
#define SAMPLE_VALUE                                                "STRING"
    STRING_DECLARE(SAMPLE_VALUE_STR);
```

And in the C file as:

```c
STRING_EXTERN(SAMPLE_VALUE_STR, SAMPLE_VALUE);
```

Static strings declared in the C file are not required to have a `#define` if the `#define` version is not used. Externed strings must always have the `#define` in the header file.
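As a hypothetical usage sketch (the function name here is invented, not from the codebase), any module that includes the header can then compare against the externed constant without allocating a new `String`:

```c
#include "common/type/string.h"

// Hypothetical check that compares a parsed value against the externed string constant
static bool
valueIsSample(const String *const value)
{
    return strEq(value, SAMPLE_VALUE_STR);
}
```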
**Enum Constants**

Enum elements follow the same case rules as variables. They are strongly typed so this shouldn't present any confusion.

```c
typedef enum
{
    cipherModeEncrypt,
    cipherModeDecrypt,
} CipherMode;
```

Note the comma after the last element. This reduces diff churn when new elements are added.

#### Macros

Macro names should be upper-case with underscores between words. Macros (except simple constants) should be avoided whenever possible as they make code less clear and test coverage harder to measure.

Macros should follow the format:

```c
#define MACRO(paramName1, paramName2) \
```

If the macro defines a block it should look like:

```c
#define MACRO_2(paramName1, paramName2) \
{ \
    \
}
```

Continuation characters should be aligned at column 132 (unlike the examples above that have been shortened for display purposes).

To avoid conflicts, variables in a macro will be named `[macro name]_[var name]`, e.g. `TEST_RESULT_resultExpected`. Variables that need to be accessed in wrapped code should be provided accessor macros.

[Variadic functions](#variadic-functions) are an exception to the capitalization rule.

#### Begin / End

Use `Begin` / `End` for names rather than `Start` / `Finish`, etc.

#### New / Free

Use `New` / `Free` for constructors and destructors rather than `Create` / `Destroy`, etc.

### Formatting

#### Braces

C allows braces to be excluded for a single statement. However, braces should be used when the control statement (if, while, etc.) spans more than one line or the statement to be executed spans more than one line.

No braces needed:

```c
if (condition)
    return value;
```

Braces needed:

```c
if (conditionThatUsesEntireLine1 &&
    conditionThatUsesEntireLine2)
{
    return value;
}
```

```c
if (condition)
{
    return
        valueThatUsesEntireLine1 &&
        valueThatUsesEntireLine2;
}
```

Braces should be added to `switch` statement cases that have a significant amount of code. As a general rule of thumb, if the code block in the `case` is large enough to have blank lines and/or multiple comments then it should be enclosed in braces.

```c
switch (int)
{
    case 1:
        a = 2;
        break;

    case 2:
    {
        // Comment this more complex code
        a = 1;
        b = 2;
        c = func(a, b);

        break;
    }
}
```

#### Hints, Warnings, and Errors

Hints are to be formatted with capitalized `HINT:` followed by a space and a sentence. The sentence shall only begin with a capital letter if the first word is an acronym (e.g. TLS) or a proper name (e.g. PostgreSQL). The sentence must end with a period, question mark or exclamation point as appropriate.

Warnings and errors shall be lowercase, with exceptions for proper names and acronyms, and end without punctuation.

## Language Elements

### Data Types

Don't get exotic - use the simplest type that will work.

Use `int` or `unsigned int` for general cases. `int` will be at least 32 bits. When not using `int` use one of the types defined in `common/type.h`.

### Macros

Don't use a macro when a function could be used instead. Macros make it hard to measure code coverage.
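To illustrate the point (a generic sketch, not code from the project), a small helper written as a function is type checked and each branch can be measured for coverage, while the macro equivalent is opaque to coverage tools:

```c
// Prefer a function - type safe, debuggable, and fully covered by tests
static inline unsigned int
uintMin(const unsigned int a, const unsigned int b)
{
    return a < b ? a : b;
}

// Avoid the macro equivalent - coverage cannot see into it and arguments may be evaluated twice
// #define UINT_MIN_BAD(a, b) ((a) < (b) ? (a) : (b))
```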
### Objects

Object-oriented programming is used extensively. The object pointer is always referred to as `this`.

An object can expose internal struct members by defining a public struct that contains the members to be exposed and using inline functions to get/set the members.

The header file:

```c
/***********************************************************************************************************************************
Getters/setters
***********************************************************************************************************************************/
typedef struct ListPub
{
    unsigned int listSize;                                          // List size
} ListPub;

// List size
FN_INLINE_ALWAYS unsigned int
lstSize(const List *const this)
{
    return THIS_PUB(List)->listSize;
}
```

`THIS_PUB()` ensures that `this != NULL` so there is no need to check that in the calling function.

And the C file:

```c
struct List
{
    ListPub pub;                                                    // Publicly accessible variables
    ...
};
```

The public struct must be the first member of the private struct. The naming convention for the public struct is to add `Pub` to the end of the private struct name.

### Variadic Functions

Variadic functions can take a variable number of parameters. While the `printf()` pattern is variadic, it is not very flexible in terms of optional parameters given in any order.

This project implements variadic functions using macros (which are exempt from the normal macro rule of being all caps).

A typical variadic function definition:

```c
typedef struct StoragePathCreateParam
{
    bool errorOnExists;
    bool noParentCreate;
    mode_t mode;
} StoragePathCreateParam;

#define storagePathCreateP(this, pathExp, ...) \
    storagePathCreate(this, pathExp, (StoragePathCreateParam){__VA_ARGS__})

void storagePathCreate(const Storage *this, const String *pathExp, StoragePathCreateParam param);
```

Continuation characters should be aligned at column 132 (unlike the example above that has been shortened for display purposes).

This function can be called without variable parameters:

```c
storagePathCreateP(storageLocal(), "/tmp/pgbackrest");
```

Or with variable parameters:

```c
storagePathCreateP(storageLocal(), "/tmp/pgbackrest", .errorOnExists = true, .mode = 0777);
```

If the majority of functions in a module or object are variadic it is best to provide macros for all functions even if they do not have variable parameters. Do not use the base function when variadic macros exist.

## Testing

### Uncoverable/Uncovered Code

#### Uncoverable Code

The `uncoverable` keyword marks code that can never be covered. For instance, a function that never returns because it always throws an error. Uncoverable code should be rare to non-existent outside the common libraries and test code.

```c
}   // {uncoverable - function throws error so never returns}
```

Subsequent code that is uncoverable for the same reason is marked with `// {+uncoverable}`.

#### Uncovered Code

Marks code that is not tested for one reason or another. This should be kept to a minimum and an excuse given for each instance.

```c
exit(EXIT_FAILURE);                                                 // {uncovered - test harness does not support non-zero exit}
```

Subsequent code that is uncovered for the same reason is marked with `// {+uncovered}`.

pgbackrest-release-2.55.1/CONTRIBUTING.md

# pgBackRest <br/> Contributing to pgBackRest
## Table of Contents

[Introduction](#introduction)

[Building a Development Environment](#building-a-development-environment)

[Coding](#coding)

[Testing](#testing)

[Submitting a Pull Request](#submitting-a-pull-request)

## Introduction

This documentation is intended to assist contributors to pgBackRest by outlining some basic steps and guidelines for contributing to the project. Code fixes or new features can be submitted via pull requests. Ideas for new features and improvements to existing functionality or documentation can be [submitted as issues](https://github.com/pgbackrest/pgbackrest/issues). You may want to check the [Project Boards](https://github.com/pgbackrest/pgbackrest/projects) to see if your suggestion has already been submitted.

Bug reports should be [submitted as issues](https://github.com/pgbackrest/pgbackrest/issues). Please provide as much information as possible to aid in determining the cause of the problem.

You will always receive credit in the [release notes](http://www.pgbackrest.org/release.html) for your contributions.

Coding standards are defined in [CODING.md](https://github.com/pgbackrest/pgbackrest/blob/main/CODING.md) and some important coding details and an example are provided in the [Coding](#coding) section below. At a minimum, unit tests must be written and run and the documentation generated before [submitting a Pull Request](#submitting-a-pull-request); see the [Testing](#testing) section below for details.

## Building a Development Environment

This example is based on Ubuntu 20.04, but it should work on many versions of Debian and Ubuntu.

pgbackrest-dev => Install development tools

```
sudo apt-get install rsync git devscripts build-essential valgrind autoconf \
       autoconf-archive libssl-dev zlib1g-dev libxml2-dev libpq-dev pkg-config \
       libxml-checker-perl libyaml-perl libdbd-pg-perl liblz4-dev liblz4-tool \
       zstd libzstd-dev bzip2 libbz2-dev libyaml-dev ccache python3-distutils meson
```

Some unit tests and all the integration tests require Docker. Running in containers allows us to simulate multiple hosts, test on different distributions and versions of PostgreSQL, and use sudo without affecting the host system.

pgbackrest-dev => Install Docker

```
curl -fsSL https://get.docker.com | sudo sh
sudo usermod -aG docker `whoami`
```

This clone of the pgBackRest repository is sufficient for experimentation. For development, create a fork and clone that instead.

pgbackrest-dev => Clone pgBackRest repository

```
git clone https://github.com/pgbackrest/pgbackrest.git
```

If using a RHEL-based system, the CPAN XML parser is required to run `test.pl` and `doc.pl`. Instructions for installing Docker and the XML parser can be found in the `README.md` file of the pgBackRest [doc](https://github.com/pgbackrest/pgbackrest/blob/main/doc) directory in the section "The following is a sample RHEL 7 configuration that can be used for building the documentation". NOTE that the "Install latex (for building PDF)" section is not required since testing of the docs need only be run for HTML output.

## Coding

The following sections provide information on some important concepts needed for coding within pgBackRest.

### Memory Contexts

Memory is allocated inside contexts and can be long lasting (for objects) or temporary (for functions). In general, use `OBJ_NEW_BEGIN(MyObj)` for objects and `MEM_CONTEXT_TEMP_BEGIN()` for functions.
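As a minimal sketch (the function and variable names here are hypothetical), a temporary context wraps work whose allocations should not outlive the function:

```c
void
doSomethingTemporary(void)
{
    MEM_CONTEXT_TEMP_BEGIN()
    {
        // Allocations made here are freed automatically at MEM_CONTEXT_TEMP_END()
        const String *const scratch = strNewZ("scratch data");
        (void)scratch;                                              // Use the scratch string for intermediate work only
    }
    MEM_CONTEXT_TEMP_END();
}
```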
See [memContext.h](https://github.com/pgbackrest/pgbackrest/blob/main/src/common/memContext.h) for more details and the [Coding Example](#coding-example) below.

### Logging

Logging is used for debugging with the built-in macros `FUNCTION_LOG_*()` and `FUNCTION_TEST_*()` which are used to trace parameters passed to/returned from functions. `FUNCTION_LOG_*()` macros are used for production logging whereas `FUNCTION_TEST_*()` macros will be compiled out of production code. For functions where no parameter is valuable enough to justify the cost of debugging in production, use `FUNCTION_TEST_BEGIN()/FUNCTION_TEST_END()`, else use `FUNCTION_LOG_BEGIN(someLogLevel)/FUNCTION_LOG_END()`. See [debug.h](https://github.com/pgbackrest/pgbackrest/blob/main/src/common/debug.h) for more details and the [Coding Example](#coding-example) below.

Logging is also used for providing information to the user via the `LOG_*()` macros, such as `LOG_INFO("some informational message")` and `LOG_WARN_FMT("no prior backup exists, %s backup has been changed to full", strZ(cfgOptionDisplay(cfgOptType)))` and also via `THROW_*()` macros for throwing an error. See [log.h](https://github.com/pgbackrest/pgbackrest/blob/main/src/common/log.h) and [error.h](https://github.com/pgbackrest/pgbackrest/blob/main/src/common/error/error.h) for more details and the [Coding Example](#coding-example) below.
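As a quick illustration (the message text, error type choice, and variables here are hypothetical), user-facing logging and error throwing look like:

```c
LOG_INFO_FMT("removed %u expired backups", removeTotal);            // Informational message shown to the user

THROW_FMT(                                                          // Throw a formatted error to stop the command
    FormatError, "invalid value '%s'", strZ(value));
```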
### Coding Example

The example below is not structured like an actual implementation and is intended only to provide an understanding of some of the more common coding practices. The comments in the example are only here to explain the example and are not representative of the coding standards. Refer to the Coding Standards document ([CODING.md](https://github.com/pgbackrest/pgbackrest/blob/main/CODING.md)) and sections above for an introduction to the concepts provided here. For an actual implementation, see [db.h](https://github.com/pgbackrest/pgbackrest/blob/main/src/db/db.h) and [db.c](https://github.com/pgbackrest/pgbackrest/blob/main/src/db/db.c).

#### Example: hypothetical basic object construction

```c
/*
 * HEADER FILE - see db.h for a complete implementation example
 */

// Typedef the object declared in the C file
typedef struct MyObj MyObj;

// Constructor, and any functions in the header file, are all declared on one line
MyObj *myObjNew(unsigned int myData, const String *secretName);

// Declare the publicly accessible variables in a structure with Pub appended to the name
typedef struct MyObjPub                                             // First letter upper case
{
    unsigned int myData;                                            // Contents of the myData variable
} MyObjPub;

// Declare getters and setters inline for the publicly visible variables
// Only setters require "Set" appended to the name
FN_INLINE_ALWAYS unsigned int
myObjMyData(const MyObj *const this)
{
    return THIS_PUB(MyObj)->myData;                                 // Use the built-in THIS_PUB macro
}

// Destructor
FN_INLINE_ALWAYS void
myObjFree(MyObj *const this)
{
    objFree(this);
}

// TYPE and FORMAT macros for function logging
#define FUNCTION_LOG_MY_OBJ_TYPE \
    MyObj *
#define FUNCTION_LOG_MY_OBJ_FORMAT(value, buffer, bufferSize) \
    FUNCTION_LOG_OBJECT_FORMAT(value, myObjToLog, buffer, bufferSize)

/*
 * C FILE - see db.c for a more complete and actual implementation example
 */

// Declare the object type
struct MyObj
{
    MyObjPub pub;                                                   // Publicly accessible variables must be first and named "pub"
    const String *name;                                             // Pointer to lightweight string object - see string.h
};

// Object constructor, and any functions in the C file, have the return type and function signature on separate lines
MyObj *
myObjNew(unsigned int myData, const String *secretName)
{
    FUNCTION_LOG_BEGIN(logLevelDebug);                              // Use FUNCTION_LOG_BEGIN with a log level for production display
        FUNCTION_LOG_PARAM(UINT, myData);                           // When log level is debug, myData variable will be logged
        FUNCTION_TEST_PARAM(STRING, secretName);                    // FUNCTION_TEST_PARAM will not display secretName in production
    FUNCTION_LOG_END();

    ASSERT(secretName != NULL || myData > 0);                       // Development-only assertions (compiled out of production code)

    OBJ_NEW_BEGIN(MyObj)                                            // Create a long lasting memory context named after the object
    {
        *this = (MyObj)                                             // Initialize the object
        {
            .pub =
            {
                .myData = myData,                                   // Copy the simple data type to this object
            },
            .name = strDup(secretName),                             // Duplicate the String into this object's memory context
        };
    }
    OBJ_NEW_END();

    FUNCTION_LOG_RETURN(MyObj, this);
}

// Function using temporary memory context
String *
myObjDisplay(unsigned int myData)
{
    FUNCTION_TEST_BEGIN();                                          // No parameters passed to this function are logged in production
        FUNCTION_TEST_PARAM(UINT, myData);
    FUNCTION_TEST_END();

    String *result = NULL;                                          // Result is created in the caller's ("prior") memory context

    MEM_CONTEXT_TEMP_BEGIN()                                        // Begin a new temporary context
    {
        String *resultStr = strNewZ("Hello");                       // Allocate a string in the temporary memory context

        if (myData > 1)
            resultStr = strCatZ(resultStr, " World");               // Append a value to the string, still in the temporary context
        else
            LOG_WARN("Am I not your World?");                       // Log a warning to the user

        MEM_CONTEXT_PRIOR_BEGIN()                                   // Switch to the prior context for the string duplication
        {
            result = strDup(resultStr);                             // Create a copy of the string in the caller's context
        }
        MEM_CONTEXT_PRIOR_END();                                    // Switch back to the temporary context
    }
    MEM_CONTEXT_TEMP_END();                                         // Free everything created in this context - i.e. resultStr

    FUNCTION_TEST_RETURN(STRING, result);                           // Return result but do not log the value in production
}
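// Hypothetical usage of the example object above (these calls are illustrative, not part of db.h/db.c):
//
//     MyObj *myObj = myObjNew(2, STRDEF("secret"));                // Construct with myData = 2
//     String *display = myObjDisplay(myObjMyData(myObj));          // Returns "Hello World" since myData > 1
//     myObjFree(myObj);                                            // Free the object and everything in its context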
// Create the logging function for displaying important information from the object
String *
myObjToLog(const MyObj *this)
{
    return strNewFmt(
        "{name: %s, myData: %u}", this->name == NULL ? NULL_Z : strZ(this->name), myObjMyData(this));
}
```

## Testing

A list of all possible test combinations can be viewed by running:

```
pgbackrest/test/test.pl --dry-run
```

While some files are automatically generated during `make`, others are generated by running the test harness as follows:

```
pgbackrest/test/test.pl --gen-only
```

Prior to any submission, the html version of the documentation should also be run and the output checked by viewing the generated html on the local file system under `pgbackrest/doc/output/html`. More details can be found in the pgBackRest [doc/README.md](https://github.com/pgbackrest/pgbackrest/blob/main/doc/README.md) file.

```
pgbackrest/doc/doc.pl --out=html
```

> **NOTE:** `ERROR: [028]` regarding cache is invalid is OK; it just means there have been changes and the documentation will be built from scratch. In this case, be patient as the build could take 20 minutes or more depending on your system.

### Running Tests

Examples of test runs are provided in the following sections. There are several important options for running a test:

- `--dry-run` - without any other options, this will list all the available tests
- `--module` - identifies the module in which the test is located
- `--test` - the actual test set to be run
- `--run` - a number identifying the run within a test if testing a single run rather than the entire test
- `--vm-out` - displays the test output (helpful for monitoring the progress)
- `--vm` - identifies the pre-built container when using Docker, otherwise the setting should be `none`. See [test.yml](https://github.com/pgbackrest/pgbackrest/blob/main/.github/workflows/test.yml) for a list of valid vm codes noted by `param: test`.

For more options, run the test or documentation engine with the `--help` option:

```
pgbackrest/test/test.pl --help
pgbackrest/doc/doc.pl --help
```

#### Without Docker

If Docker is not installed, then the available tests can be listed using `--dry-run`. Some tests require containers and will only be available when Docker is installed.
pgbackrest-dev => List tests that don't require a container

```
pgbackrest/test/test.pl --dry-run

--- output ---

    P00   INFO: test begin on x86_64 - log level info
    P00   INFO: clean autogenerate code
--> P00   INFO: 83 tests selected
    P00   INFO: P1-T01/83 - vm=none, module=common, test=error
            [filtered 80 lines of output]
    P00   INFO: P1-T82/83 - vm=none, module=performance, test=type
    P00   INFO: P1-T83/83 - vm=none, module=performance, test=storage
--> P00   INFO: DRY RUN COMPLETED SUCCESSFULLY
```

pgbackrest-dev => Run a test

```
pgbackrest/test/test.pl --vm-out --module=common --test=wait

--- output ---

P00   INFO: test begin on x86_64 - log level info
P00   INFO: cleanup old data
P00   INFO: autogenerate code
P00   INFO: build for none (/home/vagrant/test/build/none)
P00   INFO: 1 test selected
P00   INFO: P1-T1/1 - vm=none, module=common, test=wait
P00   INFO: test command begin 2.55.1: [common/wait] --log-level=info --no-log-timestamp --repo-path=/home/vagrant/test/repo --scale=1 --test-path=/home/vagrant/test --vm=none --vm-id=0
P00   INFO: test command end: completed successfully

run 1 - waitNew(), waitMore, and waitFree()
    L0018 expect AssertError: assertion 'waitTime <= 999999000' failed

run 1/1 ----- L0021 0ms wait
    L0025 new wait
    L0026 check wait time
    L0027 check sleep time
    L0028 check sleep prev time
    L0029 no wait more

run 1/2 ----- L0032 100ms with retries after time expired
    L0034 new wait
    L0037 time expired, first retry
    L0038 time expired, second retry
    L0039 time expired, retries expired

run 1/3 ----- L0042 200ms wait
    L0046 new wait = 0.2 sec
    L0047 check wait time
    L0048 check sleep time
    L0049 check sleep prev time
    L0050 check begin time
    L0052 first retry
    L0053 check retry
    L0055 second retry
    L0056 check retry
    L0058 still going because of time
    L0064 lower range check
    L0065 upper range check
    L0067 free wait

run 1/4 ----- L0070 1100ms wait
    L0074 new wait = 1.1 sec
    L0075 check wait time
    L0076 check sleep time
    L0077 check sleep prev time
    L0078 check begin time
    L0084 lower range check
    L0085 upper range check
    L0087 free wait

run 1/5 ----- L0090 waitRemainder()
    L0092 new wait = 500ms
    L0093 check initial wait remainder
    L0094 check initial wait remainder
    L0098 check updated wait remainder
    L0099 check updated wait remainder

TESTS COMPLETED SUCCESSFULLY

P00   INFO: P1-T1/1 - vm=none, module=common, test=wait
P00   INFO: tested modules have full coverage
P00   INFO: TESTS COMPLETED SUCCESSFULLY
```

An entire module can be run by using only the `--module` option.

pgbackrest-dev => Run a module

```
pgbackrest/test/test.pl --module=postgres

--- output ---

P00   INFO: test begin on x86_64 - log level info
P00   INFO: cleanup old data
P00   INFO: autogenerate code
P00   INFO: build for none (/home/vagrant/test/build/none)
P00   INFO: 2 tests selected
P00   INFO: P1-T1/2 - vm=none, module=postgres, test=client
P00   INFO: P1-T2/2 - vm=none, module=postgres, test=interface
P00   INFO: tested modules have full coverage
P00   INFO: TESTS COMPLETED SUCCESSFULLY
```

#### With Docker

Build a container to run tests. The vm must be pre-configured but a variety are available. A vagrant file is provided in the test directory as an example of running in a virtual environment. The vm names are all three character abbreviations, e.g. `u20` for Ubuntu 20.04.

pgbackrest-dev => Build a VM

```
pgbackrest/test/test.pl --vm-build --vm=u20

--- output ---

P00   INFO: test begin on x86_64 - log level info
P00   INFO: Using cached pgbackrest/test:u20-base-x86_64-20250228A image (862159b4d2169a4752b106639ca0f47c1ebb1f86) ...
P00   INFO: Building pgbackrest/test:u20-test-x86_64 image ...
P00   INFO: Build Complete
```

> **NOTE:** to build all the vms, just omit the `--vm` option above.

pgbackrest-dev => Run a Specific Test Run

```
pgbackrest/test/test.pl --vm=u20 --module=postgres --test=interface --run=2

--- output ---

P00   INFO: test begin on x86_64 - log level info
P00   INFO: cleanup old data and containers
P00   INFO: autogenerate code
P00   INFO: clean build for u20 (/home/vagrant/test/build/u20)
P00   INFO: 1 test selected
P00   INFO: P1-T1/1 - vm=u20, module=postgres, test=interface, run=2
P00   INFO: TESTS COMPLETED SUCCESSFULLY
```

### Writing a Unit Test

The goal of unit testing is to have 100 percent code coverage. Two files will usually be involved in this process:

- **define.yaml** - defines the number of tests to be run for each module and test file. There is a comment at the top of the file that provides more information about this file.
- **src/module/somefileTest.c** - where "somefile" is the path and name of the test file where the unit tests are located for the code being updated (e.g. `src/module/command/expireTest.c`).

#### define.yaml

Each module is separated by a line of asterisks (*) and each test within is separated by a line of dashes (-). In the example below, the module is `command` and the unit test is `check`. The number of calls to `testBegin()` in a unit test file will dictate the number following `total:`, in this case 4. Under `coverage:` is the list of files that will be tested.

```
# ********************************************************************************************************************************
  - name: command

    test:
      # ----------------------------------------------------------------------------------------------------------------------------
      - name: check
        total: 4
        containerReq: true

        coverage:
          - command/check/common
          - command/check/check
```

#### somefileTest.c

Unit test files are organized in the `test/src/module` directory with the same directory structure as the source code being tested. For example, if new code is added to src/**command/expire**.c then test/src/module/**command/expire**Test.c will need to be updated.

Assuming that a test file already exists, new unit tests will either go in a new `testBegin()` section or be added to an existing section. Each such section is a test run. The comment string passed to `testBegin()` should reflect the function(s) being tested in the test run. Tests within a run should use `TEST_TITLE()` with a comment string describing the test.

```
// *****************************************************************************************************************************
if (testBegin("expireBackup()"))
{
    // -------------------------------------------------------------------------------------------------------------------------
    TEST_TITLE("manifest file removal");
```

#### Setting up the command to be run

[harnessConfig.h](https://github.com/pgbackrest/pgbackrest/blob/main/test/src/common/harnessConfig.h) describes a list of functions that should be used when configuration options are required for a command being tested. Options are set in a `StringList` which must be defined and passed to the `HRN_CFG_LOAD()` macro with the command.
For example, the following will set up a test to run the `pgbackrest --repo-path=test/test-0/repo info` command on multiple repositories, one of which is encrypted:

```
StringList *argList = strLstNew();                                  // Create an empty string list
hrnCfgArgRawZ(argList, cfgOptRepoPath, TEST_PATH "/repo");          // Add the --repo-path option
hrnCfgArgKeyRawZ(argList, cfgOptRepoPath, 2, TEST_PATH "/repo2");   // Add the --repo2-path option
hrnCfgArgKeyRawStrId(argList, cfgOptRepoCipherType, 2, cipherTypeAes256Cbc);    // Add the --repo2-cipher-type option
hrnCfgEnvKeyRawZ(cfgOptRepoCipherPass, 2, TEST_CIPHER_PASS);        // Set environment variable for the --repo2-cipher-pass option
HRN_CFG_LOAD(cfgCmdInfo, argList);                                  // Load the command and option list into the test harness
```

#### Storing a file

Sometimes it is desirable to store or manipulate files before or during a test and then confirm the contents. The [harnessStorage.h](https://github.com/pgbackrest/pgbackrest/blob/main/test/src/common/harnessStorage.h) file contains macros (e.g. `HRN_STORAGE_PUT` and `TEST_STORAGE_GET`) for doing this. In addition, `HRN_INFO_PUT` is convenient for writing out info files (archive.info, backup.info, backup.manifest) since it will automatically add header and checksum information.

```
HRN_STORAGE_PUT_EMPTY(
    storageRepoWrite(), STORAGE_REPO_ARCHIVE "/10-1/000000010000000100000001-abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.gz");
```

#### Testing results

Tests are run and results confirmed via macros that are described in [harnessTest.h](https://github.com/pgbackrest/pgbackrest/blob/main/test/src/common/harnessTest.h). With the exception of TEST_ERROR, the third parameter is a short description of the test. Some of the more common macros are (a usage sketch follows this list):

- `TEST_RESULT_STR` - Test the actual value of the string returned by the function.
- `TEST_RESULT_UINT` / `TEST_RESULT_INT` - Test for an unsigned integer / integer.
- `TEST_RESULT_BOOL` - Test a boolean value.
- `TEST_RESULT_PTR` / `TEST_RESULT_PTR_NE` - Test a pointer: useful for testing if the pointer is `NULL` or not equal (`NE`) to `NULL`.
- `TEST_RESULT_VOID` - The function being tested returns a `void`. This is then usually followed by tests that ensure other actions occurred (e.g. a file was written to disk).
- `TEST_ERROR` / `TEST_ERROR_FMT` - Test that a specific error code was raised with specific wording.

> **NOTE:** `HRN_*` macros should be used only for test setup and cleanup. `TEST_*` macros must be used for testing results.
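For instance (a hypothetical sketch -- `myFuncListSize()`, `myFuncIsValid()`, and `myFuncName()` are invented functions, not part of the codebase):

```c
TEST_RESULT_UINT(myFuncListSize(list), 3, "list has three items");
TEST_RESULT_BOOL(myFuncIsValid(list), true, "list is valid");
TEST_RESULT_STR_Z(myFuncName(list), "full", "check list name");
```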
#### Testing a log message

If a function being tested logs something with `LOG_WARN`, `LOG_INFO` or other `LOG_*()` macro, then the logged message must be cleared before the end of the test by using the `TEST_RESULT_LOG()/TEST_RESULT_LOG_FMT()` macros.

```
TEST_RESULT_LOG(
    "P00   WARN: WAL segment '000000010000000100000001' was not pushed due to error [25] and was manually skipped: error");
```

In the above, `Pxx` indicates the process (P) and the process number (xx), e.g. P00, P01.

#### Testing using child process

Sometimes it is useful to use a child process for testing. Below is a simple example. See [harnessFork.h](https://github.com/pgbackrest/pgbackrest/blob/main/test/src/common/harnessFork.h) for more details.

```
HRN_FORK_BEGIN()
{
    HRN_FORK_CHILD_BEGIN()
    {
        TEST_RESULT_BOOL(cmdLockAcquireP(), true, "create backup/expire lock");

        // Notify parent that lock has been acquired
        HRN_FORK_CHILD_NOTIFY_PUT();

        // Wait for parent to allow release lock
        HRN_FORK_CHILD_NOTIFY_GET();

        cmdLockReleaseP();
    }
    HRN_FORK_CHILD_END();

    HRN_FORK_PARENT_BEGIN()
    {
        // Wait for child to acquire lock
        HRN_FORK_PARENT_NOTIFY_GET(0);

        HRN_CFG_LOAD(cfgCmdInfo, argListText);
        TEST_RESULT_STR_Z(
            infoRender(),
            "stanza: stanza1\n"
            "    status: error (no valid backups, backup/expire running)\n"
            "    cipher: none\n"
            "\n"
            "    db (current)\n"
            "        wal archive min/max (9.6): none present\n",
            "text - single stanza, no valid backups, backup/expire lock detected");

        // Notify child to release lock
        HRN_FORK_PARENT_NOTIFY_PUT(0);
    }
    HRN_FORK_PARENT_END();
}
HRN_FORK_END();
```

#### Testing using a shim

A PostgreSQL libpq shim is provided to simulate interactions with PostgreSQL. Below is a simple example. See [harnessPq.h](https://github.com/pgbackrest/pgbackrest/blob/main/test/src/common/harnessPq.h) for more details.

```
// Set up two standbys but no primary
HRN_PQ_SCRIPT_SET(
    HRN_PQ_SCRIPT_OPEN_GE_96(1, "dbname='postgres' port=5432", PG_VERSION_96, "/pgdata", true, NULL, NULL),
    HRN_PQ_SCRIPT_OPEN_GE_96(8, "dbname='postgres' port=5433", PG_VERSION_96, "/pgdata", true, NULL, NULL),

    // Close the "inner" session first (8) then the outer (1)
    HRN_PQ_SCRIPT_CLOSE(8),
    HRN_PQ_SCRIPT_CLOSE(1));

TEST_ERROR(cmdCheck(), ConfigError, "primary database not found\nHINT: check indexed pg-path/pg-host configurations");
```

### Running a Unit Test

**Code Coverage**

Unit tests are run for all files that are listed in `define.yaml` and a coverage report generated for each file listed under the tag `coverage:`. Note that some files are listed in multiple `coverage:` sections for a module; in this case, each test for the file being modified should be specified for the module in which the file exists (e.g. `--module=storage --test=posix --test=gcs`, etc.) or, alternatively, simply run the module without the `--test` option. It is recommended that a `--vm` be specified since running the same test for multiple vms is unnecessary for coverage. The following example would run the test set from the **define.yaml** section detailed above.

```
pgbackrest/test/test.pl --vm-out --module=command --test=check --vm=u20
```

> **NOTE:** Not all systems perform at the same speed, so if a test is timing out, try rerunning with another vm.

A coverage report will be generated and written to the local file system under the pgBackRest repository in `test/result/coverage.html`.

If 100 percent code coverage has not been achieved, an error message will be displayed, for example: `ERROR: [125]: c module command/check/check is not fully covered`

**Debugging with files**

Sometimes it is useful to look at files that were generated during the test. The default for running any test is that, at the start/end of the test, the test harness will clean up all files and directories created. To override this behavior, a single test run must be specified and the option `--no-cleanup` provided. Again, continuing with the check command, from **define.yaml** above, there are four tests. Below, test one will be run and nothing will be cleaned up so that the files and directories in `test/test-0` can be inspected.

```
pgbackrest/test/test.pl --vm-out --module=command --test=check --run=1 --no-cleanup
```

### Understanding Test Output

The following is a small sample of a typical test output.
```
run 8 - expireTimeBasedBackup()

run 8/1 ------------- L2285 no current backups
000.002s          L2298     empty backup.info
000.009s 000.007s L2300     no backups to expire
```

**run 8 - expireTimeBasedBackup()** - indicates the run number (8) within the module and the parameter provided to testBegin, e.g. `testBegin("expireTimeBasedBackup()")`

**run 8/1 ------------- L2285 no current backups** - this is the first test (1) in run 8 which is the `TEST_TITLE("no current backups");` at line number 2285.

**000.002s L2298 empty backup.info** - the first number, 000.002s, is the time in seconds that the test started from the beginning of the run. L2298 is the line number of the test and `empty backup.info` is the test comment.

**000.009s 000.007s L2300 no backups to expire** - again, 000.009s, is the time in seconds that the test started from the beginning of the run. The second number, 000.007s, is the run time of the **previous** test (i.e. `empty backup.info` test took 000.007 seconds to execute). L2300 is the line number of the test and `no backups to expire` is the test comment.

## Adding an Option

Options can be added to a command or multiple commands. Options can be configuration file only, command-line only or valid for both. Once an option is successfully added, the `config.auto.h` and `parse.auto.c.inc` files will automatically be generated by the build system.

To add an option, two files need to be modified:

- `src/build/config/config.yaml`
- `src/build/help/help.xml`

These files are discussed in the following sections along with how to verify the `help` command output.

### config.yaml

There are detailed comment blocks above each section that explain the rules for defining commands and options. Regarding options, there are two types: 1) command line only, and 2) configuration file. With the exception of secrets, all configuration file options can be passed on the command line. To configure an option for the configuration file, the `section:` key must be present.

The `option:` section is broken into sub-sections by a simple comment divider (e.g. `# Repository options`) under which the options are organized alphabetically by option name. To better explain this section, two hypothetical examples will be discussed. For more details, see [config.yaml](https://github.com/pgbackrest/pgbackrest/blob/main/src/build/config/config.yaml).

#### EXAMPLE 1 hypothetical command line only option

```
set:
  type: string
  command:
    backup:
      depend:
        option: stanza
        required: false
    restore:
      default: latest
  command-role:
    main: {}
```

Note that `section:` is not present thereby making this a command-line only option defined as follows:

- `set` - the name of the option
- `type` - the type of the option. Valid values for types are: `boolean`, `hash`, `integer`, `list`, `path`, `size`, `string`, and `time`
- `command` - list each command for which the option is valid. If a command is not listed, then the option is not valid for the command and an error will be thrown if it is attempted to be used for that command. In this case the valid commands are `backup` and `restore`.
- `backup` - details the requirements for the `--set` option for the `backup` command. It is dependent on the option `--stanza`, meaning it is only allowed to be specified for the `backup` command if the `--stanza` option has been specified. And `required: false` indicates that the `--set` option is never required, even with the dependency.
- `restore` - details the requirements for the `--set` option for the `restore` command.
  Since `required:` is omitted, it is not required to be set by the user but it is required by the command and will default to `latest` if it has not been specified by the user.
- `command-role` - defines the processes for which the option is valid. `main` indicates the option will be used by the main process and not be passed on to other local/remote processes.

#### EXAMPLE 2 hypothetical configuration file option

```
repo-test-type:
  section: global
  type: string
  group: repo
  default: full
  allow-list:
    - full
    - diff
    - incr
  command:
    backup: {}
    restore: {}
  command-role:
    main: {}
```

- `repo-test-type` - the name of the option
- `section` - the section of the configuration file where this option is valid (omitted for command line only options, see [Example 1](#example-1-hypothetical-command-line-only-option) above)
- `type` - the type of the option. Valid values for types are: `boolean`, `hash`, `integer`, `list`, `path`, `size`, `string`, and `time`
- `group` - indicates that this option is part of the `repo` group of indexed options and therefore will follow the indexing rules e.g. `repo1-test-type`.
- `default` - sets a default for the option if the option is not provided when the command is run. The default can be global (as it is here) or it can be specified for a specific command in the command section (as in [Example 1](#example-1-hypothetical-command-line-only-option) above).
- `allow-list` - lists the allowable values for the option for all commands for which the option is valid.
- `command` - list each command for which the option is valid. If a command is not listed, then the option is not valid for the command and an error will be thrown if it is attempted to be used for that command. In this case the valid commands are `backup` and `restore`.
- `command-role` - defines the processes for which the option is valid. `main` indicates the option will be used by the main process and not be passed on to other local/remote processes.

When `test.pl` is run the `config.auto.h` file will be generated to contain the constants used for options in the code. For the C enums, any dashes in the option name will be removed, camel-cased and prefixed with `cfgOpt`, e.g. `repo-path` becomes `cfgOptRepoPath`.

### help.xml

All options must be documented or the system will error during the build. To add an option, find the command section identified by `<command id="COMMAND">` where `COMMAND` is the name of the command (e.g. `expire`) or, if the option is used by more than one command and the definition for the option is the same for all of the commands, the `<operation-general title="General Options">` section.

To add an option, add the following to the `<option-list>` section; if it does not exist, then wrap the following in `<option-list>` `</option-list>`. This example uses the boolean option `force` of the `restore` command. Simply replace that with your new option and the appropriate `summary`, `text` and `example`.

```
<option id="force" name="Force">
    <summary>Force a restore.</summary>

    <text>Describe here, in as much detail as needed, what the option does for the command.</text>

    <example>y</example>
</option>
```

> **IMPORTANT:** A period (.) is required to end the `summary` section.
### Testing the help

It is important to run the `help` command unit test after adding an option in case a change is required:

```
pgbackrest/test/test.pl --module=command --test=help --vm-out
```

To verify the `help` command output, build the pgBackRest executable:

```
pgbackrest/test/test.pl --build-only
```

Use the pgBackRest executable to test the help output:

```
test/bin/none/pgbackrest help backup repo-type
```

### Testing the documentation

To quickly view the HTML documentation, the `--no-exe` option can be passed to the documentation generator in order to bypass executing the code elements:

```
pgbackrest/doc/doc.pl --out=html --no-exe
```

The generated HTML files will be placed in the `doc/output/html` directory where they can be viewed locally in a browser.

If Docker is installed, it will be used by the documentation generator to execute the code elements while building the documentation, therefore the `--no-exe` should be omitted (i.e. `pgbackrest/doc/doc.pl --out=html`). `--no-cache` may be used to force a full build even when no code elements have changed since the last build. `--pre` will reuse the container definitions from the prior build and saves time during development.

The containers created for documentation builds can be useful for manually testing or trying out new code or features. The following demonstrates building through just the `quickstart` section of the `user-guide` without encryption.

```
pgbackrest/doc/doc.pl --out=html --include=user-guide --require=/quickstart --var=encrypt=n --no-cache --pre
```

The resulting Docker containers can be listed with `docker ps` and the container can be entered with `docker exec doc-pg-primary bash`. Additionally, the `-u` option can be added for entering the container as a specific user (e.g. `postgres`).

## Submitting a Pull Request

Before submitting a Pull Request:

- Does it meet the [coding standards](https://github.com/pgbackrest/pgbackrest/blob/main/CODING.md)?
- Have [Unit Tests](#writing-a-unit-test) been written and [run](#running-a-unit-test) with 100% coverage?
- If your submission includes changes to the help or online documentation, have the [help](#testing-the-help) and [documentation](#testing-the-documentation) tests been run?
- Has it passed continuous integration testing? Simply renaming your branch with the appendix `-cig` and pushing it to your GitHub account will initiate GitHub Actions to run CI tests.

When submitting a Pull Request:

- Provide a short submission title.
- Write a detailed comment to describe the purpose of your submission and any issue(s) it is resolving; a link to the GitHub issue is also helpful.
- Select the `integration` branch as the base for your PR, do not select `main` nor any other branch.

After submitting a Pull Request:

- One or more reviewers will be assigned.
- Respond to any issues (conversations) in GitHub but do not resolve the conversation; the reviewer is responsible for ensuring the issue raised has been resolved and marking the conversation resolved. It is helpful to supply the commit in your reply if one was submitted to fix the issue.

Lastly, thank you for contributing to pgBackRest!
pgbackrest-release-2.55.1/LICENSE000066400000000000000000000022201500617037600163550ustar00rootroot00000000000000The MIT License (MIT) Portions Copyright (c) 2015-2025, The PostgreSQL Global Development Group Portions Copyright (c) 2013-2025, David Steele Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pgbackrest-release-2.55.1/README.md000066400000000000000000000223751500617037600166440ustar00rootroot00000000000000# pgBackRest
Reliable PostgreSQL Backup & Restore

## Introduction

pgBackRest is a reliable backup and restore solution for PostgreSQL that seamlessly scales up to the largest databases and workloads.

pgBackRest [v2.55.1](https://github.com/pgbackrest/pgbackrest/releases/tag/release/2.55.1) is the current stable release. Release notes are on the [Releases](http://www.pgbackrest.org/release.html) page.

Please give us a star on [GitHub](https://github.com/pgbackrest/pgbackrest) if you like pgBackRest!

## Features

### Parallel Backup & Restore

Compression is usually the bottleneck during backup operations so pgBackRest solves this problem with parallel processing and more efficient compression algorithms such as lz4 and zstd.

### Local or Remote Operation

A custom protocol allows pgBackRest to backup, restore, and archive locally or remotely via TLS/SSH with minimal configuration. An interface to query PostgreSQL is also provided via the protocol layer so that remote access to PostgreSQL is never required, which enhances security.

### Multiple Repositories

Multiple repositories allow, for example, a local repository with minimal retention for fast restores and a remote repository with a longer retention for redundancy and access across the enterprise.

### Full, Differential, & Incremental Backups (at File or Block Level)

Full, differential, and incremental backups are supported. pgBackRest is not susceptible to the time resolution issues of rsync, making differential and incremental backups safe without the requirement to checksum each file. Block-level backups save space by only copying the parts of files that have changed.

### Backup Rotation & Archive Expiration

Retention policies can be set for full and differential backups to create coverage for any time frame. The WAL archive can be maintained for all backups or strictly for the most recent backups. In the latter case WAL required to make older backups consistent will be maintained in the archive.

### Backup Integrity

Checksums are calculated for every file in the backup and rechecked during a restore or verify. After a backup finishes copying files, it waits until every WAL segment required to make the backup consistent reaches the repository.

Backups in the repository may be stored in the same format as a standard PostgreSQL cluster (including tablespaces). If compression is disabled and hard links are enabled it is possible to snapshot a backup in the repository and bring up a PostgreSQL cluster directly on the snapshot. This is advantageous for terabyte-scale databases that are time consuming to restore in the traditional way.

All operations utilize file and directory level fsync to ensure durability.

### Page Checksums

If page checksums are enabled pgBackRest will validate the checksums for every file that is copied during a backup. All page checksums are validated during a full backup and checksums in files that have changed are validated during differential and incremental backups.

Validation failures do not stop the backup process, but warnings with details of exactly which pages have failed validation are output to the console and file log.

This feature allows page-level corruption to be detected early, before backups that contain valid copies of the data have expired.

### Backup Resume

An interrupted backup can be resumed from the point where it was stopped. Files that were already copied are compared with the checksums in the manifest to ensure integrity.
Since this operation can take place entirely on the repository host, it reduces load on the PostgreSQL host and saves time since checksum calculation is faster than compressing and retransmitting data.

### Streaming Compression & Checksums

Compression and checksum calculations are performed in stream while files are being copied to the repository, whether the repository is located locally or remotely.

If the repository is on a repository host, compression is performed on the PostgreSQL host and files are transmitted in a compressed format and simply stored on the repository host. Even when compression is disabled, a lower level of compression is utilized for network transfer to make efficient use of available bandwidth while keeping CPU cost to a minimum.

### Delta Restore

The manifest contains checksums for every file in the backup so that during a restore it is possible to use these checksums to speed processing enormously. On a delta restore any files not present in the backup are first removed and then checksums are generated for the remaining files. Files that match the backup are left in place and the rest of the files are restored as usual. Parallel processing can lead to a dramatic reduction in restore times.

### Parallel, Asynchronous WAL Push & Get

Dedicated commands are included for pushing WAL to the archive and getting WAL from the archive. Both commands support parallelism to accelerate processing and run asynchronously to provide the fastest possible response time to PostgreSQL.

WAL push automatically detects WAL segments that are pushed multiple times and de-duplicates when the segment is identical; otherwise an error is raised. Asynchronous WAL push allows transfer to be offloaded to another process which compresses WAL segments in parallel for maximum throughput. This can be a critical feature for databases with extremely high write volume.

Asynchronous WAL get maintains a local queue of WAL segments that are decompressed and ready for replay. This reduces the time needed to provide WAL to PostgreSQL, which maximizes replay speed. Higher-latency connections and storage (such as S3) benefit the most.

The push and get commands both ensure that the database and repository match by comparing PostgreSQL versions and system identifiers. This virtually eliminates the possibility of misconfiguring the WAL archive location.

### Tablespace & Link Support

Tablespaces are fully supported and on restore tablespaces can be remapped to any location. It is also possible to remap all tablespaces to one location with a single command, which is useful for development restores.

File and directory links are supported for any file or directory in the PostgreSQL cluster. When restoring it is possible to restore all links to their original locations, remap some or all links, or restore some or all links as normal files or directories within the cluster directory.

### S3, Azure, and GCS Compatible Object Store Support

pgBackRest repositories can be located in S3, Azure, and GCS compatible object stores to allow for virtually unlimited capacity and retention.

### Encryption

pgBackRest can encrypt the repository to secure backups wherever they are stored.

### Compatibility with ten versions of PostgreSQL

pgBackRest includes support for ten versions of PostgreSQL, the five supported versions and the last five EOL versions. This allows ample time to upgrade to a supported version.
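As a minimal sketch of the backup types described above (the stanza name `demo` is hypothetical and a configured stanza is assumed):

```
# Run each backup type against a hypothetical stanza named "demo".
pgbackrest --stanza=demo --type=full backup
pgbackrest --stanza=demo --type=diff backup
pgbackrest --stanza=demo --type=incr backup

# Report backups and archive status for all stanzas.
pgbackrest info
```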
## Getting Started

pgBackRest strives to be easy to configure and operate:

- [User guides](http://www.pgbackrest.org/user-guide-index.html) for various operating systems and PostgreSQL versions.
- [Command reference](http://www.pgbackrest.org/command.html) for command-line operations.
- [Configuration reference](http://www.pgbackrest.org/configuration.html) for creating pgBackRest configurations.

Documentation for v1 can be found [here](http://www.pgbackrest.org/1). No further releases are planned for v1 because v2 is backward-compatible with v1 options and repositories.

## Contributions

Contributions to pgBackRest are always welcome! Please see our [Contributing Guidelines](https://github.com/pgbackrest/pgbackrest/blob/main/CONTRIBUTING.md) for details on how to contribute features, improvements or issues.

## Support

pgBackRest is completely free and open source under the [MIT](https://github.com/pgbackrest/pgbackrest/blob/main/LICENSE) license. You may use it for personal or commercial purposes without any restrictions whatsoever. Bug reports are taken very seriously and will be addressed as quickly as possible.

Creating a robust disaster recovery policy with proper replication and backup strategies can be a very complex and daunting task. You may find that you need help during the architecture phase and ongoing support to ensure that your enterprise continues running smoothly.

[Crunchy Data](http://www.crunchydata.com) provides packaged versions of pgBackRest for major operating systems and expert full life-cycle commercial support for pgBackRest and all things PostgreSQL. [Crunchy Data](http://www.crunchydata.com) is committed to providing open source solutions with no vendor lock-in, ensuring that cross-compatibility with the community version of pgBackRest is always strictly maintained.

Please visit [Crunchy Data](http://www.crunchydata.com) for more information.

## Recognition

Primary recognition goes to Stephen Frost for all his valuable advice and criticism during the development of pgBackRest.

[Crunchy Data](http://www.crunchydata.com) has contributed significant time and resources to pgBackRest and continues to actively support development. [Resonate](http://www.resonate.com) also contributed to the development of pgBackRest and allowed early (but well tested) versions to be installed as their primary PostgreSQL backup solution.

[Armchair](https://thenounproject.com/search/?q=lounge+chair&i=129971) graphic by [Sandor Szabo](https://thenounproject.com/sandorsz).
pgbackrest-release-2.55.1/doc/000077500000000000000000000000001500617037600161215ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/.gitignore000066400000000000000000000000101500617037600201020ustar00rootroot00000000000000output/
pgbackrest-release-2.55.1/doc/NEWS.md000066400000000000000000000065711500617037600172260ustar00rootroot00000000000000**June 12, 2023**: [Crunchy Data](https://www.crunchydata.com) is pleased to announce the release of [pgBackRest](https://pgbackrest.org/) 2.46, the latest version of the reliable, easy-to-use backup and restore solution that can seamlessly scale up to the largest databases and workloads.

Over the last year pgBackRest has introduced many exciting new features including block incremental backup, file bundling, repository verification, backup annotations, and SFTP repository storage.

IMPORTANT NOTE: pgBackRest 2.44 is the last version to support PostgreSQL 9.0/9.1/9.2.
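As a brief sketch of how two of the headline features mentioned above are enabled, file bundling and block incremental backup are controlled by the `repo-bundle` and `repo-block` options (the stanza name `demo` is hypothetical):

```
# Hypothetical stanza; enable file bundling and block incremental backup for
# repository 1 during a full backup.
pgbackrest --stanza=demo --repo1-bundle=y --repo1-block=y --type=full backup
```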
pgBackRest supports a robust set of features for managing your backup and recovery infrastructure, including: parallel backup/restore, full/differential/incremental backups, block incremental backup, multiple repositories, delta restore, parallel asynchronous archiving, per-file checksums, page checksums (when enabled) validated during backup, multiple compression types, encryption, partial/failed backup resume, backup from standby, tablespace and link support, S3/Azure/GCS/SFTP support, backup expiration, local/remote operation via SSH or TLS, flexible configuration, and more.

pgBackRest can be installed from the [PostgreSQL Yum Repository](https://yum.postgresql.org/) or the [PostgreSQL APT Repository](https://apt.postgresql.org). Source code can be downloaded from [releases](https://github.com/pgbackrest/pgbackrest/releases).

## Major New Features

### Block Incremental Backup

Block incremental backup saves space in the repository by only storing file parts that have changed since the prior backup. In addition to space savings, this feature makes backup faster since there is less data to compress and transfer. Delta restore is also improved because less data from the repository is required to restore files. See [User Guide](https://pgbackrest.org/user-guide-rhel.html#backup/block) and [pgBackRest File Bundling and Block Incremental Backup](https://www.crunchydata.com/blog/pgbackrest-file-bundling-and-block-incremental-backup).

### File Bundling

File bundling combines smaller files to improve the efficiency of repository reads and writes, especially on object stores such as S3, Azure, and GCS. Zero-length files are stored only in the manifest. See [User Guide](https://pgbackrest.org/user-guide-rhel.html#backup/bundle) and [pgBackRest File Bundling and Block Incremental Backup](https://www.crunchydata.com/blog/pgbackrest-file-bundling-and-block-incremental-backup).

### Verify

The `verify` command checks that files in the repository have not been lost or corrupted and generates a report when problems are found. See [Command Reference](https://pgbackrest.org/command.html#command-verify).

### Backup Key/Value Annotations

Backup annotations allow custom annotations to be stored with a backup and queried with the `info` command. See [User Guide](https://pgbackrest.org/user-guide-rhel.html#backup/annotate).

### SFTP Repository Storage

Repositories can now be stored on an SFTP server. See [User Guide](https://pgbackrest.org/user-guide-rhel.html#sftp-support).

## Links

- [Website](https://pgbackrest.org)
- [User Guides](https://pgbackrest.org/user-guide-index.html)
- [Release Notes](https://pgbackrest.org/release.html)
- [Support](http://pgbackrest.org/#support)

[Crunchy Data](https://www.crunchydata.com) is proud to support the development and maintenance of [pgBackRest](https://github.com/pgbackrest/pgbackrest).
pgbackrest-release-2.55.1/doc/README.md000066400000000000000000000070371500617037600174060ustar00rootroot00000000000000# pgBackRest
Building Documentation

## General Builds

The pgBackRest documentation can output a variety of formats and target several platforms and PostgreSQL versions.

This will build all documentation with defaults:

```bash
./doc.pl
```

The user guide can be built for `rhel` and `debian`. This will build the HTML user guide for RHEL:

```bash
./doc.pl --out=html --include=user-guide --var=os-type=rhel
```

Documentation generation will build a cache of all executed statements and use the cache to build the documentation quickly if no executed statements have changed. This makes proofing text-only edits very fast, but sometimes it is useful to do a full build without using the cache:

```bash
./doc.pl --out=html --include=user-guide --var=os-type=rhel --no-cache
```

Each `os-type` has a default container image that will be used as a base for creating hosts but it may be useful to change the image.

```bash
./doc.pl --out=html --include=user-guide --var=os-type=debian --var=os-image=debian:9
./doc.pl --out=html --include=user-guide --var=os-type=rhel --var=os-image=centos:7
```

The following is a sample RHEL 7 configuration that can be used for building the documentation.

```bash
# Install docker
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y docker-ce
sudo systemctl start docker

# Install tools
sudo yum install -y git wget

# Install latex (for building PDF)
sudo yum install -y texlive texlive-titlesec texlive-sectsty texlive-framed texlive-epstopdf ghostscript

# Install Perl modules via CPAN that do not have packages
sudo yum install -y yum cpanminus
sudo yum groupinstall -y "Development Tools" "Development Libraries"
sudo cpanm install --force XML::Checker::Parser

# Add documentation test user
sudo groupadd test
sudo adduser -gtest -n testdoc
sudo usermod -aG docker testdoc
```

## Building with Packages

A user-specified package can be used when building the documentation. Since the documentation exercises most pgBackRest functionality this is a great way to smoke-test packages.

The package must be located within the pgBackRest repo and the specified path should be relative to the repository base. `test/package` is a good default path to use.

Ubuntu 16.04:

```bash
./doc.pl --out=html --include=user-guide --no-cache --var=os-type=debian --var=os-image=ubuntu:16.04 --var=package=test/package/pgbackrest_2.08-0_amd64.deb
```

RHEL 7:

```bash
./doc.pl --out=html --include=user-guide --no-cache --var=os-type=rhel --var=os-image=centos:7 --var=package=test/package/pgbackrest-2.08-1.el7.x86_64.rpm
```

RHEL 8:

```bash
./doc.pl --out=html --include=user-guide --no-cache --var=os-type=rhel --var=os-image=centos:8 --var=package=test/package/pgbackrest-2.08-1.el8.x86_64.rpm
```

Packages can be built with `test.pl` using the following configuration on top of the configuration given for building the documentation.
```bash
# Install recent git
sudo yum remove -y git
sudo yum install -y https://centos7.iuscommunity.org/ius-release.rpm
sudo yum install -y git2u-all

# Install Perl modules
sudo yum install -y perl-ExtUtils-ParseXS perl-ExtUtils-Embed perl-ExtUtils-MakeMaker perl-YAML-LibYAML

# Install dev libraries
sudo yum install -y libxml2-devel openssl-devel

# Add test user with sudo privileges
sudo adduser -gtest -n test
sudo usermod -aG docker test
sudo chmod 750 /home/test
echo 'test ALL=(ALL) NOPASSWD: ALL' | sudo tee /etc/sudoers.d/pgbackrest

# Add pgbackrest user required by tests
sudo adduser -gtest -n pgbackrest
```
pgbackrest-release-2.55.1/doc/RELEASE.md000066400000000000000000000152561500617037600175320ustar00rootroot00000000000000# Release Build Instructions

## Update CI container builds

If there have been PostgreSQL minor releases since the last pgBackRest release then update the CI containers to include the latest releases. This should be committed before the release.

## Create a branch to test the release

```
git checkout -b release-ci
```

## Update the date, version, and release title

Edit the latest release in `doc/xml/release.xml`, e.g.:

```
<release date="XXXX-XX-XX" version="2.14dev" title="UNDER DEVELOPMENT">
```

to:

```
<release date="YYYY-MM-DD" version="2.14.0" title="Bug Fix and Improvements">
```

Edit version in `src/version.h`, e.g.:

```
#define PROJECT_VERSION "2.14dev"
```

to:

```
#define PROJECT_VERSION "2.14.0"
```

## Update code counts

```
pgbackrest/test/test.pl --code-count
```

## Build release documentation

Be sure to install latex using the instructions from the Vagrantfile before running this step.

```
pgbackrest/doc/release.pl --build
```

## Commit release branch and push to CI for testing

```
git commit -m "Release test"
git push origin release-ci
```

## Run Coverity

- Prepare Coverity build directory (update version/paths as required):

```
mkdir coverity
tar -xvf ~/Downloads/cov-analysis-linux-arm64-2024.6.1.tar.gz --strip-components=1 -C ~/coverity

export COVERITY_TOKEN=?
export COVERITY_EMAIL=?
export COVERITY_VERSION=?
```

- Clean directories and run Coverity:

```
rm -rf .cache/ccache && rm -rf build && rm -rf pgbackrest.tgz && rm -rf cov-int
meson setup -Dwerror=true -Dfatal-errors=true -Dbuildtype=debug build pgbackrest
coverity/bin/cov-build --dir cov-int ninja -C build
tar czvf pgbackrest.tgz cov-int
```

- Upload results:

```
curl --form token=${COVERITY_TOKEN?} --form email="${COVERITY_EMAIL?}" --form file=@pgbackrest.tgz \
    --form version="${COVERITY_VERSION?}" --form description="dev build" \
    "https://scan.coverity.com/builds?project=pgbackrest%2Fpgbackrest"
```

Check issues at https://scan.coverity.com/projects/pgbackrest-pgbackrest then fix and repeat Coverity runs as needed.

## Perform stress testing on release

- Build the documentation with stress testing enabled:

```
pgbackrest/doc/doc.pl --out=html --include=user-guide --require=/stress --var=stress=y --var=stress-scale-table=100 --var=stress-scale-data=1000 --pre --no-cache
```

During data load the archive-push and archive-get processes can be monitored with:

```
docker exec -it doc-pg-primary tail -f /var/log/pgbackrest/demo-archive-push-async.log
docker exec -it doc-pg-standby tail -f /var/log/pgbackrest/demo-archive-get-async.log
```

During backup/restore the processes can be monitored with:

```
docker exec -it doc-repository tail -f /var/log/pgbackrest/demo-backup.log
docker exec -it doc-pg-standby tail -f /var/log/pgbackrest/demo-restore.log
```

Processes can generally be monitored using 'top'. Once `top` is running, press `o` then enter `COMMAND=pgbackrest`. This will filter output to pgbackrest processes.
- Check for many log entries in the `archive-push`/`archive-get` logs to ensure async archiving was enabled:

```
docker exec -it doc-pg-primary vi /var/log/pgbackrest/demo-archive-push-async.log
docker exec -it doc-pg-standby vi /var/log/pgbackrest/demo-archive-get-async.log
```

- Check the backup log to ensure the correct tables/data were created and backed up. It should look something like:

```
INFO: full backup size = 14.9GB, file total = 101004
```

- Check the restore log to ensure the correct tables/data were restored. The size and file total should match exactly.

## Clone web documentation into `doc/site`

```
cd pgbackrest/doc
git clone git@github.com:pgbackrest/website.git site
```

## Deploy web documentation to `doc/site`

```
pgbackrest/doc/release.pl --deploy
```

## Final commit of release to integration

Create release notes based on the pattern in prior git commits (this should be automated at some point), e.g.

```
v2.14.0: Bug Fix and Improvements

Bug Fixes:

* Fix segfault when process-max > 8 for archive-push/archive-get. (Reported by User.)

Improvements:

* Bypass database checks when stanza-delete issued with force. (Contributed by User. Suggested by User.)
* Add configure script for improved multi-platform support.

Documentation Features:

* Add user guide for Debian.
```

Commit to integration with the above message and push to CI.

## Push to main

Push release commit to main once CI testing is complete.

## Create release on github

Create release notes based on the pattern in prior releases (this should be automated at some point), e.g.

```
v2.14.0: Bug Fix and Improvements

**Bug Fixes**:

- Fix segfault when process-max > 8 for archive-push/archive-get. (Reported by User.)

**Improvements**:

- Bypass database checks when stanza-delete issued with force. (Contributed by User. Suggested by User.)
- Add configure script for improved multi-platform support.

**Documentation Features**:

- Add user guide for Debian.
```

The first line will be the release title and the rest will be the body. The tag field should be updated with the current version so a tag is created from main. **Be sure to select the release commit explicitly rather than auto-tagging the last commit in main!**

## Push web documentation to main and deploy

```
cd pgbackrest/doc/site
git commit -m "v2.14.0 documentation."
git push origin main
```

Deploy the documentation on `pgbackrest.org`.

## Notify packagers of new release

Notify the Debian packagers by email and RHEL packagers at https://github.com/pgdg-packaging/pgdg-rpms/issues.

## Announce release on Twitter

## Publish a postgresql.org news item when there are major new features

Start from NEWS.md and update with the new date, version, and interesting features added since the last release. News items are automatically sent to the `pgsql-announce` mailing list once they have been approved.

## Update PostgreSQL ecosystem wiki

Update version, date, and minimum supported version (when changed): https://wiki.postgresql.org/wiki/Ecosystem:Backup#pgBackRest

## Prepare for the next release

Add new release in `doc/xml/release.xml`, e.g.:

```
<release date="XXXX-XX-XX" version="2.15dev" title="UNDER DEVELOPMENT">
```

Edit version in `src/version.h`, e.g.:

```
#define PROJECT_VERSION "2.14.0"
```

to:

```
#define PROJECT_VERSION "2.15dev"
```

Run deploy to generate git history (ctrl-c as soon as the file is generated):

```
pgbackrest/doc/release.pl --build
```

Run code count to add new release file:

```
pgbackrest/test/test.pl --code-count
```

Commit and push to integration: ``` git commit -m "Begin v2.15.0 development."
git push origin integration ``` pgbackrest-release-2.55.1/doc/doc.pl000077500000000000000000000314521500617037600172330ustar00rootroot00000000000000#!/usr/bin/perl #################################################################################################################################### # doc.pl - PgBackRest Doc Builder #################################################################################################################################### #################################################################################################################################### # Perl includes #################################################################################################################################### use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; $SIG{__DIE__} = sub { Carp::confess @_ }; use Cwd qw(abs_path); use File::Basename qw(dirname); use Getopt::Long qw(GetOptions); use Pod::Usage qw(pod2usage); use Storable; use lib dirname(abs_path($0)) . '/lib'; use lib dirname(dirname(abs_path($0))) . '/lib'; use lib dirname(dirname(abs_path($0))) . '/build/lib'; use lib dirname(dirname(abs_path($0))) . '/test/lib'; use pgBackRestTest::Common::ExecuteTest; use pgBackRestTest::Common::Storage; use pgBackRestTest::Common::StoragePosix; use pgBackRestDoc::Common::Doc; use pgBackRestDoc::Common::DocConfig; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::DocRender; use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Html::DocHtmlSite; use pgBackRestDoc::Latex::DocLatex; use pgBackRestDoc::Markdown::DocMarkdown; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # Usage #################################################################################################################################### =head1 NAME doc.pl - Generate pgBackRest documentation =head1 SYNOPSIS doc.pl [options] General Options: --help Display usage and exit --version Display pgBackRest version --quiet Sets log level to ERROR --log-level Log level for execution (e.g. ERROR, WARN, INFO, DEBUG) --deploy Write exe.cache into resource for persistence --no-exe Should commands be executed when building help? 
(for testing only) --no-cache Don't use execution cache --cache-only Only use the execution cache - don't attempt to generate it --pre Pre-build containers for execute elements marked pre --var Override defined variable --key-var Override defined variable and use in cache key --doc-path Document path to render (manifest.xml should be located here) --out Output types (html, pdf, markdown) --out-preserve Don't clean output directory --require Require only certain sections of the document (to speed testing) --include Include source in generation (links will reference website) --exclude Exclude source from generation (links will reference website) Variable Options: --dev Set 'dev' variable to 'y' --debug Set 'debug' variable to 'y' =cut #################################################################################################################################### # Load command line parameters and config (see usage above for details) #################################################################################################################################### my $bHelp = false; my $bVersion = false; my $bQuiet = false; my $strLogLevel = 'info'; my $bNoExe = false; my $bNoCache = false; my $bCacheOnly = false; my $rhVariableOverride = {}; my $rhKeyVariableOverride = {}; my $strDocPath; my @stryOutput; my $bOutPreserve = false; my @stryRequire; my @stryInclude; my @stryExclude; my $bDeploy = false; my $bDev = false; my $bDebug = false; my $bPre = false; GetOptions ('help' => \$bHelp, 'version' => \$bVersion, 'quiet' => \$bQuiet, 'log-level=s' => \$strLogLevel, 'out=s@' => \@stryOutput, 'out-preserve' => \$bOutPreserve, 'require=s@' => \@stryRequire, 'include=s@' => \@stryInclude, 'exclude=s@' => \@stryExclude, 'no-exe', \$bNoExe, 'deploy', \$bDeploy, 'no-cache', \$bNoCache, 'dev', \$bDev, 'debug', \$bDebug, 'pre', \$bPre, 'cache-only', \$bCacheOnly, 'key-var=s%', $rhKeyVariableOverride, 'var=s%', $rhVariableOverride, 'doc-path=s', \$strDocPath) or pod2usage(2); #################################################################################################################################### # Run in eval block to catch errors #################################################################################################################################### eval { # Display version and exit if requested if ($bHelp || $bVersion) { print PROJECT_NAME . ' ' . PROJECT_VERSION . 
" Documentation Builder\n"; if ($bHelp) { print "\n"; pod2usage(); } exit 0; } # Disable cache when no exe if ($bNoExe) { $bNoCache = true; } # Make sure options are set correctly for deploy if ($bDeploy) { my $strError = 'cannot be specified for deploy'; !$bNoExe or confess "--no-exe ${strError}"; !@stryRequire or confess "--require ${strError}"; } # one --include must be specified when --required is if (@stryRequire && @stryInclude != 1) { confess "one --include is required when --require is specified"; } # Set console log level if ($bQuiet) { $strLogLevel = 'error'; } # If --dev passed then set the dev var to 'y' if ($bDev) { $rhVariableOverride->{'dev'} = 'y'; } # If --debug passed then set the debug var to 'y' if ($bDebug) { $rhVariableOverride->{'debug'} = 'y'; } # Doesn't make sense to pass include and exclude if (@stryInclude > 0 && @stryExclude > 0) { confess "cannot specify both --include and --exclude"; } logLevelSet(undef, uc($strLogLevel), OFF); # Get the base path my $strBasePath = abs_path(dirname($0)); my $oStorageDoc = new pgBackRestTest::Common::Storage( $strBasePath, new pgBackRestTest::Common::StoragePosix({bFileSync => false, bPathSync => false})); if (!defined($strDocPath)) { $strDocPath = $strBasePath; } my $strOutputPath = "${strDocPath}/output"; # Create the out path if it does not exist if (!-e $strOutputPath) { mkdir($strOutputPath) or confess &log(ERROR, "unable to create path ${strOutputPath}"); } # Merge key variables into the variable list and ensure there are no duplicates foreach my $strKey (sort(keys(%{$rhKeyVariableOverride}))) { if (defined($rhVariableOverride->{$strKey})) { confess &log(ERROR, "'${strKey}' cannot be passed as --var and --key-var"); } $rhVariableOverride->{$strKey} = $rhKeyVariableOverride->{$strKey}; } # Build C code my $strBuildPath = "${strBasePath}/output/build"; my $strRepoPath = dirname($strBasePath); my $strBuildNinja = "${strBuildPath}/build.ninja"; &log(INFO, "build C helper"); if (!-e $strBuildNinja) { executeTest("meson setup -Dwerror=true -Dfatal-errors=true -Dbuildtype=debug ${strBuildPath} ${strRepoPath}"); } executeTest("ninja -C ${strBuildPath} doc/src/doc-pgbackrest"); executeTest( "${strBuildPath}/doc/src/doc-pgbackrest --repo-path=${strRepoPath}" . ($strLogLevel ne 'info' ? " --log-level=${strLogLevel}" : ''), {bShowOutputAsync => true}); # Load the manifest my $oManifest = new pgBackRestDoc::Common::DocManifest( $oStorageDoc, \@stryRequire, \@stryInclude, \@stryExclude, $rhKeyVariableOverride, $rhVariableOverride, $strDocPath, $bDeploy, $bCacheOnly, $bPre); if (!$bNoCache) { $oManifest->cacheRead(); } # If no outputs were given if (@stryOutput == 0) { @stryOutput = $oManifest->renderList(); if ($oManifest->isBackRest()) { push(@stryOutput, 'man'); } } # Build host containers if (!$bCacheOnly && !$bNoExe) { foreach my $strSource ($oManifest->sourceList()) { if ((@stryInclude == 0 || grep(/$strSource/, @stryInclude)) && !grep(/$strSource/, @stryExclude)) { &log(INFO, "source $strSource"); foreach my $oHostDefine ($oManifest->sourceGet($strSource)->{doc}->nodeList('host-define', false)) { if ($oManifest->evaluateIf($oHostDefine)) { my $strImage = $oManifest->variableReplace($oHostDefine->paramGet('image')); my $strFrom = $oManifest->variableReplace($oHostDefine->paramGet('from')); my $strDockerfile = "${strOutputPath}/doc-host.dockerfile"; &log(INFO, "Build vm '${strImage}' from '${strFrom}'"); $oStorageDoc->put( $strDockerfile, "FROM ${strFrom}\n\n" . trim($oManifest->variableReplace($oHostDefine->valueGet())) . 
"\n"); executeTest("docker build -f ${strDockerfile} -t ${strImage} ${strBasePath}", {bSuppressStdErr => true}); } } } } } # Render output for my $strOutput (@stryOutput) { &log(INFO, "render ${strOutput} output"); # Man output has already been generated in C so do not remove it next if ($strOutput eq 'man'); # Clean contents of out directory if (!$bOutPreserve) { my $strOutputPath = $strOutput eq 'pdf' ? "${strOutputPath}/latex" : "${strOutputPath}/$strOutput"; # Clean the current out path if it exists if (-e $strOutputPath) { executeTest("rm -rf ${strOutputPath}/*"); } # Else create the html path else { mkdir($strOutputPath) or confess &log(ERROR, "unable to create path ${strOutputPath}"); } } $oManifest->renderGet($strOutput); if ($strOutput eq 'markdown') { my $oMarkdown = new pgBackRestDoc::Markdown::DocMarkdown ( $oManifest, "${strBasePath}/xml", "${strOutputPath}/markdown", !$bNoExe ); $oMarkdown->process(); } elsif ($strOutput eq 'html') { my $oHtmlSite = new pgBackRestDoc::Html::DocHtmlSite ( $oManifest, "${strBasePath}/xml", "${strOutputPath}/html", "${strBasePath}/resource/html/default.css", defined($oManifest->variableGet('project-favicon')) ? "${strBasePath}/resource/html/" . $oManifest->variableGet('project-favicon') : undef, defined($oManifest->variableGet('project-logo')) ? "${strBasePath}/resource/" . $oManifest->variableGet('project-logo') : undef, !$bNoExe ); $oHtmlSite->process(); } elsif ($strOutput eq 'pdf') { my $oLatex = new pgBackRestDoc::Latex::DocLatex ( $oManifest, "${strBasePath}/xml", "${strOutputPath}/latex", "${strBasePath}/resource/latex/preamble.tex", !$bNoExe ); $oLatex->process(); } } # Cache the manifest (mostly useful for testing rendering changes in the code) if (!$bNoCache && !$bCacheOnly) { $oManifest->cacheWrite(); } # Exit with success exit 0; } #################################################################################################################################### # Check for errors #################################################################################################################################### or do { # If a backrest exception then return the code exit $EVAL_ERROR->code() if (isException(\$EVAL_ERROR)); # Else output the unhandled error print $EVAL_ERROR; exit ERROR_UNHANDLED; }; # It shouldn't be possible to get here &log(ASSERT, 'execution reached invalid location in ' . __FILE__ . ', line ' . __LINE__); exit ERROR_ASSERT; pgbackrest-release-2.55.1/doc/example/000077500000000000000000000000001500617037600175545ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/example/pgsql-pgbackrest-info.sql000066400000000000000000000015471500617037600245060ustar00rootroot00000000000000-- An example of monitoring pgBackRest from within PostgreSQL -- -- Use copy to export data from the pgBackRest info command into the jsonb -- type so it can be queried directly by PostgreSQL. 
-- Create monitor schema create schema monitor; -- Get pgBackRest info in JSON format create function monitor.pgbackrest_info() returns jsonb AS $$ declare data jsonb; begin -- Create a temp table to hold the JSON data create temp table temp_pgbackrest_data (data text); -- Copy data into the table directly from the pgBackRest info command copy temp_pgbackrest_data (data) from program 'pgbackrest --output=json info' (format text); select replace(temp_pgbackrest_data.data, E'\n', '\n')::jsonb into data from temp_pgbackrest_data; drop table temp_pgbackrest_data; return data; end $$ language plpgsql; pgbackrest-release-2.55.1/doc/example/pgsql-pgbackrest-query.sql000066400000000000000000000011401500617037600247050ustar00rootroot00000000000000-- Get last successful backup for each stanza -- -- Requires the monitor.pgbackrest_info function. with stanza as ( select data->'name' as name, data->'backup'->( jsonb_array_length(data->'backup') - 1) as last_backup, data->'archive'->( jsonb_array_length(data->'archive') - 1) as current_archive from jsonb_array_elements(monitor.pgbackrest_info()) as data ) select name, to_timestamp( (last_backup->'timestamp'->>'stop')::numeric) as last_successful_backup, current_archive->>'max' as last_archived_wal from stanza; pgbackrest-release-2.55.1/doc/lib/000077500000000000000000000000001500617037600166675ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/000077500000000000000000000000001500617037600213425ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/000077500000000000000000000000001500617037600225725ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/Doc.pm000066400000000000000000000606301500617037600236420ustar00rootroot00000000000000#################################################################################################################################### # DOC MODULE #################################################################################################################################### package pgBackRestDoc::Common::Doc; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; use Cwd qw(abs_path); use File::Basename qw(dirname); use Scalar::Util qw(blessed); use XML::Checker::Parser; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; $self->{strClass} = $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{strFileName}, my $strSgmlSearchPath, ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'strFileName', required => false}, {name => 'strSgmlSearchPath', required => false}, ); # Load the doc from a file if one has been defined if (defined($self->{strFileName})) { my $oParser = XML::Checker::Parser->new(ErrorContext => 2, Style => 'Tree'); $oParser->set_sgml_search_path( defined($strSgmlSearchPath) ? $strSgmlSearchPath : dirname(dirname(abs_path($0))) . 
'/doc/xml/dtd'); my $oTree; eval { local $XML::Checker::FAIL = sub { my $iCode = shift; die XML::Checker::error_string($iCode, @_); }; $oTree = $oParser->parsefile($self->{strFileName}); return true; } # Report any error that stopped parsing or do { my $strException = $EVAL_ERROR; $strException =~ s/at \/.*?$//s; # remove module line number die "malformed xml in '$self->{strFileName}':\n" . trim($strException); }; # Parse and build the doc $self->{oDoc} = $self->build($self->parse(${$oTree}[0], ${$oTree}[1])); } # Else create a blank doc else { $self->{oDoc} = {name => 'doc', children => []}; } $self->{strName} = 'root'; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # parse # # Parse the xml doc into a more usable hash and array structure. #################################################################################################################################### sub parse { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $oyNode ) = logDebugParam ( __PACKAGE__ . '->parse', \@_, {name => 'strName', trace => true}, {name => 'oyNode', trace => true} ); my %oOut; my $iIndex = 0; my $bText = $strName eq 'text' || $strName eq 'p' || $strName eq 'title' || $strName eq 'summary' || $strName eq 'table-cell' || $strName eq 'table-column' || $strName eq 'list-item' || $strName eq 'admonition'; # Store the node name $oOut{name} = $strName; if (keys(%{$$oyNode[$iIndex]})) { $oOut{param} = $$oyNode[$iIndex]; } $iIndex++; # Look for strings and children while (defined($$oyNode[$iIndex])) { # Process string data if (ref(\$$oyNode[$iIndex]) eq 'SCALAR' && $$oyNode[$iIndex] eq '0') { $iIndex++; my $strBuffer = $$oyNode[$iIndex++]; # Strip tabs, CRs, and LFs $strBuffer =~ s/\t|\r//g; # If anything is left if (length($strBuffer) > 0) { # If text node then create array entries for strings if ($bText) { if (!defined($oOut{children})) { $oOut{children} = []; } push(@{$oOut{children}}, $strBuffer); } # Don't allow strings mixed with children elsif (length(trim($strBuffer)) > 0) { if (defined($oOut{children})) { confess "text mixed with children in node ${strName} (spaces count)"; } if (defined($oOut{value})) { confess "value is already defined in node ${strName} - this shouldn't happen"; } # Don't allow text mixed with $oOut{value} = $strBuffer; } } } # Process a child else { if (defined($oOut{value}) && $bText) { confess "text mixed with children in node ${strName} before child " . $$oyNode[$iIndex++] . " (spaces count)"; } if (!defined($oOut{children})) { $oOut{children} = []; } push(@{$oOut{children}}, $self->parse($$oyNode[$iIndex++], $$oyNode[$iIndex++])); } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oDoc', value => \%oOut, trace => true} ); } #################################################################################################################################### # build # # Restructure the doc to make walking it easier. #################################################################################################################################### sub build { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oDoc ) = logDebugParam ( __PACKAGE__ . 
'->build', \@_, {name => 'oDoc', trace => true} ); # Initialize the node object my $oOut = {name => $$oDoc{name}, children => [], value => $$oDoc{value}}; my $strError = "in node $$oDoc{name}"; # Get all params if (defined($$oDoc{param})) { for my $strParam (keys %{$$oDoc{param}}) { $$oOut{param}{$strParam} = $$oDoc{param}{$strParam}; } } if ($$oDoc{name} eq 'p' || $$oDoc{name} eq 'title' || $$oDoc{name} eq 'summary' || $$oDoc{name} eq 'table-cell' || $$oDoc{name} eq 'table-column' || $$oDoc{name} eq 'list-item' || $$oDoc{name} eq 'admonition') { $$oOut{field}{text} = $oDoc; } elsif (defined($$oDoc{children})) { for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) { my $oSub = $$oDoc{children}[$iIndex]; my $strName = $$oSub{name}; if ($strName eq 'text') { $$oOut{field}{text} = $oSub; } elsif ((defined($$oSub{value}) && !defined($$oSub{param})) && $strName ne 'code-block') { $$oOut{field}{$strName} = $$oSub{value}; } elsif (!defined($$oSub{children}) && !defined($$oSub{value}) && !defined($$oSub{param})) { $$oOut{field}{$strName} = true; } else { push(@{$$oOut{children}}, $self->build($oSub)); } } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oDoc', value => $oOut, trace => true} ); } #################################################################################################################################### # nodeGetById # # Return a node by name - error if more than one exists #################################################################################################################################### sub nodeGetById { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $strId, $bRequired, ) = logDebugParam ( __PACKAGE__ . 'nodeGetById', \@_, {name => 'strName', trace => true}, {name => 'strId', required => false, trace => true}, {name => 'bRequired', default => true, trace => true} ); my $oDoc = $self->{oDoc}; my $oNode; for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) { if ((defined($strName) && $$oDoc{children}[$iIndex]{name} eq $strName) && (!defined($strId) || $$oDoc{children}[$iIndex]{param}{id} eq $strId)) { if (!defined($oNode)) { $oNode = $$oDoc{children}[$iIndex]; } else { confess "found more than one child ${strName} in node $$oDoc{name}"; } } } if (!defined($oNode) && $bRequired) { confess "unable to find child ${strName}" . (defined($strId) ? " (${strId})" : '') . 
" in node $$oDoc{name}"; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oNodeDoc', value => $self->nodeBless($oNode), trace => true} ); } #################################################################################################################################### # nodeGet # # Return a node by name - error if more than one exists #################################################################################################################################### sub nodeGet { my $self = shift; return $self->nodeGetById(shift, undef, shift); } #################################################################################################################################### # nodeTest # # Test that a node exists #################################################################################################################################### sub nodeTest { my $self = shift; return defined($self->nodeGetById(shift, undef, false)); } #################################################################################################################################### # nodeAdd # # Add a node to the current doc's child list #################################################################################################################################### sub nodeAdd { my $self = shift; my $strName = shift; my $strValue = shift; my $oParam = shift; my $oField = shift; my $oDoc = $self->{oDoc}; my $oNode = {name => $strName, value => $strValue, param => $oParam, field => $oField}; push(@{$$oDoc{children}}, $oNode); return $self->nodeBless($oNode); } #################################################################################################################################### # nodeBless # # Make a new Doc object from a node. #################################################################################################################################### sub nodeBless { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oNode ) = logDebugParam ( __PACKAGE__ . '->nodeBless', \@_, {name => 'oNode', required => false, trace => true} ); my $oDoc; if (defined($oNode)) { $oDoc = {}; bless $oDoc, $self->{strClass}; $oDoc->{strClass} = $self->{strClass}; $oDoc->{strName} = $$oNode{name}; $oDoc->{oDoc} = $oNode; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oDoc', value => $oDoc, trace => true} ); } #################################################################################################################################### # nodeList # # Get a list of nodes. #################################################################################################################################### sub nodeList { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $bRequired, ) = logDebugParam ( __PACKAGE__ . '->nodeList', \@_, {name => 'strName', required => false, trace => true}, {name => 'bRequired', default => true, trace => true}, ); my $oDoc = $self->{oDoc}; my @oyNode; if (defined($$oDoc{children})) { for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) { if (!defined($strName) || $$oDoc{children}[$iIndex]{name} eq $strName) { if (ref(\$$oDoc{children}[$iIndex]) eq "SCALAR") { push(@oyNode, $$oDoc{children}[$iIndex]); } else { push(@oyNode, $self->nodeBless($$oDoc{children}[$iIndex])); } } } } if (@oyNode == 0 && $bRequired) { confess 'unable to find ' . 
(defined($strName) ? "children named '${strName}'" : 'any children') . " in node $$oDoc{name}"; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oyNode', value => \@oyNode, trace => true} ); } #################################################################################################################################### # nodeRemove # # Remove a child node. #################################################################################################################################### sub nodeRemove { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oChildRemove ) = logDebugParam ( __PACKAGE__ . '->nodeRemove', \@_, {name => 'oChildRemove', required => false, trace => true} ); my $bRemove = false; my $oDoc = $self->{oDoc}; # Error if there are no children if (!defined($$oDoc{children})) { confess &log(ERROR, "node has no children"); } for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) { if ($$oDoc{children}[$iIndex] == $oChildRemove->{oDoc}) { splice(@{$$oDoc{children}}, $iIndex, 1); $bRemove = true; last; } } if (!$bRemove) { confess &log(ERROR, "child was not found in node, could not be removed"); } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # nodeReplace # # Replace a child node with one or more child nodes. #################################################################################################################################### sub nodeReplace { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oChildRemove, $oyChildReplace, ) = logDebugParam ( __PACKAGE__ . '->nodeReplace', \@_, {name => 'oChildRemove', trace => true}, {name => 'oChildReplace', trace => true}, ); my $bReplace = false; my $iReplaceIdx = undef; my $iReplaceTotal = undef; my $oDoc = $self->{oDoc}; # Error if there are no children if (!defined($$oDoc{children})) { confess &log(ERROR, "node has no children"); } for (my $iIndex = 0; $iIndex < @{$$oDoc{children}}; $iIndex++) { if ($$oDoc{children}[$iIndex] == $oChildRemove->{oDoc}) { splice(@{$$oDoc{children}}, $iIndex, 1); splice(@{$$oDoc{children}}, $iIndex, 0, @{$oyChildReplace}); $iReplaceIdx = $iIndex; $iReplaceTotal = scalar(@{$oyChildReplace}); $bReplace = true; last; } } if (!$bReplace) { confess &log(ERROR, "child was not found in node, could not be replaced"); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'iReplaceIdx', value => $iReplaceIdx, trace => true}, {name => 'iReplaceTotal', value => $iReplaceTotal, trace => true}, ); } #################################################################################################################################### # nameGet #################################################################################################################################### sub nameGet { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . 
'->nameGet'); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strName', value => ${$self->{oDoc}}{name}, trace => true} ); } #################################################################################################################################### # valueGet #################################################################################################################################### sub valueGet { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->valueGet'); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strValue', value => ${$self->{oDoc}}{value}, trace => true} ); } #################################################################################################################################### # valueSet #################################################################################################################################### sub valueSet { my $self = shift; my $strValue = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->valueSet'); # Set the value ${$self->{oDoc}}{value} = $strValue; # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # paramGet # # Get a parameter from a node. #################################################################################################################################### sub paramGet { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $bRequired, $strDefault, $strType ) = logDebugParam ( __PACKAGE__ . '->paramGet', \@_, {name => 'strName', trace => true}, {name => 'bRequired', default => true, trace => true}, {name => 'strDefault', required => false, trace => true}, {name => 'strType', default => 'param', trace => true} ); my $strValue = ${$self->{oDoc}}{$strType}{$strName}; if (!defined($strValue)) { if ($bRequired) { confess "${strType} '${strName}' is required in node '$self->{strName}'"; } $strValue = $strDefault; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strValue', value => $strValue, trace => true} ); } #################################################################################################################################### # paramTest # # Test that a parameter exists or has a certain value. #################################################################################################################################### sub paramTest { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $strExpectedValue, $strType ) = logDebugParam ( __PACKAGE__ . 
'->paramTest', \@_, {name => 'strName', trace => true}, {name => 'strExpectedValue', required => false, trace => true}, {name => 'strType', default => 'param', trace => true} ); my $bResult = true; my $strValue = $self->paramGet($strName, false, undef, $strType); if (!defined($strValue)) { $bResult = false; } elsif (defined($strExpectedValue) && $strValue ne $strExpectedValue) { $bResult = false; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'bResult', value => $bResult, trace => true} ); } #################################################################################################################################### # paramSet # # Set a parameter in a node. #################################################################################################################################### sub paramSet { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $strValue, $strType ) = logDebugParam ( __PACKAGE__ . '->paramSet', \@_, {name => 'strName', trace => true}, {name => 'strValue', required => false, trace => true}, {name => 'strType', default => 'param', trace => true} ); ${$self->{oDoc}}{$strType}{$strName} = $strValue; # Return from function and log return values if any logDebugReturn($strOperation); } #################################################################################################################################### # fieldGet # # Get a field from a node. #################################################################################################################################### sub fieldGet { my $self = shift; return $self->paramGet(shift, shift, shift, 'field'); } #################################################################################################################################### # fieldTest # # Test if a field exists. #################################################################################################################################### sub fieldTest { my $self = shift; return $self->paramTest(shift, shift, 'field'); } #################################################################################################################################### # textGet # # Get a field from a node. #################################################################################################################################### sub textGet { my $self = shift; return $self->nodeBless($self->paramGet('text', shift, shift, 'field')); } #################################################################################################################################### # textSet # # Get a field from a node. #################################################################################################################################### sub textSet { my $self = shift; my $oText = shift; if (blessed($oText) && $oText->isa('pgBackRestDoc::Common::Doc')) { $oText = $oText->{oDoc}; } elsif (ref($oText) ne 'HASH') { $oText = {name => 'text', children => [$oText]}; } return $self->paramSet('text', $oText, 'field'); } #################################################################################################################################### # fieldSet # # Set a parameter in a node. 
#################################################################################################################################### sub fieldSet { my $self = shift; $self->paramSet(shift, shift, 'field'); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/DocConfig.pm000066400000000000000000000315331500617037600247700ustar00rootroot00000000000000#################################################################################################################################### # DOC CONFIG MODULE #################################################################################################################################### package pgBackRestDoc::Common::DocConfig; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Custom::DocConfigData; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # Help types #################################################################################################################################### use constant CONFIG_HELP_COMMAND => 'command'; push @EXPORT, qw(CONFIG_HELP_COMMAND); use constant CONFIG_HELP_DESCRIPTION => 'description'; push @EXPORT, qw(CONFIG_HELP_DESCRIPTION); use constant CONFIG_HELP_INTERNAL => 'internal'; use constant CONFIG_HELP_OPTION => 'option'; push @EXPORT, qw(CONFIG_HELP_OPTION); use constant CONFIG_HELP_SECTION => 'section'; push @EXPORT, qw(CONFIG_HELP_SECTION); use constant CONFIG_HELP_SUMMARY => 'summary'; push @EXPORT, qw(CONFIG_HELP_SUMMARY); use constant CONFIG_HELP_SOURCE => 'source'; push @EXPORT, qw(CONFIG_HELP_SOURCE); use constant CONFIG_HELP_SOURCE_DEFAULT => 'default'; use constant CONFIG_HELP_SOURCE_SECTION => CONFIG_HELP_SECTION; use constant CONFIG_HELP_SOURCE_COMMAND => CONFIG_HELP_COMMAND; push @EXPORT, qw(CONFIG_HELP_SOURCE_COMMAND); #################################################################################################################################### # Config Section Types #################################################################################################################################### use constant CFGDEF_COMMAND => 'command'; use constant CFGDEF_GENERAL => 'general'; use constant CFGDEF_LOG => 'log'; use constant CFGDEF_REPOSITORY => 'repository'; #################################################################################################################################### # Option define hash #################################################################################################################################### my $rhConfigDefine = cfgDefine(); #################################################################################################################################### # Returns the option defines based on the command. #################################################################################################################################### sub docConfigCommandDefine { my $strOption = shift; my $strCommand = shift; if (defined($strCommand)) { return defined($rhConfigDefine->{$strOption}{&CFGDEF_COMMAND}) && defined($rhConfigDefine->{$strOption}{&CFGDEF_COMMAND}{$strCommand}) && ref($rhConfigDefine->{$strOption}{&CFGDEF_COMMAND}{$strCommand}) eq 'HASH' ? 
$rhConfigDefine->{$strOption}{&CFGDEF_COMMAND}{$strCommand} : undef; } return; } #################################################################################################################################### # Does the option have a default for this command? #################################################################################################################################### sub docConfigOptionDefault { my $strOption = shift; my $strCommand = shift; # Get the command define my $oCommandDefine = docConfigCommandDefine($strOption, $strCommand); # Check for default in command my $strDefault = defined($oCommandDefine) ? $$oCommandDefine{&CFGDEF_DEFAULT} : undef; # If defined return, else try to grab the global default return defined($strDefault) ? $strDefault : $rhConfigDefine->{$strOption}{&CFGDEF_DEFAULT}; } push @EXPORT, qw(docConfigOptionDefault); #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{oDoc}, $self->{oDocRender} ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oDoc'}, {name => 'oDocRender', required => false} ); $self->process(); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # process # # Parse the xml doc into commands and options. #################################################################################################################################### sub process { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->process'); # Iterate through all commands my $oDoc = $self->{oDoc}; my $oConfigHash = {}; foreach my $strCommand (cfgDefineCommandList()) { my $oCommandDoc = $oDoc->nodeGet('operation')->nodeGet('command-list')->nodeGetById('command', $strCommand); $$oConfigHash{&CONFIG_HELP_COMMAND}{$strCommand} = {}; my $oCommand = $$oConfigHash{&CONFIG_HELP_COMMAND}{$strCommand}; $$oCommand{&CONFIG_HELP_SUMMARY} = $oCommandDoc->nodeGet('summary')->textGet(); $$oCommand{&CONFIG_HELP_DESCRIPTION} = $oCommandDoc->textGet(); $oCommand->{&CONFIG_HELP_INTERNAL} = cfgDefineCommand()->{$strCommand}{&CFGDEF_INTERNAL}; } # Iterate through all options my $oOptionDefine = cfgDefine(); foreach my $strOption (sort(keys(%{$oOptionDefine}))) { # Iterate through all commands my @stryCommandList = sort(keys(%{defined($$oOptionDefine{$strOption}{&CFGDEF_COMMAND}) ? $$oOptionDefine{$strOption}{&CFGDEF_COMMAND} : $$oConfigHash{&CONFIG_HELP_COMMAND}})); foreach my $strCommand (@stryCommandList) { if (!defined($$oConfigHash{&CONFIG_HELP_COMMAND}{$strCommand})) { next; } # Skip the option if it is not valid for this command and the default role. Only options valid for the default role are # shown in help because that is the only role available to a user.
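# For example (a hypothetical define, assuming CFGDEF_COMMAND_ROLE and CFGCMD_ROLE_MAIN resolve to 'command-role' and
# 'main'): an option whose define includes {command => {backup => {'command-role' => {main => {}}}}} is documented for
# the backup command, while an option whose backup entry lacks a 'main' role is skipped by the check below.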
if (!defined($oOptionDefine->{$strOption}{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_COMMAND_ROLE}{&CFGCMD_ROLE_MAIN})) { next; } my $oCommandDoc = $oDoc->nodeGet('operation')->nodeGet('command-list')->nodeGetById('command', $strCommand); # First check if the option is documented in the command my $oOptionDoc; my $strOptionSource; my $oCommandOptionList = $oCommandDoc->nodeGet('option-list', false); if (defined($oCommandOptionList)) { $oOptionDoc = $oCommandOptionList->nodeGetById('option', $strOption, false); $strOptionSource = CONFIG_HELP_SOURCE_COMMAND if (defined($oOptionDoc)); } # If the option wasn't found keep looking my $strSection; if (!defined($oOptionDoc)) { # Next see if it's documented in the section if (defined($$oOptionDefine{$strOption}{&CFGDEF_SECTION})) { # &log(INFO, " trying section ${strSection}"); foreach my $oSectionNode ($oDoc->nodeGet('config')->nodeGet('config-section-list')->nodeList()) { my $oOptionDocCheck = $oSectionNode->nodeGetById('config-key-list') ->nodeGetById('config-key', $strOption, false); if ($oOptionDocCheck) { if (defined($oOptionDoc)) { confess 'option exists in more than one section'; } $oOptionDoc = $oOptionDocCheck; $strOptionSource = CONFIG_HELP_SOURCE_SECTION; $strSection = $oSectionNode->paramGet('id'); } } } # If no section is defined then look in the default command option list else { $oOptionDoc = $oDoc->nodeGet('operation')->nodeGet('operation-general')->nodeGet('option-list') ->nodeGetById('option', $strOption, false); $strOptionSource = CONFIG_HELP_SOURCE_DEFAULT if (defined($oOptionDoc)); # If a section is specified then use it, otherwise the option should be general since it is not for a specific # command if (defined($oOptionDoc)) { $strSection = $oOptionDoc->paramGet('section', false); if (!defined($strSection)) { $strSection = "general"; } } } } # If the option wasn't found then error if (!defined($oOptionDoc)) { confess &log(ERROR, "unable to find option '${strOption}' for command '${strCommand}'") } # if the option is documented in the command then it should be accessible from the command line only. if (!defined($strSection)) { if (defined($$oOptionDefine{$strOption}{&CFGDEF_SECTION})) { &log(ERROR, "option ${strOption} defined in command ${strCommand} must not have " . CFGDEF_SECTION . " defined"); } } # Store the option in the command $$oConfigHash{&CONFIG_HELP_COMMAND}{$strCommand}{&CONFIG_HELP_OPTION}{$strOption}{&CONFIG_HELP_SOURCE} = $strOptionSource; my $oCommandOption = $$oConfigHash{&CONFIG_HELP_COMMAND}{$strCommand}{&CONFIG_HELP_OPTION}{$strOption}; $$oCommandOption{&CONFIG_HELP_SUMMARY} = $oOptionDoc->nodeGet('summary')->textGet(); $$oCommandOption{&CONFIG_HELP_DESCRIPTION} = $oOptionDoc->textGet(); $oCommandOption->{&CONFIG_HELP_INTERNAL} = cfgDefineCommand()->{$strCommand}{&CFGDEF_INTERNAL} ? true : $oOptionDefine->{$strOption}{&CFGDEF_INTERNAL}; # If internal is defined for the option/command it overrides everything else if (defined($oOptionDefine->{$strOption}{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_INTERNAL})) { $oCommandOption->{&CONFIG_HELP_INTERNAL} = $oOptionDefine->{$strOption}{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_INTERNAL}; } # If the option did not come from the command also store in global option list. This prevents duplication of commonly # used options. 
if ($strOptionSource ne CONFIG_HELP_SOURCE_COMMAND) { $$oConfigHash{&CONFIG_HELP_OPTION}{$strOption}{&CONFIG_HELP_SUMMARY} = $$oCommandOption{&CONFIG_HELP_SUMMARY}; my $oOption = $$oConfigHash{&CONFIG_HELP_OPTION}{$strOption}; if (defined($strSection)) { $$oOption{&CONFIG_HELP_SECTION} = $strSection; } $$oOption{&CONFIG_HELP_DESCRIPTION} = $$oCommandOption{&CONFIG_HELP_DESCRIPTION}; $oOption->{&CONFIG_HELP_INTERNAL} = $oOptionDefine->{$strOption}{&CFGDEF_INTERNAL}; } } } # Store the config hash $self->{oConfigHash} = $oConfigHash; # Return from function and log return values if any logDebugReturn($strOperation); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/DocExecute.pm000066400000000000000000001133461500617037600251700ustar00rootroot00000000000000#################################################################################################################################### # DOC EXECUTE MODULE #################################################################################################################################### package pgBackRestDoc::Common::DocExecute; use parent 'pgBackRestDoc::Common::DocRender'; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; use Cwd qw(abs_path); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use Storable qw(dclone); use pgBackRestTest::Common::ExecuteTest; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Host; use pgBackRestDoc::Common::HostGroup; use pgBackRestDoc::Common::Ini; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Custom::DocConfigData; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # User that's building the docs #################################################################################################################################### use constant DOC_USER => getpwuid($UID) eq 'root' ? 'ubuntu' : getpwuid($UID) . ''; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Assign function parameters, defaults, and log debug info my ( $strOperation, $strType, $oManifest, $strRenderOutKey, $bExe ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'strType'}, {name => 'oManifest'}, {name => 'strRenderOutKey'}, {name => 'bExe'} ); # Create the class hash my $self = $class->SUPER::new($strType, $oManifest, $bExe, $strRenderOutKey); bless $self, $class; if (defined($self->{oSource}{hyCache})) { $self->{bCache} = true; $self->{iCacheIdx} = 0; } else { $self->{bCache} = false; } $self->{bExe} = $bExe; $self->{iCmdLineLen} = $self->{oDoc}->paramGet('cmd-line-len', false, 80); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # executeKey # # Get a unique key for the execution step to determine if the cache is valid. 
#################################################################################################################################### sub executeKey { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strHostName, $oCommand, ) = logDebugParam ( __PACKAGE__ . '->executeKey', \@_, {name => 'strHostName', trace => true}, {name => 'oCommand', trace => true}, ); # Add user to command my $bUserForce = $oCommand->paramTest('user-force', 'y') ? true : false; my $strCommand = $self->{oManifest}->variableReplace(trim($oCommand->fieldGet('exe-cmd'))); my $strUser = $self->{oManifest}->variableReplace($oCommand->paramGet('user', false, DOC_USER)); $strCommand = ($strUser eq DOC_USER || $bUserForce ? '' : ('sudo ' . ($strUser eq 'root' ? '' : "-u $strUser "))) . $strCommand; # Format and split command $strCommand =~ s/[ ]*\n[ ]*/ \\\n /smg; $strCommand =~ s/ \\\@ \\//smg; my @stryCommand = split("\n", $strCommand); my $hCacheKey = { host => $strHostName, cmd => \@stryCommand, output => JSON::PP::false, }; $$hCacheKey{'run-as-user'} = $bUserForce ? $strUser : undef; if (defined($oCommand->fieldGet('exe-cmd-extra', false))) { $$hCacheKey{'cmd-extra'} = $self->{oManifest}->variableReplace($oCommand->fieldGet('exe-cmd-extra')); } if (defined($oCommand->paramGet('err-expect', false))) { $$hCacheKey{'err-expect'} = $oCommand->paramGet('err-expect'); } if ($oCommand->paramTest('output', 'y') || $oCommand->paramTest('show', 'y') || $oCommand->paramTest('variable-key')) { $$hCacheKey{'output'} = JSON::PP::true; } $$hCacheKey{'load-env'} = $oCommand->paramTest('load-env', 'n') ? JSON::PP::false : JSON::PP::true; $$hCacheKey{'bash-wrap'} = $oCommand->paramTest('bash-wrap', 'n') ? JSON::PP::false : JSON::PP::true; if (defined($oCommand->fieldGet('exe-highlight', false))) { $$hCacheKey{'output'} = JSON::PP::true; $$hCacheKey{highlight}{'filter'} = $oCommand->paramTest('filter', 'n') ? JSON::PP::false : JSON::PP::true; $$hCacheKey{highlight}{'filter-context'} = $oCommand->paramGet('filter-context', false, 2); my @stryHighlight; $stryHighlight[0] = $self->{oManifest}->variableReplace($oCommand->fieldGet('exe-highlight')); $$hCacheKey{highlight}{list} = \@stryHighlight; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'hExecuteKey', value => $hCacheKey, trace => true} ); } #################################################################################################################################### # execute #################################################################################################################################### sub execute { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $strHostName, $oCommand, $iIndent, $bCache, $bShow, ) = logDebugParam ( __PACKAGE__ . 
'->execute', \@_, {name => 'oSection'}, {name => 'strHostName'}, {name => 'oCommand'}, {name => 'iIndent', optional => true, default => 1}, {name => 'bCache', optional => true, default => true}, {name => 'bShow', optional => true, default => true}, ); # Working variables my $hCacheKey = $self->executeKey($strHostName, $oCommand); my $strCommand = join("\n", @{$$hCacheKey{cmd}}); my $strOutput; if ($bShow && $self->{bExe} && $self->isRequired($oSection)) { # Make sure that no lines are greater than 80 chars foreach my $strLine (split("\n", $strCommand)) { if (length(trim($strLine)) > $self->{iCmdLineLen}) { confess &log(ERROR, "command has a line > $self->{iCmdLineLen} characters:\n${strCommand}\noffending line: ${strLine}"); } } } &log(DEBUG, (' ' x $iIndent) . "execute: $strCommand"); if ($self->{oManifest}->variableReplace($oCommand->paramGet('skip', false, 'n')) ne 'y') { if ($self->{bExe} && $self->isRequired($oSection)) { my ($bCacheHit, $strCacheType, $hCacheKey, $hCacheValue) = $self->cachePop('exe', $hCacheKey); if ($bCacheHit) { $strOutput = defined($$hCacheValue{output}) ? join("\n", @{$$hCacheValue{output}}) : undef; } else { # Check that the host is valid my $oHost = $self->{host}{$strHostName}; if (!defined($oHost)) { confess &log(ERROR, "cannot execute on host ${strHostName} because the host does not exist"); } my $oExec = $oHost->execute( $strCommand . (defined($$hCacheKey{'cmd-extra'}) ? ' ' . $$hCacheKey{'cmd-extra'} : ''), {iExpectedExitStatus => $$hCacheKey{'err-expect'}, bSuppressError => $oCommand->paramTest('err-suppress', 'y'), iRetrySeconds => $oCommand->paramGet('retry', false)}, $hCacheKey->{'run-as-user'}, {bLoadEnv => $hCacheKey->{'load-env'}, bBashWrap => $hCacheKey->{'bash-wrap'}}); $oExec->begin(); $oExec->end(); if (defined($oExec->{strOutLog}) && $oExec->{strOutLog} ne '') { $strOutput = $oExec->{strOutLog}; # Trim off extra linefeeds before and after $strOutput =~ s/^\n+|\n$//g; } if (defined($$hCacheKey{'err-expect'}) && defined($oExec->{strErrorLog}) && $oExec->{strErrorLog} ne '') { $strOutput .= $oExec->{strErrorLog}; } if ($$hCacheKey{output} && defined($$hCacheKey{highlight}) && $$hCacheKey{highlight}{filter} && defined($strOutput)) { my $strHighLight = @{$$hCacheKey{highlight}{list}}[0]; if (!defined($strHighLight)) { confess &log(ERROR, 'filter requires highlight definition: ' . $strCommand); } my $iFilterContext = $$hCacheKey{highlight}{'filter-context'}; my @stryOutput = split("\n", $strOutput); undef($strOutput); # my $iFiltered = 0; my $iLastOutput = -1; for (my $iIndex = 0; $iIndex < @stryOutput; $iIndex++) { if ($stryOutput[$iIndex] =~ /$strHighLight/) { # Determine the first line to output my $iFilterFirst = $iIndex - $iFilterContext; # Don't go past the beginning $iFilterFirst = $iFilterFirst < 0 ? 0 : $iFilterFirst; # Don't repeat lines that have already been output $iFilterFirst = $iFilterFirst <= $iLastOutput ? $iLastOutput + 1 : $iFilterFirst; # Determine the last line to output my $iFilterLast = $iIndex + $iFilterContext; # Don't go past the end $iFilterLast = $iFilterLast >= @stryOutput ? @stryOutput - 1 : $iFilterLast; # Mark filtered lines if any if ($iFilterFirst > $iLastOutput + 1) { my $iFiltered = $iFilterFirst - ($iLastOutput + 1); if ($iFiltered > 1) { $strOutput .= (defined($strOutput) ? "\n" : '') . " [filtered ${iFiltered} lines of output]"; } else { $iFilterFirst -= 1; } } # Output the lines for (my $iOutputIndex = $iFilterFirst; $iOutputIndex <= $iFilterLast; $iOutputIndex++) { $strOutput .= (defined($strOutput) ?
"\n" : '') . $stryOutput[$iOutputIndex]; } $iLastOutput = $iFilterLast; } } if (@stryOutput - 1 > $iLastOutput + 1) { my $iFiltered = (@stryOutput - 1) - ($iLastOutput + 1); if ($iFiltered > 1) { $strOutput .= (defined($strOutput) ? "\n" : '') . " [filtered ${iFiltered} lines of output]"; } else { $strOutput .= (defined($strOutput) ? "\n" : '') . $stryOutput[-1]; } } } if (!$$hCacheKey{output}) { $strOutput = undef; } if (defined($strOutput)) { my @stryOutput = split("\n", $strOutput); $$hCacheValue{output} = \@stryOutput; } if ($bCache) { $self->cachePush($strCacheType, $hCacheKey, $hCacheValue); } } # Output is assigned to a var if ($oCommand->paramTest('variable-key')) { $self->{oManifest}->variableSet($oCommand->paramGet('variable-key'), trim($strOutput), true); } } elsif ($$hCacheKey{output}) { $strOutput = 'Output suppressed for testing'; } } # Default variable output when it was not set by execution if ($oCommand->paramTest('variable-key') && !defined($self->{oManifest}->variableGet($oCommand->paramGet('variable-key')))) { $self->{oManifest}->variableSet($oCommand->paramGet('variable-key'), '[Test Variable]', true); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strCommand', value => $strCommand, trace => true}, {name => 'strOutput', value => $strOutput, trace => true} ); } #################################################################################################################################### # configKey #################################################################################################################################### sub configKey { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oConfig, ) = logDebugParam ( __PACKAGE__ . '->hostKey', \@_, {name => 'oConfig', trace => true}, ); my $hCacheKey = { host => $self->{oManifest}->variableReplace($oConfig->paramGet('host')), file => $self->{oManifest}->variableReplace($oConfig->paramGet('file')), }; if ($oConfig->paramTest('reset', 'y')) { $$hCacheKey{reset} = JSON::PP::true; } # Add all options to the key my $strOptionTag = $oConfig->nameGet() eq 'backrest-config' ? 'backrest-config-option' : 'postgres-config-option'; foreach my $oOption ($oConfig->nodeList($strOptionTag)) { my $hOption = {}; if ($oOption->paramTest('remove', 'y')) { $$hOption{remove} = JSON::PP::true; } if (defined($oOption->valueGet(false))) { $$hOption{value} = $self->{oManifest}->variableReplace($oOption->valueGet()); } my $strKey = $self->{oManifest}->variableReplace($oOption->paramGet('key')); if ($oConfig->nameGet() eq 'backrest-config') { my $strSection = $self->{oManifest}->variableReplace($oOption->paramGet('section')); $$hCacheKey{option}{$strSection}{$strKey} = $hOption; } else { $$hCacheKey{option}{$strKey} = $hOption; } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'hCacheKey', value => $hCacheKey, trace => true} ); } #################################################################################################################################### # backrestConfig #################################################################################################################################### sub backrestConfig { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oConfig, $iDepth ) = logDebugParam ( __PACKAGE__ . 
'->backrestConfig', \@_, {name => 'oSection'}, {name => 'oConfig'}, {name => 'iDepth'} ); # Working variables my $hCacheKey = $self->configKey($oConfig); my $strFile = $$hCacheKey{file}; my $strConfig = undef; &log(DEBUG, (' ' x $iDepth) . 'process backrest config: ' . $$hCacheKey{file}); if ($self->{bExe} && $self->isRequired($oSection)) { my ($bCacheHit, $strCacheType, $hCacheKey, $hCacheValue) = $self->cachePop('cfg-' . PROJECT_EXE, $hCacheKey); if ($bCacheHit) { $strConfig = defined($$hCacheValue{config}) ? join("\n", @{$$hCacheValue{config}}) : undef; } else { # Check that the host is valid my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); my $oHost = $self->{host}{$strHostName}; if (!defined($oHost)) { confess &log(ERROR, "cannot configure backrest on host ${strHostName} because the host does not exist"); } # Reset all options if ($oConfig->paramTest('reset', 'y')) { delete(${$self->{config}}{$strHostName}{$$hCacheKey{file}}) } foreach my $oOption ($oConfig->nodeList('backrest-config-option')) { my $strSection = $self->{oManifest}->variableReplace($oOption->paramGet('section')); my $strKey = $self->{oManifest}->variableReplace($oOption->paramGet('key')); my $strValue; if (!$oOption->paramTest('remove', 'y')) { $strValue = $self->{oManifest}->variableReplace(trim($oOption->valueGet(false))); } if (!defined($strValue)) { delete(${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}{$strKey}); if (keys(%{${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}}) == 0) { delete(${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}); } &log(DEBUG, (' ' x ($iDepth + 1)) . "reset ${strSection}->${strKey}"); } else { # If this option is a hash and the value is already set then append to the array if (defined(cfgDefine()->{$strKey}) && cfgDefine()->{$strKey}{&CFGDEF_TYPE} eq CFGDEF_TYPE_HASH && defined(${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}{$strKey})) { my @oValue = (); my $strHashValue = ${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}{$strKey}; # If there is only one key/value if (ref(\$strHashValue) eq 'SCALAR') { push(@oValue, $strHashValue); } # Else if there is an array of values else { @oValue = @{$strHashValue}; } push(@oValue, $strValue); ${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}{$strKey} = \@oValue; } # else just set the value else { ${$self->{config}}{$strHostName}{$$hCacheKey{file}}{$strSection}{$strKey} = $strValue; } &log(DEBUG, (' ' x ($iDepth + 1)) . "set ${strSection}->${strKey} = ${strValue}"); } } my $strLocalFile = abs_path(dirname($0)) . '/output/pgbackrest.conf'; # Save the ini file $self->{oManifest}->storage()->put($strLocalFile, iniRender($self->{config}{$strHostName}{$$hCacheKey{file}}, true)); $oHost->copyTo( $strLocalFile, $$hCacheKey{file}, $self->{oManifest}->variableReplace($oConfig->paramGet('owner', false, 'postgres:postgres')), '640'); # Remove the log-console-stderr option before pushing into the cache # ??? 
This is not very pretty and should be replaced with a general way to hide config options my $oConfigClean = dclone($self->{config}{$strHostName}{$$hCacheKey{file}}); delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL}{&CFGOPT_LOG_LEVEL_STDERR}); delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL}{&CFGOPT_LOG_TIMESTAMP}); if (keys(%{$$oConfigClean{&CFGDEF_SECTION_GLOBAL}}) == 0) { delete($$oConfigClean{&CFGDEF_SECTION_GLOBAL}); } $self->{oManifest}->storage()->put("${strLocalFile}.clean", iniRender($oConfigClean, true)); # Push config file into the cache $strConfig = ${$self->{oManifest}->storage()->get("${strLocalFile}.clean")}; my @stryConfig = undef; if (trim($strConfig) ne '') { @stryConfig = split("\n", $strConfig); } $$hCacheValue{config} = \@stryConfig; $self->cachePush($strCacheType, $hCacheKey, $hCacheValue); } } else { $strConfig = 'Config suppressed for testing'; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strFile', value => $strFile, trace => true}, {name => 'strConfig', value => $strConfig, trace => true}, {name => 'bShow', value => $oConfig->paramTest('show', 'n') ? false : true, trace => true} ); } #################################################################################################################################### # postgresConfig #################################################################################################################################### sub postgresConfig { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oConfig, $iDepth ) = logDebugParam ( __PACKAGE__ . '->postgresConfig', \@_, {name => 'oSection'}, {name => 'oConfig'}, {name => 'iDepth'} ); # Working variables my $hCacheKey = $self->configKey($oConfig); my $strFile = $$hCacheKey{file}; my $strConfig; if ($self->{bExe} && $self->isRequired($oSection)) { my ($bCacheHit, $strCacheType, $hCacheKey, $hCacheValue) = $self->cachePop('cfg-postgresql', $hCacheKey); if ($bCacheHit) { $strConfig = defined($$hCacheValue{config}) ? join("\n", @{$$hCacheValue{config}}) : undef; } else { # Check that the host is valid my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); my $oHost = $self->{host}{$strHostName}; if (!defined($oHost)) { confess &log(ERROR, "cannot configure postgres on host ${strHostName} because the host does not exist"); } my $strLocalFile = abs_path(dirname($0)) . '/output/postgresql.conf'; $oHost->copyFrom($$hCacheKey{file}, $strLocalFile); if (!defined(${$self->{'pg-config'}}{$strHostName}{$$hCacheKey{file}}{base}) && $self->{bExe}) { ${$self->{'pg-config'}}{$strHostName}{$$hCacheKey{file}}{base} = ${$self->{oManifest}->storage()->get($strLocalFile)}; } my $oConfigHash = $self->{'pg-config'}{$strHostName}{$$hCacheKey{file}}; my $oConfigHashNew; if (!defined($$oConfigHash{old})) { $oConfigHashNew = {}; $$oConfigHash{old} = {} } else { $oConfigHashNew = dclone($$oConfigHash{old}); } &log(DEBUG, (' ' x $iDepth) . 'process postgres config: ' . $$hCacheKey{file}); foreach my $oOption ($oConfig->nodeList('postgres-config-option')) { my $strKey = $oOption->paramGet('key'); my $strValue = $self->{oManifest}->variableReplace(trim($oOption->valueGet())); if ($strValue eq '') { delete($$oConfigHashNew{$strKey}); &log(DEBUG, (' ' x ($iDepth + 1)) . "reset ${strKey}"); } else { $$oConfigHashNew{$strKey} = $strValue; &log(DEBUG, (' ' x ($iDepth + 1)) . 
"set ${strKey} = ${strValue}"); } } # Generate config text foreach my $strKey (sort(keys(%$oConfigHashNew))) { if (defined($strConfig)) { $strConfig .= "\n"; } $strConfig .= "${strKey} = $$oConfigHashNew{$strKey}"; } # Save the conf file if ($self->{bExe}) { $self->{oManifest}->storage()->put($strLocalFile, $$oConfigHash{base} . (defined($strConfig) ? "\n# pgBackRest Configuration\n${strConfig}\n" : '')); $oHost->copyTo($strLocalFile, $$hCacheKey{file}, 'postgres:postgres', '640'); } $$oConfigHash{old} = $oConfigHashNew; my @stryConfig = undef; if (trim($strConfig) ne '') { @stryConfig = split("\n", $strConfig); } $$hCacheValue{config} = \@stryConfig; $self->cachePush($strCacheType, $hCacheKey, $hCacheValue); } } else { $strConfig = 'Config suppressed for testing'; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strFile', value => $strFile, trace => true}, {name => 'strConfig', value => $strConfig, trace => true}, {name => 'bShow', value => $oConfig->paramTest('show', 'n') ? false : true, trace => true} ); } #################################################################################################################################### # hostKey #################################################################################################################################### sub hostKey { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oHost, ) = logDebugParam ( __PACKAGE__ . '->hostKey', \@_, {name => 'oHost', trace => true}, ); my $hCacheKey = { name => $self->{oManifest}->variableReplace($oHost->paramGet('name')), image => $self->{oManifest}->variableReplace($oHost->paramGet('image')), }; if (defined($oHost->paramGet('id', false))) { $hCacheKey->{id} = $self->{oManifest}->variableReplace($oHost->paramGet('id')); } else { $hCacheKey->{id} = $hCacheKey->{name}; } if (defined($oHost->paramGet('option', false))) { $$hCacheKey{option} = $self->{oManifest}->variableReplace($oHost->paramGet('option')); } if (defined($oHost->paramGet('param', false))) { $$hCacheKey{param} = $self->{oManifest}->variableReplace($oHost->paramGet('param')); } if (defined($oHost->paramGet('os', false))) { $$hCacheKey{os} = $self->{oManifest}->variableReplace($oHost->paramGet('os')); } $$hCacheKey{'update-hosts'} = $oHost->paramTest('update-hosts', 'n') ? JSON::PP::false : JSON::PP::true; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'hCacheKey', value => $hCacheKey, trace => true} ); } #################################################################################################################################### # cachePop #################################################################################################################################### sub cachePop { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strCacheType, $hCacheKey, ) = logDebugParam ( __PACKAGE__ . '->hostKey', \@_, {name => 'strCacheType', trace => true}, {name => 'hCacheKey', trace => true}, ); my $bCacheHit = false; my $oCacheValue = undef; if ($self->{bCache}) { my $oJSON = JSON::PP->new()->canonical()->allow_nonref(); # &log(WARN, "checking cache for\ncurrent key: " . 
$oJSON->encode($hCacheKey)); my $hCache = ${$self->{oSource}{hyCache}}[$self->{iCacheIdx}]; if (!defined($hCache)) { confess &log(ERROR, 'unable to get index from cache', ERROR_FILE_INVALID); } if (!defined($$hCache{key})) { confess &log(ERROR, 'unable to get key from cache', ERROR_FILE_INVALID); } if (!defined($$hCache{type})) { confess &log(ERROR, 'unable to get type from cache', ERROR_FILE_INVALID); } if ($$hCache{type} ne $strCacheType) { confess &log(ERROR, 'types do not match, cache is invalid', ERROR_FILE_INVALID); } if ($oJSON->encode($$hCache{key}) ne $oJSON->encode($hCacheKey)) { confess &log(ERROR, "keys at index $self->{iCacheIdx} do not match, cache is invalid." . "\n cache key: " . $oJSON->encode($$hCache{key}) . "\ncurrent key: " . $oJSON->encode($hCacheKey), ERROR_FILE_INVALID); } $bCacheHit = true; $oCacheValue = $$hCache{value}; $self->{iCacheIdx}++; } else { if ($self->{oManifest}{bCacheOnly}) { confess &log(ERROR, 'Cache only operation forced by --cache-only option'); } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'bCacheHit', value => $bCacheHit, trace => true}, {name => 'strCacheType', value => $strCacheType, trace => true}, {name => 'hCacheKey', value => $hCacheKey, trace => true}, {name => 'oCacheValue', value => $oCacheValue, trace => true}, ); } #################################################################################################################################### # cachePush #################################################################################################################################### sub cachePush { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strType, $hCacheKey, $oCacheValue, ) = logDebugParam ( __PACKAGE__ . '->cachePush', \@_, {name => 'strType', trace => true}, {name => 'hCacheKey', trace => true}, {name => 'oCacheValue', required => false, trace => true}, ); if ($self->{bCache}) { confess &log(ASSERT, "cachePush should not be called when cache is already present"); } # Create the cache entry my $hCache = { key => $hCacheKey, type => $strType, }; if (defined($oCacheValue)) { $$hCache{value} = $oCacheValue; } push @{$self->{oSource}{hyCache}}, $hCache; # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # sectionChildProcess #################################################################################################################################### sub sectionChildProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oChild, $iDepth ) = logDebugParam ( __PACKAGE__ . '->sectionChildProcess', \@_, {name => 'oSection'}, {name => 'oChild'}, {name => 'iDepth'} ); &log(DEBUG, (' ' x ($iDepth + 1)) . 'process child: ' . $oChild->nameGet()); # Execute a command if ($oChild->nameGet() eq 'host-add') { if ($self->{bExe} && $self->isRequired($oSection)) { my ($bCacheHit, $strCacheType, $hCacheKey, $hCacheValue) = $self->cachePop('host', $self->hostKey($oChild)); if ($bCacheHit) { $self->{oManifest}->variableSet('host-' . $hCacheKey->{id} .
'-ip', $hCacheValue->{ip}, true); } else { if (defined($self->{host}{$$hCacheKey{name}})) { confess &log(ERROR, "cannot add host $$hCacheKey{name} because the host already exists"); } executeTest("rm -rf ~/data/$$hCacheKey{name}"); executeTest("mkdir -p ~/data/$$hCacheKey{name}/etc"); my $strHost = $hCacheKey->{name}; my $strImage = $hCacheKey->{image}; my $strHostUser = $self->{oManifest}->variableReplace($oChild->paramGet('user')); # Determine if a pre-built image should be created if (defined($self->preExecute($strHost))) { my $strPreImage = "${strImage}-${strHost}"; my $strFrom = $strImage; &log(INFO, "Build vm '${strPreImage}' from '${strFrom}'"); my $strCommandList; # Add all pre commands foreach my $oExecute ($self->preExecute($strHost)) { my $hExecuteKey = $self->executeKey($strHost, $oExecute); my $strCommand = join("\n", @{$hExecuteKey->{cmd}}) . (defined($hExecuteKey->{'cmd-extra'}) ? ' ' . $hExecuteKey->{'cmd-extra'} : ''); $strCommand =~ s/'/'\\''/g; $strCommand = "sudo -u ${strHostUser}" . ($hCacheKey->{'bash-wrap'} ? " bash" . ($hCacheKey->{'load-env'} ? ' -l' : '') . " -c '${strCommand}'" : " ${strCommand}"); if (defined($strCommandList)) { $strCommandList .= "\n"; } $strCommandList .= "RUN ${strCommand}"; &log(DETAIL, " Pre command $strCommand"); } # Build container my $strDockerfile = $self->{oManifest}{strDocPath} . "/output/doc-host.dockerfile"; $self->{oManifest}{oStorage}->put( $strDockerfile, "FROM ${strFrom}\n\n" . trim($self->{oManifest}->variableReplace($strCommandList)) . "\n"); executeTest( "docker build -f ${strDockerfile} -t ${strPreImage} " . $self->{oManifest}{oStorage}->pathGet(), {bSuppressStdErr => true}); # Use the pre-built image $strImage = $strPreImage; } my $strHostRepoPath = dirname(dirname(abs_path($0))); # Replace host repo path in mounts if present my $strMount = undef; if (defined($oChild->paramGet('mount', false))) { $strMount = $self->{oManifest}->variableReplace($oChild->paramGet('mount')); $strMount =~ s/\{\[host\-repo\-path\]\}/${strHostRepoPath}/g; } # Replace host repo path in options if present my $strOption = $$hCacheKey{option}; if (defined($strOption)) { $strOption =~ s/\{\[host\-repo\-path\]\}/${strHostRepoPath}/g; } my $oHost = new pgBackRestDoc::Common::Host( $$hCacheKey{name}, "doc-$$hCacheKey{name}", $strImage, $strHostUser, defined($strMount) ? [$strMount] : undef, $strOption, $$hCacheKey{param}, $$hCacheKey{'update-hosts'}); $self->{host}{$$hCacheKey{name}} = $oHost; $self->{oManifest}->variableSet('host-' . $hCacheKey->{id} . '-ip', $oHost->{strIP}, true); $$hCacheValue{ip} = $oHost->{strIP}; # Add to the host group my $oHostGroup = hostGroupGet(); $oHostGroup->hostAdd($oHost); # Execute initialize commands foreach my $oExecute ($oChild->nodeList('execute', false)) { $self->execute( $oSection, $$hCacheKey{name}, $oExecute, {iIndent => $iDepth + 1, bCache => false, bShow => false}); } $self->cachePush($strCacheType, $hCacheKey, $hCacheValue); } } } # Skip children that have already been processed and error on others elsif ($oChild->nameGet() ne 'title') { confess &log(ASSERT, 'unable to process child type ' .
$oChild->nameGet()); } # Return from function and log return values if any return logDebugReturn ( $strOperation ); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/DocManifest.pm000066400000000000000000000642151500617037600253340ustar00rootroot00000000000000#################################################################################################################################### # DOC MANIFEST MODULE #################################################################################################################################### package pgBackRestDoc::Common::DocManifest; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Cwd qw(abs_path); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use JSON::PP; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; #################################################################################################################################### # File constants #################################################################################################################################### use constant FILE_MANIFEST => 'manifest.xml'; #################################################################################################################################### # Render constants #################################################################################################################################### use constant RENDER => 'render'; use constant RENDER_COMPACT => 'compact'; push @EXPORT, qw(RENDER_COMPACT); use constant RENDER_FILE => 'file'; use constant RENDER_MENU => 'menu'; push @EXPORT, qw(RENDER_MENU); use constant RENDER_PRETTY => 'pretty'; push @EXPORT, qw(RENDER_PRETTY); use constant RENDER_TYPE => 'type'; use constant RENDER_TYPE_HTML => 'html'; push @EXPORT, qw(RENDER_TYPE_HTML); use constant RENDER_TYPE_MARKDOWN => 'markdown'; push @EXPORT, qw(RENDER_TYPE_MARKDOWN); use constant RENDER_TYPE_PDF => 'pdf'; push @EXPORT, qw(RENDER_TYPE_PDF); #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{oStorage}, $self->{stryRequire}, $self->{stryInclude}, $self->{stryExclude}, $self->{rhKeyVariableOverride}, my $rhVariableOverride, $self->{strDocPath}, $self->{bDeploy}, $self->{bCacheOnly}, $self->{bPre}, ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oStorage'}, {name => 'stryRequire'}, {name => 'stryInclude'}, {name => 'stryExclude'}, {name => 'rhKeyVariableOverride', required => false}, {name => 'rhVariableOverride', required => false}, {name => 'strDocPath', required => false}, {name => 'bDeploy', required => false}, {name => 'bCacheOnly', required => false}, {name => 'bPre', required => false, default => false}, ); # Set the bin path $self->{strBinPath} = abs_path(dirname($0)); # Set the base path if it was not passed in if (!defined($self->{strDocPath})) { $self->{strDocPath} = $self->{strBinPath}; } # Set cache file names $self->{strExeCacheLocal} = $self->{strDocPath} . "/output/exe.cache"; $self->{strExeCacheDeploy} = $self->{strDocPath} . 
"/resource/exe.cache"; # Load the manifest $self->{oManifestXml} = new pgBackRestDoc::Common::Doc("$self->{strDocPath}/manifest.xml"); # Iterate the sources $self->{oManifest} = {}; foreach my $oSource ($self->{oManifestXml}->nodeGet('source-list')->nodeList('source')) { my $oSourceHash = {}; my $strKey = $oSource->paramGet('key'); my $strFile = $oSource->paramGet('file', false); my $strSourceType = $oSource->paramGet('type', false); logDebugMisc ( $strOperation, 'load source', {name => 'strKey', value => $strKey}, {name => 'strFile', value => $strFile}, {name => 'strSourceType', value => $strSourceType} ); # Skip sources in exclude list if (grep(/^$strKey$/, @{$self->{stryExclude}})) { next; } # If file is defined if (defined($strFile)) { $oSourceHash->{doc} = new pgBackRestDoc::Common::Doc($self->{strDocPath} . "/${strFile}"); } # Else should be in doc/xml else { $$oSourceHash{doc} = new pgBackRestDoc::Common::Doc("$self->{strDocPath}/xml/${strKey}.xml"); } # Read variables from source $self->variableListParse($$oSourceHash{doc}->nodeGet('variable-list', false), $rhVariableOverride); ${$self->{oManifest}}{source}{$strKey} = $oSourceHash; ${$self->{oManifest}}{source}{$strKey}{strSourceType} = $strSourceType; } # Iterate the renderers foreach my $oRender ($self->{oManifestXml}->nodeGet('render-list')->nodeList('render')) { my $oRenderHash = {}; my $strType = $oRender->paramGet(RENDER_TYPE); # Only one instance of each render type can be defined if (defined(${$self->{oManifest}}{&RENDER}{$strType})) { confess &log(ERROR, "render ${strType} has already been defined"); } # Get the file param $${oRenderHash}{file} = $oRender->paramGet(RENDER_FILE, false); $${oRenderHash}{&RENDER_COMPACT} = $oRender->paramGet(RENDER_COMPACT, false, 'n') eq 'y' ? true : false; $${oRenderHash}{&RENDER_PRETTY} = $oRender->paramGet(RENDER_PRETTY, false, 'n') eq 'y' ? 
true : false; $${oRenderHash}{&RENDER_MENU} = false; logDebugMisc ( $strOperation, ' load render', {name => 'strType', value => $strType}, {name => 'strFile', value => $${oRenderHash}{file}} ); # Error if file is set and render type is not pdf if (defined($${oRenderHash}{file}) && $strType ne RENDER_TYPE_PDF) { confess &log(ERROR, 'only the pdf render type can have file set') } # Iterate the render sources foreach my $oRenderOut ($oRender->nodeList('render-source')) { my $oRenderOutHash = {}; my $strKey = $oRenderOut->paramGet('key'); my $strSource = $oRenderOut->paramGet('source', false, $strKey); # Skip sources in exclude list if (grep(/^$strSource$/, @{$self->{stryExclude}})) { next; } # Skip sources not in include list if (@{$self->{stryInclude}} > 0 && !grep(/^$strSource$/, @{$self->{stryInclude}})) { next; } # Preserve natural order push(@{$${oRenderHash}{stryOrder}}, $strKey); $$oRenderOutHash{source} = $strSource; # Get the filename if (defined($oRenderOut->paramGet('file', false))) { if ($strType eq RENDER_TYPE_HTML || $strType eq RENDER_TYPE_MARKDOWN) { $$oRenderOutHash{file} = $oRenderOut->paramGet('file'); } else { confess &log(ERROR, "file is only valid with html or markdown render types"); } } # Get the menu caption if (defined($oRenderOut->paramGet('menu', false)) && $strType ne RENDER_TYPE_HTML) { confess &log(ERROR, "menu is only valid with html render type"); } if (defined($oRenderOut->paramGet('menu', false))) { $${oRenderHash}{&RENDER_MENU} = true; if ($strType eq RENDER_TYPE_HTML) { $$oRenderOutHash{menu} = $oRenderOut->paramGet('menu', false); } else { confess &log(ERROR, 'only the html render type can have menu set'); } } logDebugMisc ( $strOperation, ' load render source', {name => 'strKey', value => $strKey}, {name => 'strSource', value => $strSource}, {name => 'strMenu', value => $${oRenderOutHash}{menu}} ); $${oRenderHash}{out}{$strKey} = $oRenderOutHash; } ${$self->{oManifest}}{render}{$strType} = $oRenderHash; } # Set the doc path variable $self->variableSet('doc-path', $self->{strDocPath}); # Read variables from manifest $self->variableListParse($self->{oManifestXml}->nodeGet('variable-list', false), $rhVariableOverride); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # isBackRest # # Until all the backrest specific code can be abstracted, this function will identify when BackRest docs are being built. 
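#
# Example: callers can branch on the project being rendered, e.g. if ($oManifest->isBackRest()) { ... }, which is simply
# shorthand for $oManifest->variableTest('project-exe', 'pgbackrest') (variableTest is defined below).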
#################################################################################################################################### sub isBackRest { my $self = shift; return($self->variableTest('project-exe', 'pgbackrest')); } #################################################################################################################################### # Evaluate the if condition for a node #################################################################################################################################### sub evaluateIf { my $self = shift; my $oNode = shift; my $bIf = true; # Evaluate if condition if (defined($oNode->paramGet('if', false))) { my $strIf = $self->variableReplace($oNode->paramGet('if')); # In this case we really do want to evaluate the contents and not treat it as a literal $bIf = eval($strIf); # Error if the eval failed if ($@) { confess &log(ERROR, "unable to evaluate '${strIf}': $@"); } } return $bIf; } #################################################################################################################################### # variableListParse # # Parse a variable list and store variables. #################################################################################################################################### sub variableListParse { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oVariableList, $rhVariableOverride ) = logDebugParam ( __PACKAGE__ . '->variableListParse', \@_, {name => '$oVariableList', required => false}, {name => '$rhVariableOverride', required => false} ); if (defined($oVariableList)) { foreach my $oVariable ($oVariableList->nodeList('variable')) { if ($self->evaluateIf($oVariable)) { my $strKey = $oVariable->paramGet('key'); my $strValue = $self->variableReplace($oVariable->valueGet()); if ($oVariable->paramTest('eval', 'y')) { # In this case we really do want to evaluate the contents of strValue and not treat it as a literal. $strValue = eval($strValue); if ($@) { confess &log(ERROR, "unable to evaluate ${strKey}: $@\n" . $oVariable->valueGet()); } } $self->variableSet($strKey, defined($rhVariableOverride->{$strKey}) ? $rhVariableOverride->{$strKey} : $strValue); logDebugMisc ( $strOperation, ' load variable', {name => 'strKey', value => $strKey}, {name => 'strValue', value => $strValue} ); } } } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # variableReplace # # Replace variables in the string. #################################################################################################################################### sub variableReplace { my $self = shift; my $strBuffer = shift; my $strType = shift; if (!defined($strBuffer)) { return; } foreach my $strName (sort(keys(%{$self->{oVariable}}))) { # If the value is not defined then replace it as an empty string. This means the key *was* defined but no value given. my $strValue = defined($self->{oVariable}{$strName}) ? 
$self->{oVariable}{$strName} : ''; $strBuffer =~ s/\{\[$strName\]\}/$strValue/g; } if (defined($strType) && $strType eq 'latex') { $strBuffer =~ s/\\\_/\_/g; $strBuffer =~ s/\_/\\\_/g; $strBuffer =~ s/\\\#/\#/g; $strBuffer =~ s/\#/\\\#/g; } return $strBuffer; } #################################################################################################################################### # variableSet # # Set a variable to be replaced later. #################################################################################################################################### sub variableSet { my $self = shift; my $strKey = shift; my $strValue = shift; my $bForce = shift; if (defined(${$self->{oVariable}}{$strKey}) && (!defined($bForce) || !$bForce)) { confess &log(ERROR, "${strKey} variable is already defined"); } ${$self->{oVariable}}{$strKey} = $self->variableReplace($strValue); } #################################################################################################################################### # variableGet # # Get the current value of a variable. #################################################################################################################################### sub variableGet { my $self = shift; my $strKey = shift; return ${$self->{oVariable}}{$strKey}; } #################################################################################################################################### # variableTest # # Test that a variable is defined or has an expected value. #################################################################################################################################### sub variableTest { my $self = shift; my $strKey = shift; my $strExpectedValue = shift; # Get the variable my $strValue = ${$self->{oVariable}}{$strKey}; # Return false if it is not defined if (!defined($strValue)) { return false; } # Return false if it does not equal the expected value if (defined($strExpectedValue) && $strValue ne $strExpectedValue) { return false; } return true; } #################################################################################################################################### # Get list of source documents #################################################################################################################################### sub sourceList { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->sourceList'); # Check that sources exist my @strySource; if (defined(${$self->{oManifest}}{source})) { @strySource = sort(keys(%{${$self->{oManifest}}{source}})); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strySource', value => \@strySource} ); } #################################################################################################################################### # sourceGet #################################################################################################################################### sub sourceGet { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strSource ) = logDebugParam ( __PACKAGE__ . 
'->sourceGet', \@_, {name => 'strSource', trace => true} ); if (!defined(${$self->{oManifest}}{source}{$strSource})) { confess &log(ERROR, "source ${strSource} does not exist"); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oSource', value => ${$self->{oManifest}}{source}{$strSource}} ); } #################################################################################################################################### # renderList #################################################################################################################################### sub renderList { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->renderList'); # Check that the render output exists my @stryRender; if (defined(${$self->{oManifest}}{render})) { @stryRender = sort(keys(%{${$self->{oManifest}}{render}})); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'stryRender', value => \@stryRender} ); } #################################################################################################################################### # renderGet #################################################################################################################################### sub renderGet { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strType ) = logDebugParam ( __PACKAGE__ . '->renderGet', \@_, {name => 'strType', trace => true} ); # Check that the render exists if (!defined(${$self->{oManifest}}{render}{$strType})) { confess &log(ERROR, "render type ${strType} does not exist"); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oRenderOut', value => ${$self->{oManifest}}{render}{$strType}} ); } #################################################################################################################################### # renderOutList #################################################################################################################################### sub renderOutList { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strType ) = logDebugParam ( __PACKAGE__ . '->renderOutList', \@_, {name => 'strType'} ); # Check that the render output exists my @stryRenderOut; if (defined(${$self->{oManifest}}{render}{$strType})) { @stryRenderOut = sort(keys(%{${$self->{oManifest}}{render}{$strType}{out}})); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'stryRenderOut', value => \@stryRenderOut} ); } #################################################################################################################################### # renderOutGet #################################################################################################################################### sub renderOutGet { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strType, $strKey, $bIgnoreMissing, ) = logDebugParam ( __PACKAGE__ . 
'->renderOutGet', \@_, {name => 'strType', trace => true}, {name => 'strKey', trace => true}, {name => 'bIgnoreMissing', default => false, trace => true}, ); if (!defined(${$self->{oManifest}}{render}{$strType}{out}{$strKey}) && !$bIgnoreMissing) { confess &log(ERROR, "render out ${strKey} does not exist"); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oRenderOut', value => ${$self->{oManifest}}{render}{$strType}{out}{$strKey}} ); } #################################################################################################################################### # cacheKey #################################################################################################################################### sub cacheKey { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->cacheKey'); # Generate a cache key from the variable override my $strVariableKey = JSON::PP->new()->canonical()->allow_nonref()->encode($self->{rhKeyVariableOverride}); if ($strVariableKey eq '{}') { $strVariableKey = 'default'; } my $strRequire = defined($self->{stryRequire}) && @{$self->{stryRequire}} > 0 ? join("\n", @{$self->{stryRequire}}) : 'all'; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strVariableKey', value => $strVariableKey}, {name => 'strRequire', value => $strRequire}, ); } #################################################################################################################################### # cacheRead #################################################################################################################################### sub cacheRead { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->cacheRead'); $self->{hCache} = undef; my $strCacheFile = $self->{bDeploy} ? $self->{strExeCacheDeploy} : $self->{strExeCacheLocal}; if (!$self->storage()->exists($strCacheFile) && !$self->{bDeploy}) { $strCacheFile = $self->{strExeCacheDeploy}; } if ($self->storage()->exists($strCacheFile)) { my ($strCacheKey, $strRequire) = $self->cacheKey(); my $oJSON = JSON::PP->new()->allow_nonref(); $self->{hCache} = $oJSON->decode(${$self->storage()->get($strCacheFile)}); foreach my $strSource (sort(keys(%{${$self->{oManifest}}{source}}))) { my $hSource = ${$self->{oManifest}}{source}{$strSource}; if (defined(${$self->{hCache}}{$strCacheKey}{$strRequire}{$strSource})) { $$hSource{hyCache} = ${$self->{hCache}}{$strCacheKey}{$strRequire}{$strSource}; &log(DETAIL, "cache load $strSource (key = ${strCacheKey}, require = ${strRequire})"); } } } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # cacheWrite #################################################################################################################################### sub cacheWrite { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->cacheWrite'); my $strCacheFile = $self->{bDeploy} ? 
$self->{strExeCacheDeploy} : $self->{strExeCacheLocal}; my ($strCacheKey, $strRequire) = $self->cacheKey(); foreach my $strSource (sort(keys(%{${$self->{oManifest}}{source}}))) { my $hSource = ${$self->{oManifest}}{source}{$strSource}; if (defined($$hSource{hyCache})) { ${$self->{hCache}}{$strCacheKey}{$strRequire}{$strSource} = $$hSource{hyCache}; &log(DETAIL, "cache load $strSource (key = ${strCacheKey}, require = ${strRequire})"); } } if (defined($self->{hCache})) { my $oJSON = JSON::PP->new()->canonical()->allow_nonref()->pretty(); $self->storage()->put($strCacheFile, $oJSON->encode($self->{hCache})); } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # cacheReset #################################################################################################################################### sub cacheReset { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strSource ) = logDebugParam ( __PACKAGE__ . '->cacheReset', \@_, {name => 'strSource', trace => true} ); if ($self->{bCacheOnly}) { confess &log(ERROR, 'Cache reset disabled by --cache-only option'); } &log(WARN, "Cache will be reset for source ${strSource} and rendering retried automatically"); delete(${$self->{oManifest}}{source}{$strSource}{hyCache}); # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # Getters #################################################################################################################################### sub storage {shift->{oStorage}}; 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/DocRender.pm000066400000000000000000001064051500617037600250030ustar00rootroot00000000000000#################################################################################################################################### # DOC RENDER MODULE #################################################################################################################################### package pgBackRestDoc::Common::DocRender; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Exporter qw(import); our @EXPORT = qw(); use JSON::PP; use Storable qw(dclone); use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; #################################################################################################################################### # XML tag/param constants #################################################################################################################################### use constant XML_SECTION_PARAM_ANCHOR => 'anchor'; push @EXPORT, qw(XML_SECTION_PARAM_ANCHOR); use constant XML_SECTION_PARAM_ANCHOR_VALUE_NOINHERIT => 'no-inherit'; push @EXPORT, qw(XML_SECTION_PARAM_ANCHOR_VALUE_NOINHERIT); #################################################################################################################################### # Render tags for various output types #################################################################################################################################### my $oRenderTag = { 'markdown' => { 'br' => ['\n', ''], 'quote' => ['"', '"'], 'b' => ['**', '**'], 'i' => ['_', '_'], # 'bi' => ['_**', 
'**_'],
        'list' => ["\n", ""],
        'list-item' => ['- ', "\n"],
        'id' => ['`', '`'],
        'file' => ['`', '`'],
        'path' => ['`', '`'],
        'cmd' => ['`', '`'],
        'param' => ['`', '`'],
        'setting' => ['`', '`'],
        'pg-setting' => ['`', '`'],
        'code' => ['`', '`'],
        # 'code-block' => ['```', '```'],
        # 'exe' => [undef, ''],
        'backrest' => [undef, ''],
        'proper' => ['', ''],
        'postgres' => ['PostgreSQL', ''],
        'admonition' => ["\n> **", "\n"],
    },

    'text' =>
    {
        'br' => ['\n', ''],
        'quote' => ['"', '"'],
        'p' => ['', "\n\n"],
        'b' => ['', ''],
        'i' => ['', ''],
        # 'bi' => ['', ''],
        'list' => ["", "\n"],
        'list-item' => ['* ', "\n"],
        'id' => ['', ''],
        'host' => ['', ''],
        'file' => ['', ''],
        'path' => ['', ''],
        'cmd' => ['', ''],
        'br-option' => ['', ''],
        'pg-setting' => ['', ''],
        'param' => ['', ''],
        'setting' => ['', ''],
        'code' => ['', ''],
        'code-block' => ['', ''],
        'exe' => [undef, ''],
        'backrest' => [undef, ''],
        'proper' => ['', ''],
        'postgres' => ['PostgreSQL', ''],
        'admonition' => ['', "\n\n"],
    },

    'latex' =>
    {
        'br' => ['\\\vspace{1em}', ''],
        'quote' => ['``', '"'],
        'p' => ["\n\\begin{sloppypar}", "\\end{sloppypar}\n"],
        'b' => ['\textbf{', '}'],
        'i' => ['\textit{', '}'],
        # 'bi' => ['', ''],
        'list' => ["\\begin{itemize}\n", "\\end{itemize}\n"],
        'list-item' => ['\item ', "\n"],
        'id' => ['\textnormal{\texttt{', '}}'],
        'host' => ['\textnormal{\textbf{', '}}'],
        'file' => ['\textnormal{\texttt{', '}}'],
        'path' => ['\textnormal{\texttt{', '}}'],
        'cmd' => ['\textnormal{\texttt{', "}}"],
        'user' => ['\textnormal{\texttt{', '}}'],
        # 'br-option' => ['', ''],
        # 'param' => ['\texttt{', '}'],
        # 'setting' => ['\texttt{', '}'],
        'br-option' => ['\textnormal{\texttt{', '}}'],
        'br-setting' => ['\textnormal{\texttt{', '}}'],
        'pg-option' => ['\textnormal{\texttt{', '}}'],
        'pg-setting' => ['\textnormal{\texttt{', '}}'],
        'code' => ['\textnormal{\texttt{', '}}'],
        # 'code' => ['\texttt{', '}'],
        # 'code-block' => ['', ''],
        # 'exe' => [undef, ''],
        'backrest' => [undef, ''],
        'proper' => ['\textnormal{\texttt{', '}}'],
        'postgres' => ['PostgreSQL', ''],
        'admonition' =>
            ["\n\\vspace{.5em}\\begin{leftbar}\n\\begin{sloppypar}\\textit{\\textbf{", "}\\end{sloppypar}\n\\end{leftbar}\n"],
    },

    'html' =>
    {
        'br' => ['
', ''],
        'quote' => ['', ''],
        'b' => ['', ''],
        'i' => ['', ''],
        'p' => ['', ''],
        # 'bi' => ['', ''],
        'list' => ['
    ', '
'],
        'list-item' => ['
  • ', '
  • '],
        'id' => ['', ''],
        'host' => ['', ''],
        'file' => ['', ''],
        'path' => ['', ''],
        'cmd' => ['', ''],
        'user' => ['', ''],
        'br-option' => ['', ''],
        'br-setting' => ['', ''],
        'pg-option' => ['', ''],
        'pg-setting' => ['', ''],
        'code' => ['', ''],
        'code-block' => ['', ''],
        'exe' => [undef, ''],
        'setting' => ['', ''], # ??? This will need to be fixed
        'backrest' => [undef, ''],
        'proper' => ['', ''],
        'postgres' => ['PostgreSQL', ''],
        'admonition' => ['
    ', '
    '], } }; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{strType}, $self->{oManifest}, $self->{bExe}, $self->{strRenderOutKey}, ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'strType'}, {name => 'oManifest', required => false}, {name => 'bExe', required => false}, {name => 'strRenderOutKey', required => false} ); # Create JSON object $self->{oJSON} = JSON::PP->new()->allow_nonref(); # Initialize project tags $$oRenderTag{markdown}{backrest}[0] = "{[project]}"; $$oRenderTag{markdown}{exe}[0] = "{[project-exe]}"; $$oRenderTag{text}{backrest}[0] = "{[project]}"; $$oRenderTag{text}{exe}[0] = "{[project-exe]}"; $$oRenderTag{latex}{backrest}[0] = "{[project]}"; $$oRenderTag{latex}{exe}[0] = "\\textnormal\{\\texttt\{[project-exe]}}\}\}"; $$oRenderTag{html}{backrest}[0] = "{[project]}"; $$oRenderTag{html}{exe}[0] = "{[project-exe]}"; if (defined($self->{strRenderOutKey})) { # Copy page data to self my $oRenderOut = $self->{oManifest}->renderOutGet($self->{strType} eq 'latex' ? 'pdf' : $self->{strType}, $self->{strRenderOutKey}); # If these are the backrest docs then load the help if ($self->{oManifest}->isBackRest()) { $self->{oReference} = new pgBackRestDoc::Common::DocConfig(${$self->{oManifest}->sourceGet('help')}{doc}, $self); } if (defined($$oRenderOut{source}) && $$oRenderOut{source} eq 'release' && $self->{oManifest}->isBackRest()) { require pgBackRestDoc::Custom::DocCustomRelease; pgBackRestDoc::Custom::DocCustomRelease->import(); $self->{oDoc} = (new pgBackRestDoc::Custom::DocCustomRelease( ${$self->{oManifest}->sourceGet('release')}{doc}, defined($self->{oManifest}->variableGet('dev')) && $self->{oManifest}->variableGet('dev') eq 'y'))->docGet(); } else { $self->{oDoc} = ${$self->{oManifest}->sourceGet($self->{strRenderOutKey})}{doc}; } $self->{oSource} = $self->{oManifest}->sourceGet($$oRenderOut{source}); } if (defined($self->{strRenderOutKey})) { # Build the doc $self->build($self->{oDoc}); # Get required sections foreach my $strPath (@{$self->{oManifest}->{stryRequire}}) { if (substr($strPath, 0, 1) ne '/') { confess &log(ERROR, "path ${strPath} must begin with a /"); } if (!defined($self->{oSection}->{$strPath})) { confess &log(ERROR, "required section '${strPath}' does not exist"); } if (defined(${$self->{oSection}}{$strPath})) { $self->required($strPath); } } } if (defined($self->{oDoc})) { $self->{bToc} = !defined($self->{oDoc}->paramGet('toc', false)) || $self->{oDoc}->paramGet('toc') eq 'y' ? true : false; $self->{bTocNumber} = $self->{bToc} && (!defined($self->{oDoc}->paramGet('toc-number', false)) || $self->{oDoc}->paramGet('toc-number') eq 'y') ? 
true : false; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # Set begin and end values for a tag #################################################################################################################################### sub tagSet { my $self = shift; my $strTag = shift; my $strBegin = shift; my $strEnd = shift; $oRenderTag->{$self->{strType}}{$strTag}[0] = defined($strBegin) ? $strBegin : ''; $oRenderTag->{$self->{strType}}{$strTag}[1] = defined($strEnd) ? $strEnd : ''; } #################################################################################################################################### # variableReplace # # Replace variables in the string. #################################################################################################################################### sub variableReplace { my $self = shift; return defined($self->{oManifest}) ? $self->{oManifest}->variableReplace(shift, $self->{strType}) : shift; } #################################################################################################################################### # variableSet # # Set a variable to be replaced later. #################################################################################################################################### sub variableSet { my $self = shift; return $self->{oManifest}->variableSet(shift, shift); } #################################################################################################################################### # variableGet # # Get the current value of a variable. #################################################################################################################################### sub variableGet { my $self = shift; return $self->{oManifest}->variableGet(shift); } #################################################################################################################################### # Get pre-execute list for a host #################################################################################################################################### sub preExecute { my $self = shift; my $strHost = shift; if (defined($self->{preExecute}{$strHost})) { return @{$self->{preExecute}{$strHost}}; } return; } #################################################################################################################################### # build # # Build the section map and perform filtering. #################################################################################################################################### sub build { my $self = shift; my $oNode = shift; my $oParent = shift; my $strPath = shift; my $strPathPrefix = shift; # &log(INFO, " node " . $oNode->nameGet()); my $strName = $oNode->nameGet(); if (defined($oParent)) { # Evaluate if condition -- when false the node will be removed if (!$self->{oManifest}->evaluateIf($oNode)) { my $strDescription; if (defined($oNode->nodeGet('title', false))) { $strDescription = $self->processText($oNode->nodeGet('title')->textGet()); } &log(DEBUG, " filtered ${strName}" . (defined($strDescription) ? 
": ${strDescription}" : '')); $oParent->nodeRemove($oNode); return; } } else { &log(DEBUG, ' build document'); $self->{oSection} = {}; } # Build section if ($strName eq 'section') { my $strSectionId = $oNode->paramGet('id'); &log(DEBUG, "build section [${strSectionId}]"); # Set path and parent-path for this section if (defined($strPath)) { $oNode->paramSet('path-parent', $strPath); } $strPath .= '/' . $oNode->paramGet('id'); &log(DEBUG, " path ${strPath}"); ${$self->{oSection}}{$strPath} = $oNode; $oNode->paramSet('path', $strPath); # If depend is not set then set it to the last section my $strDepend = $oNode->paramGet('depend', false); my $oContainerNode = defined($oParent) ? $oParent : $self->{oDoc}; my $oLastChild; my $strDependPrev; foreach my $oChild ($oContainerNode->nodeList('section', false)) { if ($oChild->paramGet('id') eq $oNode->paramGet('id')) { if (defined($oLastChild)) { $strDependPrev = $oLastChild->paramGet('id'); } elsif (defined($oParent->paramGet('depend', false))) { $strDependPrev = $oParent->paramGet('depend'); } last; } $oLastChild = $oChild; } if (defined($strDepend)) { if (defined($strDependPrev) && $strDepend eq $strDependPrev && !$oNode->paramTest('depend-default')) { &log(WARN, "section '${strPath}' depend is set to '${strDepend}' which is the default, best to remove" . " because it may become obsolete if a new section is added in between"); } } else { $strDepend = $strDependPrev; } # If depend is defined make sure it exists if (defined($strDepend)) { # If this is a relative depend then prepend the parent section if (index($strDepend, '/') != 0) { if (defined($oParent->paramGet('path', false))) { $strDepend = $oParent->paramGet('path') . '/' . $strDepend; } else { $strDepend = "/${strDepend}"; } } if (!defined($self->{oSection}->{$strDepend})) { confess &log(ERROR, "section '${strSectionId}' depend '${strDepend}' is not valid"); } } if (defined($strDepend)) { $oNode->paramSet('depend', $strDepend); } if (defined($strDependPrev)) { $oNode->paramSet('depend-default', $strDependPrev); } # Set log to true if this section has an execute list. This helps reduce the info logging by only showing sections that are # likely to take a log time. $oNode->paramSet('log', $self->{bExe} && $oNode->nodeList('execute-list', false) > 0 ? true : false); # If section content is being pulled from elsewhere go get the content if ($oNode->paramTest('source')) { my $oSource = ${$self->{oManifest}->sourceGet($oNode->paramGet('source'))}{doc}; # Section should not already have title defined, it should come from the source doc if ($oNode->nodeTest('title')) { confess &log(ERROR, "cannot specify title in section that sources another document"); } # Set title from source doc's title $oNode->nodeAdd('title')->textSet($oSource->paramGet('title')); foreach my $oSection ($oSource->nodeList('section')) { push(@{${$oNode->{oDoc}}{children}}, $oSection->{oDoc}); } # Set path prefix to modify all section paths further down $strPathPrefix = $strPath; # Remove source so it is not included again later $oNode->paramSet('source', undef); } } # Build link elsif ($strName eq 'link') { &log(DEBUG, 'build link [' . $oNode->valueGet() . ']'); # If the path prefix is set and this is a section if (defined($strPathPrefix) && $oNode->paramTest('section')) { my $strNewPath = $strPathPrefix . $oNode->paramGet('section'); &log(DEBUG, "modify link section from '" . $oNode->paramGet('section') . 
"' to '${strNewPath}'"); $oNode->paramSet('section', $strNewPath); } } # Store block defines elsif ($strName eq 'block-define') { my $strBlockId = $oNode->paramGet('id'); if (defined($self->{oyBlockDefine}{$strBlockId})) { confess &log(ERROR, "block ${strBlockId} is already defined"); } $self->{oyBlockDefine}{$strBlockId} = dclone($oNode->{oDoc}{children}); $oParent->nodeRemove($oNode); } # Copy blocks elsif ($strName eq 'block') { my $strBlockId = $oNode->paramGet('id'); if (!defined($self->{oyBlockDefine}{$strBlockId})) { confess &log(ERROR, "block ${strBlockId} is not defined"); } my $strNodeJSON = $self->{oJSON}->encode($self->{oyBlockDefine}{$strBlockId}); foreach my $oVariable ($oNode->nodeList('block-variable-replace', false)) { my $strVariableKey = $oVariable->paramGet('key'); my $strVariableReplace = $oVariable->valueGet(); $strNodeJSON =~ s/\{\[$strVariableKey\]\}/$strVariableReplace/g; } my ($iReplaceIdx, $iReplaceTotal) = $oParent->nodeReplace($oNode, $self->{oJSON}->decode($strNodeJSON)); # Build any new children that were added my $iChildIdx = 0; foreach my $oChild ($oParent->nodeList(undef, false)) { if ($iChildIdx >= $iReplaceIdx && $iChildIdx < ($iReplaceIdx + $iReplaceTotal)) { $self->build($oChild, $oParent, $strPath, $strPathPrefix); } $iChildIdx++; } } # Check for pre-execute statements elsif ($strName eq 'execute') { if ($self->{oManifest}->{bPre} && $oNode->paramGet('pre', false, 'n') eq 'y') { # Add to pre-execute list my $strHost = $self->variableReplace($oParent->paramGet('host')); push(@{$self->{preExecute}{$strHost}}, $oNode); # Skip this command so it doesn't get executed twice $oNode->paramSet('skip', 'y') } } # Iterate all text nodes if (defined($oNode->textGet(false))) { foreach my $oChild ($oNode->textGet()->nodeList(undef, false)) { if (ref(\$oChild) ne "SCALAR") { $self->build($oChild, $oNode, $strPath, $strPathPrefix); } } } # Iterate all non-text nodes foreach my $oChild ($oNode->nodeList(undef, false)) { if (ref(\$oChild) ne "SCALAR") { $self->build($oChild, $oNode, $strPath, $strPathPrefix); # If the child should be logged then log the parent as well so the hierarchy is complete if ($oChild->nameGet() eq 'section' && $oChild->paramGet('log', false, false)) { $oNode->paramSet('log', true); } } } } #################################################################################################################################### # required # # Build a list of required sections #################################################################################################################################### sub required { my $self = shift; my $strPath = shift; my $bDepend = shift; # If node is not found that means the path is invalid my $oNode = ${$self->{oSection}}{$strPath}; if (!defined($oNode)) { confess &log(ERROR, "invalid path ${strPath}"); } # Only add sections that are listed dependencies if (!defined($bDepend) || $bDepend) { # Match section and all child sections foreach my $strChildPath (sort(keys(%{$self->{oSection}}))) { if ($strChildPath =~ /^$strPath$/ || $strChildPath =~ /^$strPath\/.*$/) { if (!defined(${$self->{oSectionRequired}}{$strChildPath})) { my @stryChildPath = split('/', $strChildPath); &log(INFO, (' ' x (scalar(@stryChildPath) - 2)) . 
" require section: ${strChildPath}"); ${$self->{oSectionRequired}}{$strChildPath} = true; } } } } # Get the path of the current section's parent my $strParentPath = $oNode->paramGet('path-parent', false); if ($oNode->paramTest('depend')) { foreach my $strDepend (split(',', $oNode->paramGet('depend'))) { if ($strDepend !~ /^\//) { if (!defined($strParentPath)) { $strDepend = "/${strDepend}"; } else { $strDepend = "${strParentPath}/${strDepend}"; } } $self->required($strDepend, true); } } elsif (defined($strParentPath)) { $self->required($strParentPath, false); } } #################################################################################################################################### # isRequired # # Is it required to execute the section statements? #################################################################################################################################### sub isRequired { my $self = shift; my $oSection = shift; if (!defined($self->{oSectionRequired})) { return true; } my $strPath = $oSection->paramGet('path'); defined(${$self->{oSectionRequired}}{$strPath}) ? true : false; } #################################################################################################################################### # processTag #################################################################################################################################### sub processTag { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oTag ) = logDebugParam ( __PACKAGE__ . '->processTag', \@_, {name => 'oTag', trace => true} ); my $strBuffer = ""; my $strType = $self->{strType}; my $strTag = $oTag->nameGet(); if (!defined($strTag)) { require Data::Dumper; confess Dumper($oTag); } if ($strTag eq 'link') { my $strUrl = $oTag->paramGet('url', false); if (!defined($strUrl)) { my $strPage = $self->variableReplace($oTag->paramGet('page', false)); my $strSection = $oTag->paramGet('section', false); # If a page/section link points to the current page then remove the page portion if (defined($strPage) && defined($strSection) && defined($self->{strRenderOutKey}) && $strPage eq $self->{strRenderOutKey}) { undef($strPage); } # If this is a page URL if (defined($strPage)) { # If the page wasn't rendered then point at the website if (!defined($self->{oManifest}->renderOutGet($strType, $strPage, true))) { $strUrl = '{[backrest-url-base]}/' . $oTag->paramGet('page') . '.html'; } # Else point locally else { if ($strType eq 'html') { $strUrl = "${strPage}.html". (defined($strSection) ? '#' . substr($strSection, 1) : ''); } elsif ($strType eq 'markdown') { if (defined($strSection)) { confess &log( ERROR, "page and section links not supported for type ${strType}, value '" . $oTag->valueGet() . "'"); } $strUrl = "${strPage}.md"; } else { confess &log(ERROR, "page links not supported for type ${strType}, value '" . $oTag->valueGet() . "'"); } } } else { my $strSection = $oTag->paramGet('section'); my $oSection = ${$self->{oSection}}{$strSection}; if (!defined($oSection)) { confess &log(ERROR, "section link '${strSection}' does not exist"); } if (!defined($strSection)) { confess &log(ERROR, "link with value '" . $oTag->valueGet() . "' must defined url, page, or section"); } if ($strType eq 'html') { $strUrl = '#' . substr($strSection, 1); } elsif ($strType eq 'latex') { $strUrl = $strSection; } else { $strUrl = lc($self->processText($oSection->nodeGet('title')->textGet())); $strUrl =~ s/[^\w\- ]//g; $strUrl =~ s/ /-/g; $strUrl = '#' . 
$strUrl; } } } if ($strType eq 'html') { $strBuffer = '' . $oTag->valueGet() . ''; } elsif ($strType eq 'markdown') { $strBuffer = '[' . $oTag->valueGet() . '](' . $strUrl . ')'; } elsif ($strType eq 'latex') { if ($oTag->paramTest('url')) { $strBuffer = "\\href{$strUrl}{" . $oTag->valueGet() . "}"; } else { $strBuffer = "\\hyperref[$strUrl]{" . $oTag->valueGet() . "}"; } } elsif ($strType eq 'text') { $strBuffer = $oTag->valueGet(); } else { confess "'link' tag not valid for type ${strType}"; } } else { my $strStart = $$oRenderTag{$strType}{$strTag}[0]; my $strStop = $$oRenderTag{$strType}{$strTag}[1]; if (!defined($strStart) || !defined($strStop)) { confess &log(ERROR, "invalid type ${strType} or tag ${strTag}"); } $strBuffer .= $strStart; # Admonitions in the help materials are tags of the text element rather than field elements of the document so special # handling is required if ($strTag eq 'admonition') { $strBuffer .= $self->processAdmonitionStart($oTag); } if ($strTag eq 'p' || $strTag eq 'title' || $strTag eq 'list-item' || $strTag eq 'code-block' || $strTag eq 'summary' || $strTag eq 'admonition') { $strBuffer .= $self->processText($oTag); } elsif (defined($oTag->valueGet())) { $strBuffer .= $oTag->valueGet(); } else { foreach my $oSubTag ($oTag->nodeList(undef, false)) { $strBuffer .= $self->processTag($oSubTag); } } if ($strTag eq 'admonition') { $strBuffer .= $self->processAdmonitionEnd($oTag); } $strBuffer .= $strStop; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strBuffer', value => $strBuffer, trace => true} ); } #################################################################################################################################### # processAdmonitionStart #################################################################################################################################### sub processAdmonitionStart { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oTag ) = logDebugParam ( __PACKAGE__ . '->processAdmonitionStart', \@_, {name => 'oTag', trace => true} ); my $strType = $self->{strType}; my $strBuffer = ''; # Note that any changes to the way the HTML, markdown or latex display tags may also need to be made here if ($strType eq 'html') { my $strType = $oTag->paramGet('type'); $strBuffer = '
    ' . uc($strType) . ':
    ' . '
    '; } elsif ($strType eq 'text' || $strType eq 'markdown') { $strBuffer = uc($oTag->paramGet('type')) . ": "; } elsif ($strType eq 'latex') { $strBuffer = uc($oTag->paramGet('type')) . ": }"; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strBuffer', value => $strBuffer, trace => true} ); } #################################################################################################################################### # processAdmonitionEnd #################################################################################################################################### sub processAdmonitionEnd { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oTag ) = logDebugParam ( __PACKAGE__ . '->processAdmonitionEnd', \@_, {name => 'oTag', trace => true} ); my $strType = $self->{strType}; my $strBuffer = ''; # Note that any changes to the way the HTML, markdown or latex display tags may also need to be made here if ($strType eq 'html') { $strBuffer = '
    '; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strBuffer', value => $strBuffer, trace => true} ); } #################################################################################################################################### # processText #################################################################################################################################### sub processText { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oText ) = logDebugParam ( __PACKAGE__ . '->processText', \@_, {name => 'oText', trace => true} ); my $strType = $self->{strType}; my $strBuffer = ''; my $strLastTag = 'body'; foreach my $oNode ($oText->nodeList(undef, false)) { if (ref(\$oNode) eq "SCALAR") { if ($oNode =~ /\"/) { confess &log(ERROR, "unable to process quotes in string (use instead):\n${oNode}"); } # Skip text nodes with linefeeds since they happen between tags if (index($oNode, "\n") == -1) { $strBuffer .= $oNode; } } else { # Add br tags to separate paragraphs and linefeeds to make the output more diffable. This is needed because of the hacky # way config text is being rendered in the final document, i.e. by passing rendered HTML into divs rather than XML to be # rendered at that time. if ($strLastTag eq 'p' && $strType eq 'html') { $strBuffer .= "
    \n"; if ($oNode->nameGet() eq 'p') { $strBuffer .= "
    \n"; } } $strBuffer .= $self->processTag($oNode); $strLastTag = $oNode->nameGet(); } } # # if ($strType eq 'html') # { # # $strBuffer =~ s/^\s+|\s+$//g; # # $strBuffer =~ s/\n/\\n/g; # } # if ($strType eq 'markdown') # { # $strBuffer =~ s/^\s+|\s+$//g; $strBuffer =~ s/ +/ /g; $strBuffer =~ s/^ //smg; # } if ($strType eq 'latex') { $strBuffer =~ s/\&mdash\;/---/g; $strBuffer =~ s/\<\;/\\=/\$\\geq\$/g; # $strBuffer =~ s/\_/\\_/g; # If not a code-block, which is to be taken AS IS, then escape special characters in latex if ($oText->nameGet() ne 'code-block') { # If the previous character is not already a slash (e.g. not already escaped) then insert a slash $strBuffer =~ s/(?nameGet() eq 'list-item') { $strBuffer =~ s/\[/\{\[/g; $strBuffer =~ s/\]/\]\}/g; } $strBuffer =~ s/\©\;/{\\textcopyright}/g; $strBuffer =~ s/\&trade\;/{\\texttrademark}/g; $strBuffer =~ s/\®\;/{\\textregistered}/g; $strBuffer =~ s/\&rarr\;/{\\textrightarrow}/g; # Escape all ampersands after making any other conversions above $strBuffer =~ s/(?\=/g; } $strBuffer = $self->variableReplace($strBuffer); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strBuffer', value => $strBuffer, trace => true} ); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/Exception.pm000066400000000000000000000253701500617037600250750ustar00rootroot00000000000000#################################################################################################################################### # COMMON EXCEPTION MODULE #################################################################################################################################### package pgBackRestDoc::Common::Exception; use strict; use warnings FATAL => qw(all); use Carp qw(confess longmess); use Scalar::Util qw(blessed); use Exporter qw(import); our @EXPORT = qw(); #################################################################################################################################### # Error Definitions #################################################################################################################################### use constant ERROR_MINIMUM => 25; push @EXPORT, qw(ERROR_MINIMUM); use constant ERROR_MAXIMUM => 125; push @EXPORT, qw(ERROR_MAXIMUM); use constant ERROR_ASSERT => 25; push @EXPORT, qw(ERROR_ASSERT); use constant ERROR_CHECKSUM => 26; push @EXPORT, qw(ERROR_CHECKSUM); use constant ERROR_CONFIG => 27; push @EXPORT, qw(ERROR_CONFIG); use constant ERROR_FILE_INVALID => 28; push @EXPORT, qw(ERROR_FILE_INVALID); use constant ERROR_FORMAT => 29; push @EXPORT, qw(ERROR_FORMAT); use constant ERROR_OPTION_INVALID_VALUE => 32; push @EXPORT, qw(ERROR_OPTION_INVALID_VALUE); use constant ERROR_PG_RUNNING => 38; push @EXPORT, qw(ERROR_PG_RUNNING); use constant ERROR_PATH_NOT_EMPTY => 40; push @EXPORT, qw(ERROR_PATH_NOT_EMPTY); use constant ERROR_FILE_OPEN => 41; push @EXPORT, qw(ERROR_FILE_OPEN); use constant ERROR_FILE_READ => 42; push @EXPORT, qw(ERROR_FILE_READ); use constant ERROR_ARCHIVE_MISMATCH => 44; push @EXPORT, qw(ERROR_ARCHIVE_MISMATCH); use constant ERROR_ARCHIVE_DUPLICATE => 45; push @EXPORT, qw(ERROR_ARCHIVE_DUPLICATE); use constant ERROR_PATH_CREATE => 47; push @EXPORT, qw(ERROR_PATH_CREATE); use constant ERROR_LOCK_ACQUIRE => 50; push @EXPORT, qw(ERROR_LOCK_ACQUIRE); use constant ERROR_BACKUP_MISMATCH => 51; push @EXPORT, qw(ERROR_BACKUP_MISMATCH); use constant ERROR_PATH_OPEN => 53; push @EXPORT, qw(ERROR_PATH_OPEN); use constant ERROR_PATH_SYNC => 54; push @EXPORT, 
qw(ERROR_PATH_SYNC); use constant ERROR_FILE_MISSING => 55; push @EXPORT, qw(ERROR_FILE_MISSING); use constant ERROR_DB_CONNECT => 56; push @EXPORT, qw(ERROR_DB_CONNECT); use constant ERROR_DB_QUERY => 57; push @EXPORT, qw(ERROR_DB_QUERY); use constant ERROR_DB_MISMATCH => 58; push @EXPORT, qw(ERROR_DB_MISMATCH); use constant ERROR_PATH_REMOVE => 61; push @EXPORT, qw(ERROR_PATH_REMOVE); use constant ERROR_STOP => 62; push @EXPORT, qw(ERROR_STOP); use constant ERROR_FILE_WRITE => 64; push @EXPORT, qw(ERROR_FILE_WRITE); use constant ERROR_FEATURE_NOT_SUPPORTED => 67; push @EXPORT, qw(ERROR_FEATURE_NOT_SUPPORTED); use constant ERROR_ARCHIVE_COMMAND_INVALID => 68; push @EXPORT, qw(ERROR_ARCHIVE_COMMAND_INVALID); use constant ERROR_LINK_EXPECTED => 69; push @EXPORT, qw(ERROR_LINK_EXPECTED); use constant ERROR_LINK_DESTINATION => 70; push @EXPORT, qw(ERROR_LINK_DESTINATION); use constant ERROR_PATH_MISSING => 73; push @EXPORT, qw(ERROR_PATH_MISSING); use constant ERROR_FILE_MOVE => 74; push @EXPORT, qw(ERROR_FILE_MOVE); use constant ERROR_PATH_TYPE => 77; push @EXPORT, qw(ERROR_PATH_TYPE); use constant ERROR_DB_MISSING => 80; push @EXPORT, qw(ERROR_DB_MISSING); use constant ERROR_DB_INVALID => 81; push @EXPORT, qw(ERROR_DB_INVALID); use constant ERROR_ARCHIVE_TIMEOUT => 82; push @EXPORT, qw(ERROR_ARCHIVE_TIMEOUT); use constant ERROR_ARCHIVE_DISABLED => 87; push @EXPORT, qw(ERROR_ARCHIVE_DISABLED); use constant ERROR_FILE_OWNER => 88; push @EXPORT, qw(ERROR_FILE_OWNER); use constant ERROR_PATH_EXISTS => 92; push @EXPORT, qw(ERROR_PATH_EXISTS); use constant ERROR_FILE_EXISTS => 93; push @EXPORT, qw(ERROR_FILE_EXISTS); use constant ERROR_CRYPTO => 95; push @EXPORT, qw(ERROR_CRYPTO); use constant ERROR_INVALID => 123; push @EXPORT, qw(ERROR_INVALID); use constant ERROR_UNHANDLED => 124; push @EXPORT, qw(ERROR_UNHANDLED); use constant ERROR_UNKNOWN => 125; push @EXPORT, qw(ERROR_UNKNOWN); #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name my $strLevel = shift; # Log level my $iCode = shift; # Error code my $strMessage = shift; # ErrorMessage my $strTrace = shift; # Stack trace my $rExtra = shift; # Extra info used exclusively by the logging system my $bErrorC = shift; # Is this a C error? if ($iCode < ERROR_MINIMUM || $iCode > ERROR_MAXIMUM) { $iCode = ERROR_INVALID; } # Create the class hash my $self = {}; bless $self, $class; # Initialize exception $self->{strLevel} = $strLevel; $self->{iCode} = $iCode; $self->{strMessage} = $strMessage; $self->{strTrace} = $strTrace; $self->{rExtra} = $rExtra; $self->{bErrorC} = $bErrorC ? 
1 : 0; return $self; } #################################################################################################################################### # level #################################################################################################################################### sub level { my $self = shift; return $self->{strLevel}; } #################################################################################################################################### # CODE #################################################################################################################################### sub code { my $self = shift; return $self->{iCode}; } #################################################################################################################################### # extra #################################################################################################################################### sub extra { my $self = shift; return $self->{rExtra}; } #################################################################################################################################### # MESSAGE #################################################################################################################################### sub message { my $self = shift; return $self->{strMessage}; } #################################################################################################################################### # TRACE #################################################################################################################################### sub trace { my $self = shift; return $self->{strTrace}; } #################################################################################################################################### # isException - is this a structured exception or a default Perl exception? #################################################################################################################################### sub isException { my $roException = shift; # Only check if defined if (defined($roException) && defined($$roException)) { # If a standard Exception if (blessed($$roException)) { return $$roException->isa('pgBackRestDoc::Common::Exception') ? 1 : 0; } # Else if a specially formatted string from the C library elsif ($$roException =~ /^PGBRCLIB\:[0-9]+\:/) { # Split message and discard the first part used for identification my @stryException = split(/\:/, $$roException); shift(@stryException); # Construct exception fields my $iCode = shift(@stryException) + 0; my $strTrace = shift(@stryException) . qw{:} . shift(@stryException); my $strMessage = join(':', @stryException); # Create exception $$roException = new pgBackRestDoc::Common::Exception("ERROR", $iCode, $strMessage, $strTrace, undef, 1); return 1; } } return 0; } push @EXPORT, qw(isException); #################################################################################################################################### # exceptionCode # # Extract the error code from an exception - if a Perl exception return ERROR_UNKNOWN. #################################################################################################################################### sub exceptionCode { my $oException = shift; return isException(\$oException) ? 
$oException->code() : ERROR_UNKNOWN; } push @EXPORT, qw(exceptionCode); #################################################################################################################################### # exceptionMessage # # Extract the error message from an exception - if a Perl exception return bare exception. #################################################################################################################################### sub exceptionMessage { my $oException = shift; return isException(\$oException) ? $oException->message() : $oException; } push @EXPORT, qw(exceptionMessage); 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/Host.pm000066400000000000000000000250141500617037600240470ustar00rootroot00000000000000#################################################################################################################################### # HostTest.pm - Encapsulate a docker host #################################################################################################################################### package pgBackRestDoc::Common::Host; #################################################################################################################################### # Perl includes #################################################################################################################################### use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Cwd qw(abs_path); use Exporter qw(import); our @EXPORT = qw(); use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestTest::Common::ExecuteTest; #################################################################################################################################### # new #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{strName}, $self->{strContainer}, $self->{strImage}, $self->{strUser}, $self->{stryMount}, $self->{strOption}, $self->{strParam}, $self->{bHostUpdate}, $self->{strEntryPoint}, ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'strName', trace => true}, {name => 'strContainer', trace => true}, {name => 'strImage', trace => true}, {name => 'strUser', trace => true}, {name => 'stryMount', required => false, trace => true}, {name => 'strOption', required => false, trace => true}, {name => 'strParam', required => false, trace => true}, {name => 'bHostUpdate', required => false, trace => true, default => true}, {name => 'strEntryPoint', required => false, trace => true}, ); executeTest("docker rm -f $self->{strContainer}", {bSuppressError => true}); executeTest("docker run -itd -h $self->{strName} --name=$self->{strContainer}" . (defined($self->{strOption}) ? ' ' . $self->{strOption} : '') . (defined($self->{stryMount}) ? ' -v ' . join(' -v ', @{$self->{stryMount}}) : '') . (defined($self->{strEntryPoint}) ? " --entrypoint=$self->{strEntryPoint} --user=$self->{strUser}" : '') . " $self->{strImage} " . (defined($self->{strParam}) ? ' ' . 
$self->{strParam} : ''), {bSuppressStdErr => true}); # Get IP Address $self->{strIP} = trim(executeTest("docker inspect --format '\{\{ .NetworkSettings.IPAddress \}\}' $self->{strContainer}")); $self->{bActive} = true; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self, trace => true} ); } #################################################################################################################################### # remove #################################################################################################################################### sub remove { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->remove'); if ($self->{bActive}) { executeTest("docker rm -f $self->{strContainer}", {bSuppressError => true}); $self->{bActive} = false; } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # execute #################################################################################################################################### sub execute { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strCommand, $oParam, $strUser, $bLoadEnv, $bBashWrap, ) = logDebugParam ( __PACKAGE__ . '->execute', \@_, {name => 'strCommand'}, {name => 'oParam', required => false}, {name => 'strUser', required => false}, {name => 'bLoadEnv', optional => true, default => true}, {name => 'bBashWrap', optional => true, default => true}, ); # Set the user if (!defined($strUser)) { $strUser = $self->{strUser}; } $strCommand =~ s/'/'\\''/g; my $oExec = new pgBackRestTest::Common::ExecuteTest( "docker exec -u ${strUser} $self->{strContainer}" . ($bBashWrap ? " bash" . ($bLoadEnv ? ' -l' : '') . " -c '${strCommand}'" : " ${strCommand}"), $oParam); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oExec', value => $oExec, trace => true} ); } #################################################################################################################################### # executeSimple #################################################################################################################################### sub executeSimple { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strCommand, $oParam, $strUser, $bLoadEnv, $bBashWrap, ) = logDebugParam ( __PACKAGE__ . 
'->executeSimple', \@_, {name => 'strCommand', trace => true}, {name => 'oParam', required=> false, trace => true}, {name => 'strUser', required => false, trace => true}, {name => 'bLoadEnv', optional => true, default => true, trace => true}, {name => 'bBashWrap', optional => true, default => true}, ); my $oExec = $self->execute($strCommand, $oParam, $strUser, {bLoadEnv => $bLoadEnv}); $oExec->begin(); $oExec->end(); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strOutLog', value => $oExec->{strOutLog}, trace => true} ); } #################################################################################################################################### # copyTo #################################################################################################################################### sub copyTo { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strSource, $strDestination, $strOwner, $strMode ) = logDebugParam ( __PACKAGE__ . '->copyTo', \@_, {name => 'strSource'}, {name => 'strDestination'}, {name => 'strOwner', required => false}, {name => 'strMode', required => false} ); executeTest("docker cp ${strSource} $self->{strContainer}:${strDestination}"); if (defined($strOwner)) { $self->executeSimple("chown ${strOwner} ${strDestination}", undef, 'root'); } if (defined($strMode)) { $self->executeSimple("chmod ${strMode} ${strDestination}", undef, 'root'); } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # copyFrom #################################################################################################################################### sub copyFrom { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strSource, $strDestination ) = logDebugParam ( __PACKAGE__ . 
'->copyFrom', \@_, {name => 'strSource'}, {name => 'strDestination'} ); executeTest("docker cp $self->{strContainer}:${strSource} ${strDestination}"); # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # hostUpdateGet #################################################################################################################################### sub hostUpdateGet { my $self = shift; return $self->{bHostUpdate}; } #################################################################################################################################### # ipGet #################################################################################################################################### sub ipGet { my $self = shift; return $self->{strIP}; } #################################################################################################################################### # nameGet #################################################################################################################################### sub nameGet { my $self = shift; return $self->{strName}; } #################################################################################################################################### # nameTest #################################################################################################################################### sub nameTest { my $self = shift; my $strName = shift; return $self->{strName} eq $strName; } #################################################################################################################################### # userGet #################################################################################################################################### sub userGet { my $self = shift; return $self->{strUser}; } #################################################################################################################################### # Getters #################################################################################################################################### sub container {shift->{strContainer}} 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/HostGroup.pm000066400000000000000000000140671500617037600250720ustar00rootroot00000000000000#################################################################################################################################### # HostGroupTest.pm - Encapsulate a group of docker containers #################################################################################################################################### package pgBackRestDoc::Common::HostGroup; #################################################################################################################################### # Perl includes #################################################################################################################################### use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Cwd qw(abs_path); use Exporter qw(import); our @EXPORT = qw(); use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestTest::Common::ExecuteTest; #################################################################################################################################### # Global host group variable 
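#
# The host group is a module-level singleton: it is created lazily by
# hostGroupGet() (defined at the bottom of this file) and shared by all
# callers. A minimal usage sketch -- the host object and host name here are
# hypothetical, for illustration only:
#
#   my $oHostGroup = hostGroupGet();
#   $oHostGroup->hostAdd($oHost);                   # register a running container
#   my $oHostDb = $oHostGroup->hostGet('pg-primary');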
#################################################################################################################################### my $oHostGroup; #################################################################################################################################### # new #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->new'); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self, trace => true} ); } #################################################################################################################################### # hostAdd #################################################################################################################################### sub hostAdd { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oHost, $rstryHostName, ) = logDebugParam ( __PACKAGE__ . '->hostAdd', \@_, {name => 'oHost'}, {name => 'rstryHostName', optional => true}, ); $self->{host}{$oHost->{strName}} = $oHost; if ($oHost->hostUpdateGet()) { $oHost->executeSimple("echo \"\" >> /etc/hosts", undef, 'root', {bLoadEnv => false}); $oHost->executeSimple("echo \"# Test Hosts\" >> /etc/hosts", undef, 'root', {bLoadEnv => false}); } my $strHostList = $oHost->{strName} . (defined($rstryHostName) ? ' ' . join(' ', @{$rstryHostName}) : ''); # Iterate hosts to add IP mappings foreach my $strOtherHostName (sort(keys(%{$self->{host}}))) { my $oOtherHost = $self->{host}{$strOtherHostName}; if ($strOtherHostName ne $oHost->{strName}) { # Add this host IP to all hosts if ($oOtherHost->hostUpdateGet()) { $oOtherHost->executeSimple( "echo \"$oHost->{strIP} ${strHostList}\" >> /etc/hosts", undef, 'root', {bLoadEnv => false}); } # Add all other host IPs to this host if ($oHost->hostUpdateGet()) { $oHost->executeSimple( "echo \"$oOtherHost->{strIP} ${strOtherHostName}\" >> /etc/hosts", undef, 'root', {bLoadEnv => false}); } } } # Return from function and log return values if any return logDebugReturn($strOperation); } #################################################################################################################################### # hostGet #################################################################################################################################### sub hostGet { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $strName, $bIgnoreMissing, ) = logDebugParam ( __PACKAGE__ . 
'->hostGet', \@_, {name => 'strName', trace => true}, {name => 'bIgnoreMissing', default => false, trace => true}, ); my $oHost = $self->{host}{$strName}; if (!defined($oHost) && !$bIgnoreMissing) { confess &log(ERROR, "host ${strName} does not exist"); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oHost', value => $oHost} ); } #################################################################################################################################### # removeAll #################################################################################################################################### sub removeAll { my $self = shift; # Assign function parameters, defaults, and log debug info my ($strOperation) = logDebugParam(__PACKAGE__ . '->removeAll'); my $iTotal = 0; foreach my $strHostName (sort(keys(%{$self->{host}}))) { ${$self->{host}}{$strHostName}->remove(); delete($self->{host}{$strHostName}); $iTotal++; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'iTotal', value => $iTotal} ); } #################################################################################################################################### # hostGroupGet # # Get the global host group object. #################################################################################################################################### sub hostGroupGet { if (!defined($oHostGroup)) { $oHostGroup = new pgBackRestDoc::Common::HostGroup(); } return $oHostGroup; } push @EXPORT, qw(hostGroupGet); 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/Ini.pm000066400000000000000000000733551500617037600236640ustar00rootroot00000000000000#################################################################################################################################### # COMMON INI MODULE #################################################################################################################################### package pgBackRestDoc::Common::Ini; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; use Digest::SHA qw(sha1_hex); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use JSON::PP; use Storable qw(dclone); use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # Boolean constants #################################################################################################################################### use constant INI_TRUE => JSON::PP::true; push @EXPORT, qw(INI_TRUE); use constant INI_FALSE => JSON::PP::false; push @EXPORT, qw(INI_FALSE); #################################################################################################################################### # Ini control constants #################################################################################################################################### use constant INI_SECTION_BACKREST => 'backrest'; push @EXPORT, qw(INI_SECTION_BACKREST); use constant INI_KEY_CHECKSUM => 'backrest-checksum'; push @EXPORT, qw(INI_KEY_CHECKSUM); use constant INI_KEY_FORMAT => 'backrest-format'; push @EXPORT, qw(INI_KEY_FORMAT); use constant INI_KEY_VERSION => 'backrest-version'; push @EXPORT, qw(INI_KEY_VERSION); use constant INI_SECTION_CIPHER => 'cipher'; 
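# A sketch of how these cipher constants are used on disk in an encrypted ini
# file -- the passphrase value below is hypothetical, and the quoting assumes
# values are JSON-encoded, as the JSON::PP-based INI_TRUE/INI_FALSE constants
# above suggest:
#
#   [cipher]
#   cipher-pass="example-passphrase"
#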
push @EXPORT, qw(INI_SECTION_CIPHER); use constant INI_KEY_CIPHER_PASS => 'cipher-pass'; push @EXPORT, qw(INI_KEY_CIPHER_PASS); #################################################################################################################################### # Ini file copy extension #################################################################################################################################### use constant INI_COPY_EXT => '.copy'; push @EXPORT, qw(INI_COPY_EXT); #################################################################################################################################### # Ini sort orders #################################################################################################################################### use constant INI_SORT_FORWARD => 'forward'; push @EXPORT, qw(INI_SORT_FORWARD); use constant INI_SORT_REVERSE => 'reverse'; push @EXPORT, qw(INI_SORT_REVERSE); use constant INI_SORT_NONE => 'none'; push @EXPORT, qw(INI_SORT_NONE); #################################################################################################################################### # new() #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{oStorage}, $self->{strFileName}, my $bLoad, my $strContent, $self->{iInitFormat}, $self->{strInitVersion}, my $bIgnoreMissing, $self->{strCipherPass}, # Passphrase to read/write the file my $strCipherPassSub, # Passphrase to read/write subsequent files ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oStorage', trace => true}, {name => 'strFileName', trace => true}, {name => 'bLoad', optional => true, default => true, trace => true}, {name => 'strContent', optional => true, trace => true}, {name => 'iInitFormat', optional => true, default => REPOSITORY_FORMAT, trace => true}, {name => 'strInitVersion', optional => true, default => PROJECT_VERSION, trace => true}, {name => 'bIgnoreMissing', optional => true, default => false, trace => true}, {name => 'strCipherPass', optional => true, trace => true}, {name => 'strCipherPassSub', optional => true, trace => true}, ); # Set changed to false $self->{bModified} = false; # Set exists to false $self->{bExists} = false; # Load the file if requested if ($bLoad) { $self->load($bIgnoreMissing); } # Load from a string if provided elsif (defined($strContent)) { $self->{oContent} = iniParse($strContent); $self->headerCheck(); } # Initialize if not loading the file and not loading from string or if a load was attempted and the file does not exist if (!$self->{bExists} && !defined($strContent)) { $self->numericSet(INI_SECTION_BACKREST, INI_KEY_FORMAT, undef, $self->{iInitFormat}); $self->set(INI_SECTION_BACKREST, INI_KEY_VERSION, undef, $self->{strInitVersion}); # Determine if the passphrase section should be set if (defined($self->{strCipherPass}) && defined($strCipherPassSub)) { $self->set(INI_SECTION_CIPHER, INI_KEY_CIPHER_PASS, undef, $strCipherPassSub); } } return $self; } #################################################################################################################################### # loadVersion() - load a version (main or copy) of the ini file #################################################################################################################################### sub 
loadVersion { my $self = shift; my $bCopy = shift; my $bIgnoreError = shift; # Load main my $rstrContent = $self->{oStorage}->get( $self->{oStorage}->openRead($self->{strFileName} . ($bCopy ? INI_COPY_EXT : ''), {bIgnoreMissing => $bIgnoreError, strCipherPass => $self->{strCipherPass}})); # If the file exists then attempt to parse it if (defined($rstrContent)) { my $rhContent = iniParse($$rstrContent, {bIgnoreInvalid => $bIgnoreError}); # If the content is valid then check the header if (defined($rhContent)) { $self->{oContent} = $rhContent; # If the header is invalid then undef content if (!$self->headerCheck({bIgnoreInvalid => $bIgnoreError})) { delete($self->{oContent}); } } } return defined($self->{oContent}); } #################################################################################################################################### # load() - load the ini #################################################################################################################################### sub load { my $self = shift; my $bIgnoreMissing = shift; # If main was not loaded then try the copy if (!$self->loadVersion(false, true)) { if (!$self->loadVersion(true, true)) { return if $bIgnoreMissing; confess &log(ERROR, "unable to open $self->{strFileName} or $self->{strFileName}" . INI_COPY_EXT, ERROR_FILE_MISSING); } } $self->{bExists} = true; } #################################################################################################################################### # headerCheck() - check that version and checksum in header are as expected #################################################################################################################################### sub headerCheck { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $bIgnoreInvalid, ) = logDebugParam ( __PACKAGE__ . '->headerCheck', \@_, {name => 'bIgnoreInvalid', optional => true, default => false, trace => true}, ); # Eval so exceptions can be ignored on bIgnoreInvalid my $bValid = true; eval { # Make sure the ini is valid by testing checksum my $strChecksum = $self->get(INI_SECTION_BACKREST, INI_KEY_CHECKSUM, undef, false); my $strTestChecksum = $self->hash(); if (!defined($strChecksum) || $strChecksum ne $strTestChecksum) { confess &log(ERROR, "invalid checksum in '$self->{strFileName}', expected '${strTestChecksum}' but found " . (defined($strChecksum) ? "'${strChecksum}'" : '[undef]'), ERROR_CHECKSUM); } # Make sure that the format is current, otherwise error my $iFormat = $self->get(INI_SECTION_BACKREST, INI_KEY_FORMAT, undef, false, 0); if ($iFormat != $self->{iInitFormat}) { confess &log(ERROR, "invalid format in '$self->{strFileName}', expected $self->{iInitFormat} but found ${iFormat}", ERROR_FORMAT); } # Check if the version has changed if (!$self->test(INI_SECTION_BACKREST, INI_KEY_VERSION, undef, $self->{strInitVersion})) { $self->set(INI_SECTION_BACKREST, INI_KEY_VERSION, undef, $self->{strInitVersion}); } return true; } or do { # Confess the error if it should not be ignored if (!$bIgnoreInvalid) { confess $EVAL_ERROR; } # Return false when errors are ignored $bValid = false; }; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'bValid', value => $bValid, trace => true} ); } #################################################################################################################################### # iniParse() - parse from standard INI format to a hash. 
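#
# A minimal usage sketch (hypothetical values). Values are decoded as JSON unless bRelaxed is set, in which case they are read
# verbatim and repeated keys accumulate into an array:
#
#     my $rhContent = iniParse("[backrest]\nbackrest-format=5\n");
#     my $iFormat = $rhContent->{backrest}{'backrest-format'};                      # 5
#
#     my $rhRelaxed = iniParse("[global]\nkey=1\nkey=2\n", {bRelaxed => true});     # {global}{key} is ['1', '2']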
#################################################################################################################################### push @EXPORT, qw(iniParse); sub iniParse { # Assign function parameters, defaults, and log debug info my ( $strOperation, $strContent, $bRelaxed, $bIgnoreInvalid, ) = logDebugParam ( __PACKAGE__ . '::iniParse', \@_, {name => 'strContent', required => false, trace => true}, {name => 'bRelaxed', optional => true, default => false, trace => true}, {name => 'bIgnoreInvalid', optional => true, default => false, trace => true}, ); # Ini content my $oContent = undef; my $strSection; # Create the JSON object my $oJSON = JSON::PP->new()->allow_nonref(); # Eval so exceptions can be ignored on bIgnoreInvalid eval { # Read the INI file foreach my $strLine (split("\n", defined($strContent) ? $strContent : '')) { $strLine = trim($strLine); # Skip lines that are blank or comments if ($strLine ne '' && $strLine !~ '^[ ]*#.*') { # Get the section if (index($strLine, '[') == 0) { $strSection = substr($strLine, 1, length($strLine) - 2); } else { if (!defined($strSection)) { confess &log(ERROR, "key/value pair '${strLine}' found outside of a section", ERROR_CONFIG); } # Get key and value my $iIndex = index($strLine, '='); if ($iIndex == -1) { confess &log(ERROR, "unable to find '=' in '${strLine}'", ERROR_CONFIG); } my $strKey = substr($strLine, 0, $iIndex); my $strValue = substr($strLine, $iIndex + 1); # If relaxed then read the value directly if ($bRelaxed) { if (defined($oContent->{$strSection}{$strKey})) { if (ref($oContent->{$strSection}{$strKey}) ne 'ARRAY') { $oContent->{$strSection}{$strKey} = [$oContent->{$strSection}{$strKey}]; } push(@{$oContent->{$strSection}{$strKey}}, $strValue); } else { $oContent->{$strSection}{$strKey} = $strValue; } } # Else read the value as stricter JSON else { ${$oContent}{$strSection}{$strKey} = $oJSON->decode($strValue); } } } } # Error if the file is empty if (!($bRelaxed || defined($oContent))) { confess &log(ERROR, 'no key/value pairs found', ERROR_CONFIG); } return true; } or do { # Confess the error if it should not be ignored if (!$bIgnoreInvalid) { confess $EVAL_ERROR; } # Undef content when errors are ignored undef($oContent); }; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oContent', value => $oContent, trace => true} ); } #################################################################################################################################### # save() - save the file. #################################################################################################################################### sub save { my $self = shift; # Save only if modified if ($self->{bModified}) { # Calculate the hash $self->hash(); # Save the file $self->{oStorage}->put($self->{strFileName}, iniRender($self->{oContent}), {strCipherPass => $self->{strCipherPass}}); if ($self->{oStorage}->can('pathSync')) { $self->{oStorage}->pathSync(dirname($self->{strFileName})); } $self->{oStorage}->put($self->{strFileName} . 
INI_COPY_EXT, iniRender($self->{oContent}), {strCipherPass => $self->{strCipherPass}}); if ($self->{oStorage}->can('pathSync')) { $self->{oStorage}->pathSync(dirname($self->{strFileName})); } $self->{bModified} = false; # Indicate the file now exists $self->{bExists} = true; # File was saved return true; } # File was not saved return false; } #################################################################################################################################### # saveCopy - save only a copy of the file. #################################################################################################################################### sub saveCopy { my $self = shift; if ($self->{oStorage}->exists($self->{strFileName})) { confess &log(ASSERT, "cannot save copy only when '$self->{strFileName}' exists"); } $self->hash(); $self->{oStorage}->put($self->{strFileName} . INI_COPY_EXT, iniRender($self->{oContent}), {strCipherPass => $self->{strCipherPass}}); } #################################################################################################################################### # iniRender() - render hash to standard INI format. #################################################################################################################################### push @EXPORT, qw(iniRender); sub iniRender { # Assign function parameters, defaults, and log debug info my ( $strOperation, $oContent, $bRelaxed, ) = logDebugParam ( __PACKAGE__ . '::iniRender', \@_, {name => 'oContent', trace => true}, {name => 'bRelaxed', default => false, trace => true}, ); # Open the ini file for writing my $strContent = ''; my $bFirst = true; # Create the JSON object canonical so that fields are alpha ordered to pass unit tests my $oJSON = JSON::PP->new()->canonical()->allow_nonref(); # Write the INI file foreach my $strSection (sort(keys(%$oContent))) { # Add a linefeed between sections if (!$bFirst) { $strContent .= "\n"; } # Write the section $strContent .= "[${strSection}]\n"; # Iterate through all keys in the section foreach my $strKey (sort(keys(%{$oContent->{$strSection}}))) { # If the value is a hash then convert it to JSON, otherwise store as is my $strValue = ${$oContent}{$strSection}{$strKey}; # If relaxed then store as old-style config if ($bRelaxed) { # If the value is an array then save each element to a separate key/value pair if (ref($strValue) eq 'ARRAY') { foreach my $strArrayValue (@{$strValue}) { $strContent .= "${strKey}=${strArrayValue}\n"; } } # Else write a standard key/value pair else { $strContent .= "${strKey}=${strValue}\n"; } } # Else write as stricter JSON else { # Skip the checksum for now but write all other key/value pairs if (!($strSection eq INI_SECTION_BACKREST && $strKey eq INI_KEY_CHECKSUM)) { $strContent .= "${strKey}=" . $oJSON->encode($strValue) . "\n"; } } } $bFirst = false; } # If there is a checksum write it at the end of the file. Having the checksum at the end of the file allows some major # performance optimizations which we won't implement in Perl, but will make the C code much more efficient. if (!$bRelaxed && defined($oContent->{&INI_SECTION_BACKREST}) && defined($oContent->{&INI_SECTION_BACKREST}{&INI_KEY_CHECKSUM})) { $strContent .= "\n[" . INI_SECTION_BACKREST . "]\n" . INI_KEY_CHECKSUM . '=' . $oJSON->encode($oContent->{&INI_SECTION_BACKREST}{&INI_KEY_CHECKSUM}) . 
"\n"; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strContent', value => $strContent, trace => true} ); } #################################################################################################################################### # hash() - generate hash for the manifest. #################################################################################################################################### sub hash { my $self = shift; # Remove the old checksum delete($self->{oContent}{&INI_SECTION_BACKREST}{&INI_KEY_CHECKSUM}); # Set the new checksum $self->{oContent}{&INI_SECTION_BACKREST}{&INI_KEY_CHECKSUM} = sha1_hex(JSON::PP->new()->canonical()->allow_nonref()->encode($self->{oContent})); return $self->{oContent}{&INI_SECTION_BACKREST}{&INI_KEY_CHECKSUM}; } #################################################################################################################################### # get() - get a value. #################################################################################################################################### sub get { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; my $bRequired = shift; my $oDefault = shift; # Parameter constraints if (!defined($strSection)) { confess &log(ASSERT, 'strSection is required'); } if (defined($strSubKey) && !defined($strKey)) { confess &log(ASSERT, "strKey is required when strSubKey '${strSubKey}' is requested"); } # Get the result my $oResult = $self->{oContent}->{$strSection}; if (defined($strKey) && defined($oResult)) { $oResult = $oResult->{$strKey}; if (defined($strSubKey) && defined($oResult)) { $oResult = $oResult->{$strSubKey}; } } # When result is not defined if (!defined($oResult)) { # Error if a result is required if (!defined($bRequired) || $bRequired) { confess &log(ASSERT, "strSection '$strSection'" . (defined($strKey) ? ", strKey '$strKey'" : '') . (defined($strSubKey) ? ", strSubKey '$strSubKey'" : '') . ' is required but not defined'); } # Return default if specified if (defined($oDefault)) { return $oDefault; } } return $oResult } #################################################################################################################################### # boolGet() - get a boolean value. #################################################################################################################################### sub boolGet { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; my $bRequired = shift; my $bDefault = shift; return $self->get( $strSection, $strKey, $strSubKey, $bRequired, defined($bDefault) ? ($bDefault ? INI_TRUE : INI_FALSE) : undef) ? true : false; } #################################################################################################################################### # numericGet() - get a numeric value. #################################################################################################################################### sub numericGet { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; my $bRequired = shift; my $nDefault = shift; return $self->get($strSection, $strKey, $strSubKey, $bRequired, defined($nDefault) ? $nDefault + 0 : undef) + 0; } #################################################################################################################################### # set - set a value. 
#################################################################################################################################### sub set { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; my $oValue = shift; # Parameter constraints if (!(defined($strSection) && defined($strKey))) { confess &log(ASSERT, 'strSection and strKey are required'); } my $oCurrentValue; if (defined($strSubKey)) { $oCurrentValue = \$self->{oContent}{$strSection}{$strKey}{$strSubKey}; } else { $oCurrentValue = \$self->{oContent}{$strSection}{$strKey}; } if (!defined($$oCurrentValue) || defined($oCurrentValue) != defined($oValue) || ${dclone($oCurrentValue)} ne ${dclone(\$oValue)}) { $$oCurrentValue = $oValue; if (!$self->{bModified}) { $self->{bModified} = true; } return true; } return false; } #################################################################################################################################### # boolSet - set a boolean value. #################################################################################################################################### sub boolSet { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; my $bValue = shift; $self->set($strSection, $strKey, $strSubKey, $bValue ? INI_TRUE : INI_FALSE); } #################################################################################################################################### # numericSet - set a numeric value. #################################################################################################################################### sub numericSet { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; my $nValue = shift; $self->set($strSection, $strKey, $strSubKey, defined($nValue) ? $nValue + 0 : undef); } #################################################################################################################################### # remove - remove a value. #################################################################################################################################### sub remove { my $self = shift; my $strSection = shift; my $strKey = shift; my $strSubKey = shift; # Test if the value exists if ($self->test($strSection, $strKey, $strSubKey)) { # Remove a subkey if (defined($strSubKey)) { delete($self->{oContent}{$strSection}{$strKey}{$strSubKey}); } # Remove a key if (defined($strKey)) { if (!defined($strSubKey)) { delete($self->{oContent}{$strSection}{$strKey}); } # Remove the section if it is now empty if (keys(%{$self->{oContent}{$strSection}}) == 0) { delete($self->{oContent}{$strSection}); } } # Remove a section if (!defined($strKey)) { delete($self->{oContent}{$strSection}); } # Record changes if (!$self->{bModified}) { $self->{bModified} = true; } return true; } return false; } #################################################################################################################################### # keys - get the list of keys in a section. 
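#
# For example, assuming a hypothetical $oIni object with a populated 'target:file' section:
#
#     my @stryKey = $oIni->keys('target:file');                          # sorted ascending (INI_SORT_FORWARD is the default)
#     my @stryKeyDesc = $oIni->keys('target:file', INI_SORT_REVERSE);    # sorted descending
#     my @stryKeyRaw = $oIni->keys('target:file', INI_SORT_NONE);        # native hash order, no sorting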
#################################################################################################################################### sub keys { my $self = shift; my $strSection = shift; my $strSortOrder = shift; if ($self->test($strSection)) { if (!defined($strSortOrder) || $strSortOrder eq INI_SORT_FORWARD) { return (sort(keys(%{$self->get($strSection)}))); } elsif ($strSortOrder eq INI_SORT_REVERSE) { return (sort {$b cmp $a} (keys(%{$self->get($strSection)}))); } elsif ($strSortOrder eq INI_SORT_NONE) { return (keys(%{$self->get($strSection)})); } else { confess &log(ASSERT, "invalid strSortOrder '${strSortOrder}'"); } } my @stryEmptyArray; return @stryEmptyArray; } #################################################################################################################################### # test - test a value. # # Test a value to see if it equals the supplied test value. If no test value is given, tests that the section, key, or subkey is # defined. #################################################################################################################################### sub test { my $self = shift; my $strSection = shift; my $strValue = shift; my $strSubValue = shift; my $strTest = shift; # Get the value my $strResult = $self->get($strSection, $strValue, $strSubValue, false); # Is there a result if (defined($strResult)) { # Is there a value to test against? if (defined($strTest)) { # Make sure these are explicit strings or Devel::Cover thinks they are equal if one side is a boolean return ($strResult . '') eq ($strTest . '') ? true : false; } return true; } return false; } #################################################################################################################################### # boolTest - test a boolean value, see test(). #################################################################################################################################### sub boolTest { my $self = shift; my $strSection = shift; my $strValue = shift; my $strSubValue = shift; my $bTest = shift; return $self->test($strSection, $strValue, $strSubValue, defined($bTest) ? ($bTest ? INI_TRUE : INI_FALSE) : undef); } #################################################################################################################################### # cipherPassSub - gets the passphrase (if it exists) used to read/write subsequent files #################################################################################################################################### sub cipherPassSub { my $self = shift; return $self->get(INI_SECTION_CIPHER, INI_KEY_CIPHER_PASS, undef, false); } #################################################################################################################################### # Properties. #################################################################################################################################### sub modified {shift->{bModified}} # Has the data been modified since last load/save? sub exists {shift->{bExists}} # Is the data persisted to file? 
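# A hedged example of how the property getters above are typically used (hypothetical key names):
#
#     $oIni->set('global', 'option', undef, 'value');
#     $oIni->save() if $oIni->modified();              # save() clears bModified and sets bExists on success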
sub cipherPass {shift->{strCipherPass}} # Return passphrase (will be undef if repo not encrypted) 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/Log.pm000066400000000000000000000670331500617037600236620ustar00rootroot00000000000000#################################################################################################################################### # COMMON LOG MODULE #################################################################################################################################### package pgBackRestDoc::Common::Log; use strict; use warnings FATAL => qw(all); use Carp qw(confess longmess); use English '-no_match_vars'; use Exporter qw(import); our @EXPORT = qw(); use Fcntl qw(:DEFAULT :flock); use File::Basename qw(dirname); use Scalar::Util qw(blessed reftype); use Time::HiRes qw(gettimeofday usleep); use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::String; #################################################################################################################################### # Boolean constants #################################################################################################################################### use constant true => 1; push @EXPORT, qw(true); use constant false => 0; push @EXPORT, qw(false); #################################################################################################################################### # Log level constants #################################################################################################################################### use constant TRACE => 'TRACE'; push @EXPORT, qw(TRACE); use constant DEBUG => 'DEBUG'; push @EXPORT, qw(DEBUG); use constant DETAIL => 'DETAIL'; push @EXPORT, qw(DETAIL); use constant INFO => 'INFO'; push @EXPORT, qw(INFO); use constant WARN => 'WARN'; push @EXPORT, qw(WARN); use constant PROTOCOL => 'PROTOCOL'; push @EXPORT, qw(PROTOCOL); use constant ERROR => 'ERROR'; push @EXPORT, qw(ERROR); use constant ASSERT => 'ASSERT'; push @EXPORT, qw(ASSERT); use constant OFF => 'OFF'; push @EXPORT, qw(OFF); #################################################################################################################################### # Log levels ranked by severity #################################################################################################################################### my %oLogLevelRank; $oLogLevelRank{TRACE}{rank} = 8; $oLogLevelRank{DEBUG}{rank} = 7; $oLogLevelRank{DETAIL}{rank} = 6; $oLogLevelRank{INFO}{rank} = 5; $oLogLevelRank{WARN}{rank} = 4; $oLogLevelRank{PROTOCOL}{rank} = 3; $oLogLevelRank{ERROR}{rank} = 2; $oLogLevelRank{ASSERT}{rank} = 1; $oLogLevelRank{OFF}{rank} = 0; #################################################################################################################################### # Module globals #################################################################################################################################### my $hLogFile = undef; my $strLogFileCache = undef; my $strLogLevelFile = OFF; my $strLogLevelConsole = OFF; my $strLogLevelStdErr = WARN; my $bLogTimestamp = true; # Size of the process id log field my $iLogProcessSize = 2; # Flags to limit banner printing until there is actual output my $bLogFileExists; my $bLogFileFirst; # Allow log to be globally enabled or disabled with logEnable() and logDisable() my $bLogDisable = 0; # Allow errors to be logged as warnings my $bLogWarnOnError = 0; # Store the last logged error my $oErrorLast; 
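# As a hedged sketch of how the globals above interact: with the defaults (file and console logging OFF, stderr at WARN), only
# WARN and more severe messages are emitted, and they go to stderr:
#
#     &log(INFO, 'suppressed everywhere');
#     &log(WARN, 'written to stderr');
#
# logLevelSet() (below) changes these levels and logFileSet() opens the log file once file logging is enabled.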
#################################################################################################################################### # logFileSet - set the file messages will be logged to #################################################################################################################################### sub logFileSet { my $oStorage = shift; my $strFile = shift; my $bLogFileFirstParam = shift; # Only open the log file if file logging is enabled if ($strLogLevelFile ne OFF) { $oStorage->pathCreate(dirname($strFile), {strMode => '0750', bIgnoreExists => true, bCreateParent => true}); $strFile .= '.log'; $bLogFileExists = -e $strFile ? true : false; $bLogFileFirst = defined($bLogFileFirstParam) ? $bLogFileFirstParam : false; if (!sysopen($hLogFile, $strFile, O_WRONLY | O_CREAT | O_APPEND, oct('0640'))) { logErrorResult(ERROR_FILE_OPEN, "unable to open log file '${strFile}'", $OS_ERROR); } # Write out anything that was cached before the file was opened if (defined($strLogFileCache)) { logBanner(); syswrite($hLogFile, $strLogFileCache); undef($strLogFileCache); } } } push @EXPORT, qw(logFileSet); #################################################################################################################################### # logBanner # # Output a banner on the first log entry written to a file #################################################################################################################################### sub logBanner { if ($bLogFileFirst) { if ($bLogFileExists) { syswrite($hLogFile, "\n"); } syswrite($hLogFile, "-------------------PROCESS START-------------------\n"); } $bLogFileFirst = false; } #################################################################################################################################### # logLevelSet - set the log level for file and console #################################################################################################################################### sub logLevelSet { my $strLevelFileParam = shift; my $strLevelConsoleParam = shift; my $strLevelStdErrParam = shift; my $bLogTimestampParam = shift; my $iLogProcessMax = shift; if (defined($strLevelFileParam)) { if (!defined($oLogLevelRank{uc($strLevelFileParam)}{rank})) { confess &log(ERROR, "file log level ${strLevelFileParam} does not exist"); } $strLogLevelFile = uc($strLevelFileParam); } if (defined($strLevelConsoleParam)) { if (!defined($oLogLevelRank{uc($strLevelConsoleParam)}{rank})) { confess &log(ERROR, "console log level ${strLevelConsoleParam} does not exist"); } $strLogLevelConsole = uc($strLevelConsoleParam); } if (defined($strLevelStdErrParam)) { if (!defined($oLogLevelRank{uc($strLevelStdErrParam)}{rank})) { confess &log(ERROR, "stderr log level ${strLevelStdErrParam} does not exist"); } $strLogLevelStdErr = uc($strLevelStdErrParam); } if (defined($bLogTimestampParam)) { $bLogTimestamp = $bLogTimestampParam; } if (defined($iLogProcessMax)) { $iLogProcessSize = $iLogProcessMax > 99 ? 
3 : 2; } } push @EXPORT, qw(logLevelSet); #################################################################################################################################### # logDisable #################################################################################################################################### sub logDisable { $bLogDisable++; } push @EXPORT, qw(logDisable); #################################################################################################################################### # logEnable #################################################################################################################################### sub logEnable { $bLogDisable--; } push @EXPORT, qw(logEnable); #################################################################################################################################### # logWarnOnErrorDisable #################################################################################################################################### sub logWarnOnErrorDisable { $bLogWarnOnError--; } push @EXPORT, qw(logWarnOnErrorDisable); #################################################################################################################################### # logWarnOnErrorEnable - when an error is thrown, log it as a warning instead #################################################################################################################################### sub logWarnOnErrorEnable { $bLogWarnOnError++; } push @EXPORT, qw(logWarnOnErrorEnable); #################################################################################################################################### # logDebugParam # # Log parameters passed to functions. #################################################################################################################################### use constant DEBUG_PARAM => '()'; sub logDebugParam { my $strFunction = shift; my $oyParamRef = shift; return logDebugProcess($strFunction, DEBUG_PARAM, undef, $oyParamRef, @_); } push @EXPORT, qw(logDebugParam); #################################################################################################################################### # logDebugReturn # # Log values returned from functions. #################################################################################################################################### use constant DEBUG_RETURN => '=>'; sub logDebugReturn { my $strFunction = shift; return logDebugProcess($strFunction, DEBUG_RETURN, undef, undef, @_); } push @EXPORT, qw(logDebugReturn); #################################################################################################################################### # logDebugMisc # # Log misc values and details during execution. 
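#
# For example (a hypothetical call site; parameter hashes follow the same form as logDebugParam):
#
#     logDebugMisc
#     (
#         $strOperation, 'process file',
#         {name => 'strFile', value => $strFile},
#         {name => 'lSize', value => $lSize}
#     );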
#################################################################################################################################### use constant DEBUG_MISC => ''; sub logDebugMisc { my $strFunction = shift; my $strDetail = shift; return logDebugProcess($strFunction, DEBUG_MISC, $strDetail, undef, @_); } push @EXPORT, qw(logDebugMisc); #################################################################################################################################### # logDebugProcess #################################################################################################################################### sub logDebugProcess { my $strFunction = shift; my $strType = shift; my $strDetail = shift; my $oyParamRef = shift; my $iIndex = 0; my $oParamHash = {}; my @oyResult; my $bLogTrace = true; if ($strType eq DEBUG_PARAM) { push @oyResult, $strFunction; } # Process each parameter hash my $oParam = shift; my $bOptionalBlock = false; # Strip the package name off strFunction if it's pgBackRest $strFunction =~ s/^pgBackRest[^\:]*\:\://; while (defined($oParam)) { my $strParamName = $$oParam{name}; my $bParamOptional = defined($oParam->{optional}) && $oParam->{optional}; my $bParamRequired = !defined($oParam->{required}) || $oParam->{required}; my $oValue; # Should the param be redacted? $oParamHash->{$strParamName}{redact} = $oParam->{redact} ? true : false; # If param is optional then the optional block has been entered if ($bParamOptional) { if (defined($oParam->{required})) { confess &log(ASSERT, "cannot define 'required' for optional parameter '${strParamName}'"); } $bParamRequired = false; $bOptionalBlock = true; } # Don't allow non-optional parameters once optional block has started if ($bParamOptional != $bOptionalBlock) { confess &log(ASSERT, "non-optional parameter '${strParamName}' invalid after optional parameters"); } # Push the return value into the return value array if ($strType eq DEBUG_PARAM) { if ($bParamOptional) { $oValue = $$oyParamRef[$iIndex]->{$strParamName}; } else { $oValue = $$oyParamRef[$iIndex]; } if (defined($oValue)) { push(@oyResult, $oValue); } else { push(@oyResult, $${oParam}{default}); $$oParamHash{$strParamName}{default} = true; } $oValue = $oyResult[-1]; if (!defined($oValue) && $bParamRequired) { confess &log(ASSERT, "${strParamName} is required in ${strFunction}"); } } else { if (ref($$oParam{value}) eq 'ARRAY') { if (defined($$oParam{ref}) && $$oParam{ref}) { push(@oyResult, $$oParam{value}); } else { push(@oyResult, @{$$oParam{value}}); } } else { push(@oyResult, $$oParam{value}); } $oValue = $$oParam{value}; } if (!defined($$oParam{log}) || $$oParam{log}) { # If the parameter is a hash but not blessed then represent it as a string # ??? This should go away once the inputs to logDebug can be changed if (ref($oValue) eq 'HASH' && !blessed($oValue)) { $$oParamHash{$strParamName}{value} = '[hash]'; } # Else log the parameter value exactly else { $$oParamHash{$strParamName}{value} = $oValue; } # There are certain return values that it's wasteful to generate debug logging for if (!($strParamName eq 'self') && (!defined($$oParam{trace}) || !$$oParam{trace})) { $bLogTrace = false; } } # Get the next parameter hash $oParam = shift; if (!$bParamOptional) { $iIndex++; } } if (defined($strDetail) && $iIndex == 0) { $bLogTrace = false; } logDebugOut($strFunction, $strType, $strDetail, $oParamHash, $bLogTrace ? 
TRACE : DEBUG); # If there are one or zero return values then just return a scalar (this will be undef if there are no return values) if (@oyResult == 1) { return $oyResult[0]; } # Else return an array containing return values return @oyResult; } #################################################################################################################################### # logDebugBuild #################################################################################################################################### sub logDebugBuild { my $strValue = shift; my $rResult; # Value is undefined if (!defined($strValue)) { $rResult = \'[undef]'; } # Value is not a ref, but return it as a ref for efficiency elsif (!ref($strValue)) { $rResult = \$strValue; } # Value is a hash elsif (ref($strValue) eq 'HASH') { my $strValueHash; for my $strSubValue (sort(keys(%{$strValue}))) { $strValueHash .= (defined($strValueHash) ? ', ' : '{') . "${strSubValue} => " . ${logDebugBuild($strValue->{$strSubValue})}; } $rResult = \(defined($strValueHash) ? $strValueHash . '}' : '{}'); } # Value is an array elsif (ref($strValue) eq 'ARRAY') { my $strValueArray; for my $strSubValue (@{$strValue}) { $strValueArray .= (defined($strValueArray) ? ', ' : '(') . ${logDebugBuild($strSubValue)}; } $rResult = \(defined($strValueArray) ? $strValueArray . ')' : '()'); } # Else some other type ??? For the moment this is forced to object to not make big log changes else { $rResult = \('[object]'); } return $rResult; } push @EXPORT, qw(logDebugBuild); #################################################################################################################################### # logDebugOut #################################################################################################################################### use constant DEBUG_STRING_MAX_LEN => 1024; sub logDebugOut { my $strFunction = shift; my $strType = shift; my $strMessage = shift; my $oParamHash = shift; my $strLevel = shift; $strLevel = defined($strLevel) ? $strLevel : DEBUG; if ($oLogLevelRank{$strLevel}{rank} <= $oLogLevelRank{$strLogLevelConsole}{rank} || $oLogLevelRank{$strLevel}{rank} <= $oLogLevelRank{$strLogLevelFile}{rank} || $oLogLevelRank{$strLevel}{rank} <= $oLogLevelRank{$strLogLevelStdErr}{rank}) { if (defined($oParamHash)) { my $strParamSet; foreach my $strParam (sort(keys(%$oParamHash))) { if (defined($strParamSet)) { $strParamSet .= ', '; } my $strValueRef = defined($oParamHash->{$strParam}{value}) ? logDebugBuild($oParamHash->{$strParam}{value}) : undef; my $bDefault = defined($$strValueRef) && defined($$oParamHash{$strParam}{default}) ? $$oParamHash{$strParam}{default} : false; $strParamSet .= "${strParam} = " . ($oParamHash->{$strParam}{redact} && defined($$strValueRef) ? '' : ($bDefault ? '<' : '') . (defined($$strValueRef) ? ($strParam =~ /^(b|is)/ ? ($$strValueRef ? 'true' : 'false'): (length($$strValueRef) > DEBUG_STRING_MAX_LEN ? substr($$strValueRef, 0, DEBUG_STRING_MAX_LEN) . ' ... ': $$strValueRef)) : '[undef]') . ($bDefault ? '>' : '')); } if (defined($strMessage)) { $strMessage = $strMessage . (defined($strParamSet) ? ": ${strParamSet}" : ''); } else { $strMessage = $strParamSet; } } &log($strLevel, "${strFunction}${strType}" . (defined($strMessage) ? 
": $strMessage" : '')); } } #################################################################################################################################### # logException #################################################################################################################################### sub logException { my $oException = shift; return &log($oException->level(), $oException->message(), $oException->code(), undef, undef, undef, $oException->extra()); } push @EXPORT, qw(logException); #################################################################################################################################### # logErrorResult #################################################################################################################################### sub logErrorResult { my $iCode = shift; my $strMessage = shift; my $strResult = shift; confess &log(ERROR, $strMessage . (defined($strResult) ? ': ' . trim($strResult) : ''), $iCode); } push @EXPORT, qw(logErrorResult); #################################################################################################################################### # LOG - log messages #################################################################################################################################### sub log { my $strLevel = shift; my $strMessage = shift; my $iCode = shift; my $bSuppressLog = shift; my $iIndent = shift; my $iProcessId = shift; my $rExtra = shift; # Set defaults $bSuppressLog = defined($bSuppressLog) ? $bSuppressLog : false; # Initialize rExtra if (!defined($rExtra)) { $rExtra = { bLogFile => false, bLogConsole => false, }; } # Set operational variables my $strMessageFormat = $strMessage; my $iLogLevelRank = $oLogLevelRank{$strLevel}{rank}; # Level rank must be valid if (!defined($iLogLevelRank)) { confess &log(ASSERT, "log level ${strLevel} does not exist"); } # If message was undefined then set default message if (!defined($strMessageFormat)) { $strMessageFormat = '(undefined)'; } # Set the error code if ($strLevel eq ASSERT) { $iCode = ERROR_ASSERT; } elsif ($strLevel eq ERROR && !defined($iCode)) { $iCode = ERROR_UNKNOWN; } $strMessageFormat = (defined($iCode) ? sprintf('[%03d]: ', $iCode) : '') . $strMessageFormat; # Indent subsequent lines of the message if it has more than one line - makes the log more readable if (defined($iIndent)) { my $strIndent = ' ' x $iIndent; $strMessageFormat =~ s/\n/\n${strIndent}/g; } else { # Indent subsequent message lines so they align $bLogTimestamp ? $strMessageFormat =~ s/\n/\n /g : $strMessageFormat =~ s/\n/\n /g } # Indent TRACE and debug levels so they are distinct from normal messages if ($strLevel eq TRACE) { $strMessageFormat =~ s/\n/\n /g; $strMessageFormat = ' ' . $strMessageFormat; } elsif ($strLevel eq DEBUG) { $strMessageFormat =~ s/\n/\n /g; $strMessageFormat = ' ' . $strMessageFormat; } # Format the message text my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime(time); # If logging warnings as errors then change the display level and rank. These will be used to determine if the message will be # displayed or not. my $strDisplayLevel = ($bLogWarnOnError && $strLevel eq ERROR ? WARN : $strLevel); my $iLogDisplayLevelRank = ($bLogWarnOnError && $strLevel eq ERROR ? $oLogLevelRank{$strDisplayLevel}{rank} : $iLogLevelRank); $strMessageFormat = ($bLogTimestamp ? timestampFormat() . sprintf('.%03d ', (gettimeofday() - int(gettimeofday())) * 1000) : '') . sprintf('P%0*d', $iLogProcessSize, defined($iProcessId) ? 
$iProcessId : 0) . (' ' x (7 - length($strDisplayLevel))) . "${strDisplayLevel}: ${strMessageFormat}\n"; # Skip output if disabled if (!$bLogDisable) { # Output to stderr if configured log level setting rank is greater than the display level rank. if (!$rExtra->{bLogConsole} && $iLogDisplayLevelRank <= $oLogLevelRank{$strLogLevelStdErr}{rank}) { if ($strLogLevelStdErr ne PROTOCOL) { syswrite(*STDERR, $strDisplayLevel . (defined($iCode) ? sprintf(' [%03d]', $iCode) : '') . ': '); } syswrite(*STDERR, "${strMessage}\n"); $rExtra->{bLogConsole} = true; } # Else output to stdout if configured log level setting rank is greater than the display level rank elsif (!$rExtra->{bLogConsole} && $iLogDisplayLevelRank <= $oLogLevelRank{$strLogLevelConsole}{rank}) { if (!$bSuppressLog) { syswrite(*STDOUT, $strMessageFormat); # This is here for debugging purposes - it's not clear how best to make it into a switch # if ($strLevel eq ASSERT || $strLevel eq ERROR) # { # my $strStackTrace = longmess() . "\n"; # $strStackTrace =~ s/\n/\n /g; # syswrite(*STDOUT, $strStackTrace); # } } $rExtra->{bLogConsole} = true; } # Output to file if configured log level setting rank is greater than the display level rank or test flag is set. if (!$rExtra->{bLogFile} && $iLogDisplayLevelRank <= $oLogLevelRank{$strLogLevelFile}{rank}) { if (defined($hLogFile) || (defined($strLogLevelFile) && $strLogLevelFile ne OFF)) { if (!$bSuppressLog) { if (defined($hLogFile)) { logBanner(); syswrite($hLogFile, $strMessageFormat); } else { $strLogFileCache .= $strMessageFormat; } if ($strDisplayLevel eq ASSERT || ($strDisplayLevel eq ERROR && ($strLogLevelFile eq DEBUG || $strLogLevelFile eq TRACE))) { my $strStackTrace = longmess() . "\n"; $strStackTrace =~ s/\n/\n /g; if (defined($hLogFile)) { syswrite($hLogFile, $strStackTrace); } else { $strLogFileCache .= $strStackTrace; } } } } $rExtra->{bLogFile} = true; } } # Return a typed exception if code is defined if (defined($iCode)) { $oErrorLast = new pgBackRestDoc::Common::Exception($strLevel, $iCode, $strMessage, longmess(), $rExtra); return $oErrorLast; } # Return the message so it can be used in a confess return $strMessage; } push @EXPORT, qw(log); #################################################################################################################################### # logErrorLast - get the last logged error #################################################################################################################################### sub logErrorLast { return $oErrorLast; } push @EXPORT, qw(logErrorLast); #################################################################################################################################### # logLevel - get the current log levels #################################################################################################################################### sub logLevel { return ($strLogLevelFile, $strLogLevelConsole, $strLogLevelStdErr, $bLogTimestamp); } push @EXPORT, qw(logLevel); #################################################################################################################################### # logFileCacheClear - Clear the log file cache #################################################################################################################################### sub logFileCacheClear { undef($strLogFileCache); } push @EXPORT, qw(logFileCacheClear); #################################################################################################################################### # 
logFileCache - Get the log file cache #################################################################################################################################### sub logFileCache { return $strLogFileCache; } push @EXPORT, qw(logFileCache); 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Common/String.pm000066400000000000000000000063171500617037600244050ustar00rootroot00000000000000#################################################################################################################################### # COMMON STRING MODULE #################################################################################################################################### package pgBackRestDoc::Common::String; use strict; use warnings FATAL => qw(all); use Carp qw(confess longmess); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); #################################################################################################################################### # trim # # Trim whitespace. #################################################################################################################################### sub trim { my $strBuffer = shift; if (!defined($strBuffer)) { return; } $strBuffer =~ s/^\s+|\s+$//g; return $strBuffer; } push @EXPORT, qw(trim); #################################################################################################################################### # coalesce - return first defined parameter #################################################################################################################################### sub coalesce { foreach my $strParam (@_) { if (defined($strParam)) { return $strParam; } } return; } push @EXPORT, qw(coalesce); #################################################################################################################################### # timestampFormat # # Get standard timestamp format (or formatted as specified). 
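#
# For example (output depends on the current local time; $lTime is a hypothetical epoch value):
#
#     timestampFormat();                       # default format, e.g. '2024-01-02 03:04:05'
#     timestampFormat('%4d');                  # year only, e.g. '2024'
#     timestampFormat(undef, $lTime);          # default format applied to the given epoch time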
#################################################################################################################################### sub timestampFormat { my $strFormat = shift; my $lTime = shift; if (!defined($strFormat)) { $strFormat = '%4d-%02d-%02d %02d:%02d:%02d'; } if (!defined($lTime)) { $lTime = time(); } my ($iSecond, $iMinute, $iHour, $iMonthDay, $iMonth, $iYear, $iWeekDay, $iYearDay, $bIsDst) = localtime($lTime); if ($strFormat eq "%4d") { return sprintf($strFormat, $iYear + 1900) } else { return sprintf($strFormat, $iYear + 1900, $iMonth + 1, $iMonthDay, $iHour, $iMinute, $iSecond); } } push @EXPORT, qw(timestampFormat); #################################################################################################################################### # stringSplit #################################################################################################################################### sub stringSplit { my $strString = shift; my $strChar = shift; my $iLength = shift; if (length($strString) <= $iLength) { return $strString, undef; } my $iPos = index($strString, $strChar); if ($iPos == -1) { return $strString, undef; } my $iNewPos = $iPos; while ($iNewPos != -1 && $iNewPos + 1 < $iLength) { $iPos = $iNewPos; $iNewPos = index($strString, $strChar, $iPos + 1); } return substr($strString, 0, $iPos + 1), substr($strString, $iPos + 1); } push @EXPORT, qw(stringSplit); 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Custom/000077500000000000000000000000001500617037600226145ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Custom/DocConfigData.pm000066400000000000000000000621451500617037600256070ustar00rootroot00000000000000#################################################################################################################################### # Configuration Definition Data # # The configuration is defined in src/build/config/config.yaml, which also contains the documentation. #################################################################################################################################### package pgBackRestDoc::Custom::DocConfigData; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Cwd qw(abs_path); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname basename); use Getopt::Long qw(GetOptions); use Storable qw(dclone); use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Log; use pgBackRestDoc::ProjectInfo; use pgBackRestTest::Common::Wait; #################################################################################################################################### # Command constants #################################################################################################################################### use constant CFGCMD_BACKUP => 'backup'; push @EXPORT, qw(CFGCMD_BACKUP); use constant CFGCMD_HELP => 'help'; push @EXPORT, qw(CFGCMD_HELP); use constant CFGCMD_INFO => 'info'; push @EXPORT, qw(CFGCMD_INFO); use constant CFGCMD_VERSION => 'version'; #################################################################################################################################### # Command role constants - roles allowed for each command. Commands may have multiple processes that work together to implement # their functionality. These roles allow each process to know what it is supposed to do. 
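#
# As an illustrative sketch only (the rendering helper below is hypothetical, not part of this module), a command/role pair is
# conventionally displayed as 'command' for the main role and 'command:role' otherwise:
#
#     sub cmdRoleName
#     {
#         my ($strCommand, $strRole) = @_;
#         return $strRole eq CFGCMD_ROLE_MAIN ? $strCommand : "${strCommand}:${strRole}";
#     }
#
#     cmdRoleName(CFGCMD_BACKUP, CFGCMD_ROLE_LOCAL);    # 'backup:local'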
#################################################################################################################################### # Called directly by the user. This is the main process of the command that may or may not spawn other command roles. use constant CFGCMD_ROLE_MAIN => 'main'; push @EXPORT, qw(CFGCMD_ROLE_MAIN); # Async worker that is spawned so the main process can return a result while work continues. An async worker may spawn local or # remote workers. use constant CFGCMD_ROLE_ASYNC => 'async'; push @EXPORT, qw(CFGCMD_ROLE_ASYNC); # Local worker for parallelizing jobs. A local worker may spawn a remote worker. use constant CFGCMD_ROLE_LOCAL => 'local'; push @EXPORT, qw(CFGCMD_ROLE_LOCAL); # Remote worker for accessing resources on another host use constant CFGCMD_ROLE_REMOTE => 'remote'; push @EXPORT, qw(CFGCMD_ROLE_REMOTE); #################################################################################################################################### # Option constants - options that are allowed for commands #################################################################################################################################### # Command-line only options #----------------------------------------------------------------------------------------------------------------------------------- use constant CFGOPT_CONFIG => 'config'; push @EXPORT, qw(CFGOPT_CONFIG); use constant CFGOPT_STANZA => 'stanza'; push @EXPORT, qw(CFGOPT_STANZA); # Command-line only local/remote options #----------------------------------------------------------------------------------------------------------------------------------- # Paths use constant CFGOPT_LOCK_PATH => 'lock-path'; push @EXPORT, qw(CFGOPT_LOCK_PATH); use constant CFGOPT_LOG_PATH => 'log-path'; push @EXPORT, qw(CFGOPT_LOG_PATH); use constant CFGOPT_SPOOL_PATH => 'spool-path'; push @EXPORT, qw(CFGOPT_SPOOL_PATH); # Logging use constant CFGOPT_LOG_LEVEL_STDERR => 'log-level-stderr'; push @EXPORT, qw(CFGOPT_LOG_LEVEL_STDERR); use constant CFGOPT_LOG_TIMESTAMP => 'log-timestamp'; push @EXPORT, qw(CFGOPT_LOG_TIMESTAMP); # Repository options #----------------------------------------------------------------------------------------------------------------------------------- # Prefix that must be used by all repo options that allow multiple configurations use constant CFGDEF_PREFIX_REPO => 'repo'; # Repository General use constant CFGOPT_REPO_PATH => CFGDEF_PREFIX_REPO . '-path'; push @EXPORT, qw(CFGOPT_REPO_PATH); # Repository Host use constant CFGOPT_REPO_HOST => CFGDEF_PREFIX_REPO . '-host'; use constant CFGOPT_REPO_HOST_CMD => CFGOPT_REPO_HOST . '-cmd'; push @EXPORT, qw(CFGOPT_REPO_HOST_CMD); # Stanza options #----------------------------------------------------------------------------------------------------------------------------------- # Determines how many databases can be configured use constant CFGDEF_INDEX_PG => 8; push @EXPORT, qw(CFGDEF_INDEX_PG); # Prefix that must be used by all db options that allow multiple configurations use constant CFGDEF_PREFIX_PG => 'pg'; push @EXPORT, qw(CFGDEF_PREFIX_PG); # Set default PostgreSQL cluster use constant CFGOPT_PG_HOST => CFGDEF_PREFIX_PG . '-host'; use constant CFGOPT_PG_HOST_CMD => CFGOPT_PG_HOST . '-cmd'; push @EXPORT, qw(CFGOPT_PG_HOST_CMD); #################################################################################################################################### # Option definition constants - defines, types, sections, etc. 
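#
# These keys describe option definitions loaded from src/build/config/config.yaml. A hedged, hypothetical sketch of the shape a
# single option definition takes once loaded:
#
#     my $rhOptionDefine =
#     {
#         &CFGDEF_SECTION => 'global',
#         &CFGDEF_TYPE => 'boolean',
#         &CFGDEF_DEFAULT => false,
#         &CFGDEF_COMMAND => {&CFGCMD_BACKUP => {}},
#     };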
#################################################################################################################################### # Command defines #----------------------------------------------------------------------------------------------------------------------------------- use constant CFGDEF_LOG_FILE => 'log-file'; push @EXPORT, qw(CFGDEF_LOG_FILE); use constant CFGDEF_LOG_LEVEL_DEFAULT => 'log-level-default'; push @EXPORT, qw(CFGDEF_LOG_LEVEL_DEFAULT); use constant CFGDEF_LOCK_REQUIRED => 'lock-required'; push @EXPORT, qw(CFGDEF_LOCK_REQUIRED); use constant CFGDEF_LOCK_REMOTE_REQUIRED => 'lock-remote-required'; push @EXPORT, qw(CFGDEF_LOCK_REMOTE_REQUIRED); use constant CFGDEF_LOCK_TYPE => 'lock-type'; push @EXPORT, qw(CFGDEF_LOCK_TYPE); use constant CFGDEF_LOCK_TYPE_NONE => 'none'; use constant CFGDEF_PARAMETER_ALLOWED => 'parameter-allowed'; push @EXPORT, qw(CFGDEF_PARAMETER_ALLOWED); # Option defines #----------------------------------------------------------------------------------------------------------------------------------- use constant CFGDEF_ALLOW_LIST => 'allow-list'; push @EXPORT, qw(CFGDEF_ALLOW_LIST); use constant CFGDEF_ALLOW_RANGE => 'allow-range'; push @EXPORT, qw(CFGDEF_ALLOW_RANGE); use constant CFGDEF_DEFAULT => 'default'; push @EXPORT, qw(CFGDEF_DEFAULT); use constant CFGDEF_DEFAULT_LITERAL => 'default-literal'; push @EXPORT, qw(CFGDEF_DEFAULT_LITERAL); # Group options together to share common configuration use constant CFGDEF_GROUP => 'group'; push @EXPORT, qw(CFGDEF_GROUP); use constant CFGDEF_BETA => 'beta'; push @EXPORT, qw(CFGDEF_BETA); use constant CFGDEF_INDEX => 'index'; push @EXPORT, qw(CFGDEF_INDEX); use constant CFGDEF_INHERIT => 'inherit'; push @EXPORT, qw(CFGDEF_INHERIT); use constant CFGDEF_INTERNAL => 'internal'; push @EXPORT, qw(CFGDEF_INTERNAL); use constant CFGDEF_DEPRECATE => 'deprecate'; push @EXPORT, qw(CFGDEF_DEPRECATE); use constant CFGDEF_NEGATE => 'negate'; push @EXPORT, qw(CFGDEF_NEGATE); use constant CFGDEF_COMMAND => 'command'; push @EXPORT, qw(CFGDEF_COMMAND); use constant CFGDEF_COMMAND_ROLE => 'command-role'; push @EXPORT, qw(CFGDEF_COMMAND_ROLE); use constant CFGDEF_REQUIRED => 'required'; push @EXPORT, qw(CFGDEF_REQUIRED); use constant CFGDEF_RESET => 'reset'; push @EXPORT, qw(CFGDEF_RESET); use constant CFGDEF_SECTION => 'section'; push @EXPORT, qw(CFGDEF_SECTION); use constant CFGDEF_SECURE => 'secure'; push @EXPORT, qw(CFGDEF_SECURE); use constant CFGDEF_TYPE => 'type'; push @EXPORT, qw(CFGDEF_TYPE); # Option types #----------------------------------------------------------------------------------------------------------------------------------- use constant CFGDEF_TYPE_BOOLEAN => 'boolean'; push @EXPORT, qw(CFGDEF_TYPE_BOOLEAN); use constant CFGDEF_TYPE_HASH => 'hash'; push @EXPORT, qw(CFGDEF_TYPE_HASH); use constant CFGDEF_TYPE_INTEGER => 'integer'; push @EXPORT, qw(CFGDEF_TYPE_INTEGER); use constant CFGDEF_TYPE_LIST => 'list'; push @EXPORT, qw(CFGDEF_TYPE_LIST); use constant CFGDEF_TYPE_PATH => 'path'; push @EXPORT, qw(CFGDEF_TYPE_PATH); use constant CFGDEF_TYPE_STRING => 'string'; push @EXPORT, qw(CFGDEF_TYPE_STRING); use constant CFGDEF_TYPE_SIZE => 'size'; push @EXPORT, qw(CFGDEF_TYPE_SIZE); use constant CFGDEF_TYPE_TIME => 'time'; push @EXPORT, qw(CFGDEF_TYPE_TIME); # Option config sections #----------------------------------------------------------------------------------------------------------------------------------- use constant CFGDEF_SECTION_GLOBAL => 'global'; push @EXPORT, qw(CFGDEF_SECTION_GLOBAL); use 
use constant CFGDEF_SECTION_STANZA => 'stanza'; push @EXPORT, qw(CFGDEF_SECTION_STANZA);

####################################################################################################################################
# Load configuration
####################################################################################################################################
use YAML::XS qw(LoadFile);

# Required so booleans are not read-only
local $YAML::XS::Boolean = "JSON::PP";

my $rhConfig = LoadFile(dirname(dirname($0)) . '/src/build/config/config.yaml');
my $rhCommandDefine = $rhConfig->{'command'};
my $rhOptionGroupDefine = $rhConfig->{'optionGroup'};
my $rhConfigDefine = $rhConfig->{'option'};
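# For orientation, a single (hypothetical) entry under the 'option' key of config.yaml, e.g.:
#
#     option:
#       example-opt:
#         type: boolean
#         section: global
#         default: false
#
# arrives here as a nested hash reference, roughly:
#
#     $rhConfigDefine->{'example-opt'} = {type => 'boolean', section => 'global', default => false};
#
# The processing below fills in defaults for any keys the YAML omits.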
####################################################################################################################################
# Fix errors introduced by YAML::XS::LoadFile. This is typically fixed by setting local $YAML::XS::Boolean = "JSON::PP", but older
# Debian/Ubuntu versions do not support this fix. Some booleans get set read only and others also end up as empty strings. There is
# no apparent pattern to what gets broken so it is important to be on the lookout for strange output when adding new options.
#
# ??? For now this code is commented out since packages for older Debians can be built using backports. It is being preserved just
# in case it is needed before the migration to C is complete.
####################################################################################################################################
# sub optionDefineFixup
# {
#     my $strKey = shift;
#     my $rhDefine = shift;
#
#     # Fix read-only required values so they are writable
#     if (defined($rhDefine->{&CFGDEF_REQUIRED}))
#     {
#         my $value = $rhDefine->{&CFGDEF_REQUIRED} ? true : false;
#
#         delete($rhDefine->{&CFGDEF_REQUIRED});
#         $rhDefine->{&CFGDEF_REQUIRED} = $value;
#     }
#
#     # If the default is an empty string set to false. This must be a mangled boolean since empty strings are not valid defaults.
#     if (defined($rhDefine->{&CFGDEF_DEFAULT}) && $rhDefine->{&CFGDEF_DEFAULT} eq '')
#     {
#         delete($rhDefine->{&CFGDEF_DEFAULT});
#         $rhDefine->{&CFGDEF_DEFAULT} = false;
#     }
# }
#
# # Fix all options
# foreach my $strKey (sort(keys(%{$rhConfigDefine})))
# {
#     my $rhOption = $rhConfigDefine->{$strKey};
#     optionDefineFixup($strKey, $rhOption);
#
#     # Fix all option commands
#     if (ref($rhOption->{&CFGDEF_COMMAND}))
#     {
#         foreach my $strCommand (sort(keys(%{$rhOption->{&CFGDEF_COMMAND}})))
#         {
#             optionDefineFixup("$strKey-$strCommand", $rhOption->{&CFGDEF_COMMAND}{$strCommand});
#         }
#     }
# }
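# A minimal sketch of the boolean workaround discussed above (assumes a YAML::XS recent enough to support JSON::PP booleans):
#
#     use YAML::XS qw(Load);
#
#     local $YAML::XS::Boolean = "JSON::PP";
#     my $rhDoc = Load("flag: true\n");
#
#     # The value is now a JSON::PP::Boolean object rather than a read-only scalar, so it can be tested and copied normally
#     my $bFlag = $rhDoc->{flag} ? true : false;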
####################################################################################################################################
# Process command define defaults
####################################################################################################################################
foreach my $strCommand (sort(keys(%{$rhCommandDefine})))
{
    # Commands are external by default
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_INTERNAL}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_INTERNAL} = false;
    }

    # Log files are created by default
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_LOG_FILE}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_LOG_FILE} = true;
    }

    # Default log level is INFO
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_LOG_LEVEL_DEFAULT}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_LOG_LEVEL_DEFAULT} = INFO;
    }

    # Default lock required is false
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_REQUIRED}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_REQUIRED} = false;
    }

    # Default lock remote required is false
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_REMOTE_REQUIRED}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_REMOTE_REQUIRED} = false;
    }

    # Lock type must be set if a lock is required
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_TYPE}))
    {
        # Is a lock type required?
        if ($rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_REQUIRED})
        {
            confess &log(ERROR, "lock type is required for command '${strCommand}'");
        }

        $rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_TYPE} = CFGDEF_LOCK_TYPE_NONE;
    }
    else
    {
        if ($rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_REQUIRED} &&
            $rhCommandDefine->{$strCommand}{&CFGDEF_LOCK_TYPE} eq CFGDEF_LOCK_TYPE_NONE)
        {
            confess &log(ERROR, "lock type is required for command '${strCommand}' and cannot be 'none'");
        }
    }

    # Default parameter allowed is false
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_PARAMETER_ALLOWED}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_PARAMETER_ALLOWED} = false;
    }

    # All commands have the default role
    if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_COMMAND_ROLE}{&CFGCMD_ROLE_MAIN}))
    {
        $rhCommandDefine->{$strCommand}{&CFGDEF_COMMAND_ROLE}{&CFGCMD_ROLE_MAIN} = {};
    }
}

####################################################################################################################################
# Process option define defaults
####################################################################################################################################
foreach my $strKey (sort(keys(%{$rhConfigDefine})))
{
    my $rhOption = $rhConfigDefine->{$strKey};

    # If the define is a scalar then copy the entire define from the referenced option
    if (defined($rhConfigDefine->{$strKey}{&CFGDEF_INHERIT}))
    {
        # Make a copy in case there are overrides that need to be applied after inheriting
        my $hConfigDefineOverride = dclone($rhConfigDefine->{$strKey});

        # Copy the option being inherited from
        $rhConfigDefine->{$strKey} = dclone($rhConfigDefine->{$rhConfigDefine->{$strKey}{&CFGDEF_INHERIT}});

        # No need to copy the inheritance key
        delete($rhConfigDefine->{$strKey}{&CFGDEF_INHERIT});

        # It makes no sense to inherit deprecations - they must be specified for each option
        delete($rhConfigDefine->{$strKey}{&CFGDEF_DEPRECATE});

        # Apply overrides
        foreach my $strOptionDef (sort(keys(%{$hConfigDefineOverride})))
        {
            $rhConfigDefine->{$strKey}{$strOptionDef} = $hConfigDefineOverride->{$strOptionDef};
        }

        # Update option variable with new hash reference
        $rhOption = $rhConfigDefine->{$strKey}
    }
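    # For illustration (hypothetical option names): given
    #
    #     $rhConfigDefine->{'example-base'} = {type => 'string', section => 'global', required => false};
    #     $rhConfigDefine->{'example-copy'} = {inherit => 'example-base', required => true};
    #
    # the block above replaces 'example-copy' with a deep copy of 'example-base' and then reapplies the overrides, yielding
    # {type => 'string', section => 'global', required => true} (note that the inherit marker itself is reapplied along with
    # the other override keys).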
    # If command is not specified then the option is valid for all commands except version and help
    if (!defined($rhOption->{&CFGDEF_COMMAND}))
    {
        foreach my $strCommand (sort(keys(%{$rhCommandDefine})))
        {
            next if $strCommand eq CFGCMD_HELP || $strCommand eq CFGCMD_VERSION;

            $rhOption->{&CFGDEF_COMMAND}{$strCommand} = {};
        }
    }
    # Else if the command section is a scalar then copy the section from the referenced option
    elsif (defined($rhConfigDefine->{$strKey}{&CFGDEF_COMMAND}) && !ref($rhConfigDefine->{$strKey}{&CFGDEF_COMMAND}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_COMMAND} =
            dclone($rhConfigDefine->{$rhConfigDefine->{$strKey}{&CFGDEF_COMMAND}}{&CFGDEF_COMMAND});
    }

    # If the allow list is a scalar then copy the list from the referenced option
    if (defined($rhConfigDefine->{$strKey}{&CFGDEF_ALLOW_LIST}) && !ref($rhConfigDefine->{$strKey}{&CFGDEF_ALLOW_LIST}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_ALLOW_LIST} =
            dclone($rhConfigDefine->{$rhConfigDefine->{$strKey}{&CFGDEF_ALLOW_LIST}}{&CFGDEF_ALLOW_LIST});
    }

    # Type is required
    if (!defined($rhConfigDefine->{$strKey}{&CFGDEF_TYPE}))
    {
        &log(ASSERT, "type is required for option '${strKey}'");
    }

    # Default required is true
    if (!defined($rhConfigDefine->{$strKey}{&CFGDEF_REQUIRED}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_REQUIRED} = true;
    }

    # Default internal is false
    if (!defined($rhConfigDefine->{$strKey}{&CFGDEF_INTERNAL}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_INTERNAL} = false;
    }

    # All boolean config options can be negated. Boolean command-line options must be marked for negation individually.
    if ($rhConfigDefine->{$strKey}{&CFGDEF_TYPE} eq CFGDEF_TYPE_BOOLEAN && defined($rhConfigDefine->{$strKey}{&CFGDEF_SECTION}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_NEGATE} = true;
    }

    # Default for negation is false
    if (!defined($rhConfigDefine->{$strKey}{&CFGDEF_NEGATE}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_NEGATE} = false;
    }

    # All config options can be reset
    if (defined($rhConfigDefine->{$strKey}{&CFGDEF_SECTION}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_RESET} = true;
    }
    elsif (!defined($rhConfigDefine->{$strKey}{&CFGDEF_RESET}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_RESET} = false;
    }

    # By default options are not secure
    if (!defined($rhConfigDefine->{$strKey}{&CFGDEF_SECURE}))
    {
        $rhConfigDefine->{$strKey}{&CFGDEF_SECURE} = false;
    }

    # All size and time options must have an allow range
    if (($rhConfigDefine->{$strKey}{&CFGDEF_TYPE} eq CFGDEF_TYPE_TIME || $rhConfigDefine->{$strKey}{&CFGDEF_TYPE} eq CFGDEF_TYPE_SIZE) &&
        !(defined($rhConfigDefine->{$strKey}{&CFGDEF_ALLOW_RANGE}) || defined($rhConfigDefine->{$strKey}{&CFGDEF_ALLOW_LIST})))
    {
        confess &log(ASSERT, "int/size/time option '${strKey}' must have allow range or list");
    }

    # Ensure all commands are valid
    foreach my $strCommand (sort(keys(%{$rhConfigDefine->{$strKey}{&CFGDEF_COMMAND}})))
    {
        if (!defined($rhCommandDefine->{$strCommand}))
        {
            confess &log(ASSERT, "invalid command '${strCommand}'");
        }
    }
}

# Generate valid command roles for each option
foreach my $strOption (sort(keys(%{$rhConfigDefine})))
{
    my $rhOption = $rhConfigDefine->{$strOption};

    # Generate valid command roles for each command in the option
    foreach my $strCommand (sort(keys(%{$rhOption->{&CFGDEF_COMMAND}})))
    {
        # If command roles are defined in the option command override then check that they are valid
        if (defined($rhOption->{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_COMMAND_ROLE}))
        {
            foreach my $strCommandRole (sort(keys(%{$rhOption->{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_COMMAND_ROLE}})))
            {
                if (!defined($rhCommandDefine->{$strCommand}{&CFGDEF_COMMAND_ROLE}{$strCommandRole}))
                {
                    confess &log(
                        ASSERT, "option '${strOption}', command '${strCommand}' has invalid command role '${strCommandRole}'");
                }
            }
        }
        # Else if the option has command roles defined then use the intersection of command roles with the command
        elsif (defined($rhOption->{&CFGDEF_COMMAND_ROLE}))
        {
            foreach my $strCommandRole (sort(keys(%{$rhOption->{&CFGDEF_COMMAND_ROLE}})))
            {
                if (defined($rhCommandDefine->{$strCommand}{&CFGDEF_COMMAND_ROLE}{$strCommandRole}))
                {
                    $rhOption->{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_COMMAND_ROLE}{$strCommandRole} = {};
                }
            }
        }
        # Else copy the command roles from the command
        else
        {
            foreach my $strCommandRole (sort(keys(%{$rhCommandDefine->{$strCommand}{&CFGDEF_COMMAND_ROLE}})))
            {
                $rhOption->{&CFGDEF_COMMAND}{$strCommand}{&CFGDEF_COMMAND_ROLE}{$strCommandRole} = {};
            }
        }
    }

    # Remove option command roles so they don't accidentally get used in processing (since they were copied to option commands)
    delete($rhOption->{&CFGDEF_COMMAND_ROLE});
}
####################################################################################################################################
# Get option definition
####################################################################################################################################
sub cfgDefine
{
    return dclone($rhConfigDefine);
}

push @EXPORT, qw(cfgDefine);

####################################################################################################################################
# Get command definition
####################################################################################################################################
sub cfgDefineCommand
{
    return dclone($rhCommandDefine);
}

push @EXPORT, qw(cfgDefineCommand);

####################################################################################################################################
# Get option group definition
####################################################################################################################################
sub cfgDefineOptionGroup
{
    return dclone($rhOptionGroupDefine);
}

push @EXPORT, qw(cfgDefineOptionGroup);

####################################################################################################################################
# Get list of all commands
####################################################################################################################################
sub cfgDefineCommandList
{
    # Return sorted list
    return (sort(keys(%{$rhCommandDefine})));
}

push @EXPORT, qw(cfgDefineCommandList);

####################################################################################################################################
# Get list of all option types
####################################################################################################################################
sub cfgDefineOptionTypeList
{
    my $rhOptionTypeMap;

    # Get unique list of types
    foreach my $strOption (sort(keys(%{$rhConfigDefine})))
    {
        my $strOptionType = $rhConfigDefine->{$strOption}{&CFGDEF_TYPE};

        if (!defined($rhOptionTypeMap->{$strOptionType}))
        {
            $rhOptionTypeMap->{$strOptionType} = true;
        }
    };

    # Return sorted list
    return (sort(keys(%{$rhOptionTypeMap})));
}

push @EXPORT, qw(cfgDefineOptionTypeList);
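# A minimal sketch of how a build script might consume these accessors (assumes this module is loaded):
#
#     my $rhCommandDefine = cfgDefineCommand();
#
#     foreach my $strCommand (cfgDefineCommandList())
#     {
#         print "${strCommand} default log level: " . $rhCommandDefine->{$strCommand}{&CFGDEF_LOG_LEVEL_DEFAULT} . "\n";
#     }
#
# Each accessor returns a deep copy (dclone), so callers may modify the result without affecting the loaded definitions.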
1;
pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Custom/DocCustomRelease.pm000066400000000000000000000670541500617037600263660ustar00rootroot00000000000000####################################################################################################################################
# DOC RELEASE MODULE
####################################################################################################################################
package pgBackRestDoc::Custom::DocCustomRelease;

use strict;
use warnings FATAL => qw(all);
use Carp qw(confess);
use Cwd qw(abs_path);

use Exporter qw(import);
    our @EXPORT = qw();
use File::Basename qw(dirname);

use pgBackRestDoc::Common::DocRender;
use pgBackRestDoc::Common::Log;
use pgBackRestDoc::Common::String;
use pgBackRestDoc::ProjectInfo;

####################################################################################################################################
# XML node constants
####################################################################################################################################
use constant XML_PARAM_ID => 'id';

use constant XML_CONTRIBUTOR_LIST => 'contributor-list';
use constant XML_CONTRIBUTOR => 'contributor';
use constant XML_CONTRIBUTOR_NAME_DISPLAY => 'contributor-name-display';

use constant XML_RELEASE_CORE_LIST => 'release-core-list';
use constant XML_RELEASE_DOC_LIST => 'release-doc-list';
use constant XML_RELEASE_TEST_LIST => 'release-test-list';

use constant XML_RELEASE_BUG_LIST => 'release-bug-list';
use constant XML_RELEASE_DEVELOPMENT_LIST => 'release-development-list';
use constant XML_RELEASE_FEATURE_LIST => 'release-feature-list';
use constant XML_RELEASE_IMPROVEMENT_LIST => 'release-improvement-list';

use constant XML_RELEASE_ITEM_CONTRIBUTOR_LIST => 'release-item-contributor-list';
use constant XML_RELEASE_ITEM_CONTRIBUTOR => 'release-item-contributor';
use constant XML_RELEASE_ITEM_IDEATOR => 'release-item-ideator';
use constant XML_RELEASE_ITEM_REVIEWER => 'release-item-reviewer';

####################################################################################################################################
# Contributor text constants
####################################################################################################################################
use constant TEXT_CONTRIBUTED => 'Contributed';
use constant TEXT_FIXED => 'Fixed';
use constant TEXT_FOUND => 'Reported';
use constant TEXT_REVIEWED => 'Reviewed';
use constant TEXT_SUGGESTED => 'Suggested';

####################################################################################################################################
# CONSTRUCTOR
####################################################################################################################################
sub new
{
    my $class = shift;       # Class name

    # Create the class hash
    my $self = {};
    bless $self, $class;

    # Assign function parameters, defaults, and log debug info
    (
        my $strOperation,
        $self->{oDoc},
        $self->{bDev},
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->new', \@_,
            {name => 'oDoc'},
            {name => 'bDev', required => false, default => false},
        );

    # Get contributor list
    foreach my $oContributor ($self->{oDoc}->nodeGet(XML_CONTRIBUTOR_LIST)->nodeList(XML_CONTRIBUTOR))
    {
        my $strContributorId = $oContributor->paramGet(XML_PARAM_ID);

        if (!defined($self->{hContributor}))
        {
            $self->{hContributor} = {};
            $self->{strContributorDefault} = $strContributorId;
        }

        ${$self->{hContributor}}{$strContributorId}{name} = $oContributor->fieldGet(XML_CONTRIBUTOR_NAME_DISPLAY);
    }

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'self', value => $self}
    );
}

####################################################################################################################################
# currentStableVersion
#
# Return the current stable version.
####################################################################################################################################
sub currentStableVersion
{
    my $self = shift;

    my $oDoc = $self->{oDoc};

    foreach my $oRelease ($oDoc->nodeGet('release-list')->nodeList('release'))
    {
        my $strVersion = $oRelease->paramGet('version');

        if ($strVersion !~ /dev$/)
        {
            return $strVersion;
        }
    }

    confess &log(ERROR, "unable to find non-development version");
}

####################################################################################################################################
# releaseLast
#
# Get the last release.
####################################################################################################################################
sub releaseLast
{
    my $self = shift;

    my $oDoc = $self->{oDoc};

    foreach my $oRelease ($oDoc->nodeGet('release-list')->nodeList('release'))
    {
        return $oRelease;
    }
}
####################################################################################################################################
# contributorTextGet
#
# Get a list of contributors for an item in text format.
####################################################################################################################################
sub contributorTextGet
{
    my $self = shift;
    my $oReleaseItem = shift;
    my $strItemType = shift;

    my $strContributorText;
    my $hItemContributorType = {};

    # Create the list of contributors
    foreach my $strContributorType (XML_RELEASE_ITEM_IDEATOR, XML_RELEASE_ITEM_CONTRIBUTOR, XML_RELEASE_ITEM_REVIEWER)
    {
        my $stryItemContributor = [];

        if ($oReleaseItem->nodeTest(XML_RELEASE_ITEM_CONTRIBUTOR_LIST))
        {
            foreach my $oContributor ($oReleaseItem->nodeGet(XML_RELEASE_ITEM_CONTRIBUTOR_LIST)->
                nodeList($strContributorType, false))
            {
                push @{$stryItemContributor}, $oContributor->paramGet(XML_PARAM_ID);
            }
        }

        if (@$stryItemContributor == 0 && $strContributorType eq XML_RELEASE_ITEM_CONTRIBUTOR)
        {
            push @{$stryItemContributor}, $self->{strContributorDefault}
        }

        # Add the default user as a reviewer if there are no reviewers listed and default user is not already a contributor
        if (@$stryItemContributor == 0 && $strContributorType eq XML_RELEASE_ITEM_REVIEWER)
        {
            my $bFound = false;

            foreach my $strContributor (@{$$hItemContributorType{&XML_RELEASE_ITEM_CONTRIBUTOR}})
            {
                if ($strContributor eq $self->{strContributorDefault})
                {
                    $bFound = true;
                    last;
                }
            }

            if (!$bFound)
            {
                push @{$stryItemContributor}, $self->{strContributorDefault}
            }
        }

        $$hItemContributorType{$strContributorType} = $stryItemContributor;
    }

    # Error if a reviewer is also a contributor
    foreach my $strReviewer (@{$$hItemContributorType{&XML_RELEASE_ITEM_REVIEWER}})
    {
        foreach my $strContributor (@{$$hItemContributorType{&XML_RELEASE_ITEM_CONTRIBUTOR}})
        {
            if ($strReviewer eq $strContributor)
            {
                confess &log(ERROR, "${strReviewer} cannot be both a contributor and a reviewer");
            }
        }
    }

    # Error if the ideator list is the same as the contributor list
    if (join(',', @{$$hItemContributorType{&XML_RELEASE_ITEM_IDEATOR}}) eq
        join(',', @{$$hItemContributorType{&XML_RELEASE_ITEM_CONTRIBUTOR}}))
    {
        confess &log(
            ERROR,
            'cannot have same contributor and ideator list: ' .
                join(', ', @{$$hItemContributorType{&XML_RELEASE_ITEM_CONTRIBUTOR}}));
    }

    # Remove the default user if they are the only one in a group (to prevent the entire page from being splattered with one name)
    foreach my $strContributorType (XML_RELEASE_ITEM_IDEATOR, XML_RELEASE_ITEM_CONTRIBUTOR)
    {
        if (@{$$hItemContributorType{$strContributorType}} == 1 &&
            @{$$hItemContributorType{$strContributorType}}[0] eq $self->{strContributorDefault})
        {
            $$hItemContributorType{$strContributorType} = [];
        }
    }

    # Render the string
    foreach my $strContributorType (XML_RELEASE_ITEM_CONTRIBUTOR, XML_RELEASE_ITEM_REVIEWER, XML_RELEASE_ITEM_IDEATOR)
    {
        my $stryItemContributor = $$hItemContributorType{$strContributorType};
        my $strContributorTypeText;

        foreach my $strContributor (@{$stryItemContributor})
        {
            my $hContributor = ${$self->{hContributor}}{$strContributor};

            if (!defined($hContributor))
            {
                confess &log(ERROR, "contributor ${strContributor} does not exist");
            }

            $strContributorTypeText .= (defined($strContributorTypeText) ? ', ' : '') . $$hContributor{name};
        }

        if (defined($strContributorTypeText))
        {
            $strContributorTypeText = ' by ' . $strContributorTypeText . '.';

            if ($strContributorType eq XML_RELEASE_ITEM_CONTRIBUTOR)
            {
                $strContributorTypeText = ($strItemType eq 'bug' ? TEXT_FIXED : TEXT_CONTRIBUTED) . $strContributorTypeText;
            }
            elsif ($strContributorType eq XML_RELEASE_ITEM_IDEATOR)
            {
                $strContributorTypeText = ($strItemType eq 'bug' ? TEXT_FOUND : TEXT_SUGGESTED) . $strContributorTypeText;
            }
            elsif ($strContributorType eq XML_RELEASE_ITEM_REVIEWER)
            {
                $strContributorTypeText = TEXT_REVIEWED . $strContributorTypeText;
            }

            $strContributorText .= (defined($strContributorText) ? ' ' : '') . $strContributorTypeText;
        }
    }

    return $strContributorText;
}
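# For illustration, a bug-fix item with one contributor, one reviewer, and one ideator renders roughly as
# (hypothetical names): "Fixed by Jane Doe. Reviewed by John Smith. Reported by Sam Roe." per the TEXT_* constants above,
# with 'Contributed'/'Suggested' substituted for non-bug item types.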
####################################################################################################################################
# Find a commit by subject prefix. Error if the prefix appears more than once.
####################################################################################################################################
sub commitFindSubject
{
    my $self = shift;
    my $rhyCommit = shift;
    my $strSubjectPrefix = shift;
    my $bRegExp = shift;

    $bRegExp = defined($bRegExp) ? $bRegExp : true;

    my $rhResult = undef;

    foreach my $rhCommit (@{$rhyCommit})
    {
        if (($bRegExp && $rhCommit->{subject} =~ /^$strSubjectPrefix/) ||
            (!$bRegExp && length($rhCommit->{subject}) >= length($strSubjectPrefix) &&
                substr($rhCommit->{subject}, 0, length($strSubjectPrefix)) eq $strSubjectPrefix))
        {
            if (defined($rhResult))
            {
                confess &log(ERROR, "subject prefix '${strSubjectPrefix}' already found in commit " . $rhCommit->{commit});
            }

            $rhResult = $rhCommit;
        }
    }

    return $rhResult;
}

####################################################################################################################################
# Throw an error that includes a list of release commits
####################################################################################################################################
sub commitError
{
    my $self = shift;
    my $strMessage = shift;
    my $rstryCommitRemaining = shift;
    my $rhyCommit = shift;

    my $strList;

    foreach my $strCommit (@{$rstryCommitRemaining})
    {
        $strList .=
            (defined($strList) ? "\n" : '') .
            substr($rhyCommit->{$strCommit}{date}, 0, length($rhyCommit->{$strCommit}{date}) - 15) . " $strCommit: " .
            $rhyCommit->{$strCommit}{subject};
    }

    confess &log(ERROR, "${strMessage}:\n${strList}");
}
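# The git-history cache consumed by docGet() below is an array of hashes; each record carries at least the three fields
# referenced here, e.g. (hypothetical values):
#
#     {commit => 'abcd1234...', date => '2024-01-01 00:00:00 +0000', subject => 'Begin v9.99 development.'}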
####################################################################################################################################
# docGet
#
# Get the xml for release.
####################################################################################################################################
sub docGet
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my $strOperation = logDebugParam(__PACKAGE__ . '->docGet');

    # Load the git history
    my $oStorageDoc = new pgBackRestTest::Common::Storage(
        dirname(abs_path($0)), new pgBackRestTest::Common::StoragePosix({bFileSync => false, bPathSync => false}));
    my @hyGitLog = @{(JSON::PP->new()->allow_nonref())->decode(${$oStorageDoc->get("resource/git-history.cache")})};

    # Get renderer
    my $oRender = new pgBackRestDoc::Common::DocRender('text');
    $oRender->tagSet('backrest', PROJECT_NAME);

    # Create the doc
    my $oDoc = new pgBackRestDoc::Common::Doc();
    $oDoc->paramSet('title', $self->{oDoc}->paramGet('title'));
    $oDoc->paramSet('toc-number', $self->{oDoc}->paramGet('toc-number'));

    # Set the description for use as a meta tag
    $oDoc->fieldSet('description', $self->{oDoc}->fieldGet('description'));

    # Add the introduction
    my $oIntroSectionDoc = $oDoc->nodeAdd('section', undef, {id => 'introduction'});
    $oIntroSectionDoc->nodeAdd('title')->textSet('Introduction');
    $oIntroSectionDoc->textSet($self->{oDoc}->nodeGet('intro')->textGet());

    # Add each release section
    my $oSection;
    my $iDevReleaseTotal = 0;
    my $iCurrentReleaseTotal = 0;
    my $iStableReleaseTotal = 0;
    my $iUnsupportedReleaseTotal = 0;

    my @oyRelease = $self->{oDoc}->nodeGet('release-list')->nodeList('release');

    for (my $iReleaseIdx = 0; $iReleaseIdx < @oyRelease; $iReleaseIdx++)
    {
        my $oRelease = $oyRelease[$iReleaseIdx];

        # Get the release version and dev flag
        my $strVersion = $oRelease->paramGet('version');
        my $bReleaseDev = $strVersion =~ /dev$/ ? true : false;

        # Get a list of commits that apply to this release
        my @rhyReleaseCommit;
        my $rhReleaseCommitRemaining;
        my @stryReleaseCommitRemaining;
        my $bReleaseCheckCommit = false;

        # Check versions except for bug fix releases that are not the most recent release (since bug fixes are on separate branches)
        if ($strVersion ge '2.01' && !($strVersion =~ /^[0-9]+\.[0-9]+\.[0-9]+$/ && $iReleaseIdx != 0))
        {
            # Should commits in the release be checked?
            $bReleaseCheckCommit = !$bReleaseDev ? true : false;

            # Get the begin commit
            my $rhReleaseCommitBegin = $self->commitFindSubject(\@hyGitLog, "Begin v${strVersion} development\\.");
            my $strReleaseCommitBegin = defined($rhReleaseCommitBegin) ? $rhReleaseCommitBegin->{commit} : undef;

            # Get the end commit of the last release (skipping bug fixes which are on separate branches)
            my $iReleaseLastVersionIdx = $iReleaseIdx + 1;
            my $strReleaseLastVersion = $oyRelease[$iReleaseLastVersionIdx]->paramGet('version');

            while ($strReleaseLastVersion =~ /^[0-9]+\.[0-9]+\.[0-9]+$/)
            {
                $iReleaseLastVersionIdx++;
                $strReleaseLastVersion = $oyRelease[$iReleaseLastVersionIdx]->paramGet('version');
            }

            my $rhReleaseLastCommitEnd = $self->commitFindSubject(\@hyGitLog, "v${strReleaseLastVersion}\\: .+");

            if (!defined($rhReleaseLastCommitEnd))
            {
                confess &log(ERROR, "release ${strReleaseLastVersion} must have an end commit");
            }

            my $strReleaseLastCommitEnd = $rhReleaseLastCommitEnd->{commit};

            # Get the end commit
            my $rhReleaseCommitEnd = $self->commitFindSubject(\@hyGitLog, "v${strVersion}\\: .+");
            my $strReleaseCommitEnd = defined($rhReleaseCommitEnd) ? $rhReleaseCommitEnd->{commit} : undef;
            if ($bReleaseCheckCommit && !defined($rhReleaseCommitEnd) && $iReleaseIdx != 0)
            {
                confess &log(ERROR, "release ${strVersion} must have an end commit");
            }

            # Make a list of commits for this release
            while ($hyGitLog[0]->{commit} ne $strReleaseLastCommitEnd)
            {
                # Don't add begin/end commits to the list since they are already accounted for
                if ((defined($strReleaseCommitEnd) && $hyGitLog[0]->{commit} eq $strReleaseCommitEnd) ||
                    (defined($strReleaseCommitBegin) && $hyGitLog[0]->{commit} eq $strReleaseCommitBegin))
                {
                    shift(@hyGitLog);
                }
                # Else add the commit to this releases' list
                else
                {
                    push(@stryReleaseCommitRemaining, $hyGitLog[0]->{commit});
                    push(@rhyReleaseCommit, $hyGitLog[0]);

                    $rhReleaseCommitRemaining->{$hyGitLog[0]->{commit}}{date} = $hyGitLog[0]->{date};
                    $rhReleaseCommitRemaining->{$hyGitLog[0]->{commit}}{subject} = $hyGitLog[0]->{subject};

                    shift(@hyGitLog);
                }
            }

            # At least one commit is required for non-dev releases
            if ($bReleaseCheckCommit && @stryReleaseCommitRemaining == 0)
            {
                confess &log(ERROR, "no commits found for release ${strVersion}");
            }
        }

        # Display versions in TOC?
        my $bTOC = true;

        # Create a release section
        if ($bReleaseDev)
        {
            if ($iDevReleaseTotal > 1)
            {
                confess &log(ERROR, 'only one development release is allowed');
            }

            $oSection = $oDoc->nodeAdd('section', undef, {id => 'development', if => "'{[dev]}' eq 'y'"});
            $oSection->nodeAdd('title')->textSet("Development Notes");

            $iDevReleaseTotal++;
        }
        elsif ($iCurrentReleaseTotal == 0)
        {
            $oSection = $oDoc->nodeAdd('section', undef, {id => 'current'});
            $oSection->nodeAdd('title')->textSet("Current Stable Release");

            $iCurrentReleaseTotal++;
        }
        elsif ($strVersion ge '1.00')
        {
            if ($iStableReleaseTotal == 0)
            {
                $oSection = $oDoc->nodeAdd('section', undef, {id => 'supported'});
                $oSection->nodeAdd('title')->textSet("Stable Releases");
            }

            $iStableReleaseTotal++;
            $bTOC = false;
        }
        else
        {
            if ($iUnsupportedReleaseTotal == 0)
            {
                $oSection = $oDoc->nodeAdd('section', undef, {id => 'unsupported'});
                $oSection->nodeAdd('title')->textSet("Pre-Stable Releases");
            }

            $iUnsupportedReleaseTotal++;
            $bTOC = false;
        }

        # Format the date
        my $strDate = $oRelease->paramGet('date');
        my $strDateOut = "";

        my @stryMonth = ('January', 'February', 'March', 'April', 'May', 'June',
                         'July', 'August', 'September', 'October', 'November', 'December');

        if ($strDate =~ /^X/)
        {
            $strDateOut .= 'No Release Date Set';
        }
        else
        {
            if ($strDate !~ /^(XXXX-XX-XX)|([0-9]{4}-[0-9]{2}-[0-9]{2})$/)
            {
                confess &log(ASSERT, "invalid date ${strDate} for release ${strVersion}");
            }

            $strDateOut .= 'Released ' . $stryMonth[(substr($strDate, 5, 2) - 1)] . ' ' . (substr($strDate, 8, 2) + 0) . ', ' .
                           substr($strDate, 0, 4);
        }
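        # For example, a release date of '2024-05-20' renders as 'Released May 20, 2024', while any date beginning with 'X'
        # (i.e. not yet set) renders as 'No Release Date Set'.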
        # Add section and titles
        my $oReleaseSection = $oSection->nodeAdd('section', undef, {id => $strVersion, toc => !$bTOC ? 'n' : undef});
        $oReleaseSection->paramSet(XML_SECTION_PARAM_ANCHOR, XML_SECTION_PARAM_ANCHOR_VALUE_NOINHERIT);

        $oReleaseSection->nodeAdd('title')->textSet("v${strVersion} " . ($bReleaseDev ? '' : 'Release ') . 'Notes');
        $oReleaseSection->nodeAdd('subtitle')->textSet($oRelease->paramGet('title'));
        $oReleaseSection->nodeAdd('subsubtitle')->textSet($strDateOut);

        # Add release sections
        my $bReleaseNote = false;

        my $hSectionType =
        {
            &XML_RELEASE_CORE_LIST => {title => 'Core', type => 'core'},
            &XML_RELEASE_DOC_LIST => {title => 'Documentation', type => 'doc'},
            &XML_RELEASE_TEST_LIST => {title => 'Test Suite', type => 'test'},
        };

        foreach my $strSectionType (XML_RELEASE_CORE_LIST, XML_RELEASE_DOC_LIST, XML_RELEASE_TEST_LIST)
        {
            if ($oRelease->nodeTest($strSectionType))
            {
                # Add release item types
                my $hItemType =
                {
                    &XML_RELEASE_BUG_LIST => {title => 'Bug Fixes', type => 'bug'},
                    &XML_RELEASE_FEATURE_LIST => {title => 'Features', type => 'feature'},
                    &XML_RELEASE_IMPROVEMENT_LIST => {title => 'Improvements', type => 'improvement'},
                    &XML_RELEASE_DEVELOPMENT_LIST => {title => 'Development', type => 'development'},
                };

                foreach my $strItemType (
                    XML_RELEASE_BUG_LIST, XML_RELEASE_FEATURE_LIST, XML_RELEASE_IMPROVEMENT_LIST, XML_RELEASE_DEVELOPMENT_LIST)
                {
                    next if (!$self->{bDev} && $strItemType eq XML_RELEASE_DEVELOPMENT_LIST);

                    if ($oRelease->nodeGet($strSectionType)->nodeTest($strItemType))
                    {
                        # Add release note if present
                        if (!$bReleaseNote && defined($oRelease->nodeGet($strSectionType)->textGet(false)))
                        {
                            $oReleaseSection->nodeAdd('p')->textSet($oRelease->nodeGet($strSectionType)->textGet());

                            $bReleaseNote = true;
                        }

                        my $strTypeText =
                            ($strSectionType eq XML_RELEASE_CORE_LIST ? '' : $$hSectionType{$strSectionType}{title}) . ' ' .
                            $$hItemType{$strItemType}{title} . ':';

                        $oReleaseSection->
                            nodeAdd('p')->textSet(
                                {name => 'text', children=> [{name => 'b', value => $strTypeText}]});

                        my $oList = $oReleaseSection->nodeAdd('list');
" '${strSubject}'", \@stryReleaseCommitRemaining, $rhReleaseCommitRemaining); my $strCommit = $rhCommit->{commit}; @stryReleaseCommitRemaining = grep(!/$strCommit/, @stryReleaseCommitRemaining); } } # Check the rest of the commits to ensure they exist foreach my $oCommit (@oyCommit) { my $strSubject = $oCommit->paramGet('subject'); my $rhCommit = $self->commitFindSubject(\@rhyReleaseCommit, $strSubject, false); if (defined($rhCommit)) { my $strCommit = $rhCommit->{commit}; @stryReleaseCommitRemaining = grep(!/$strCommit/, @stryReleaseCommitRemaining); } else { $self->commitError( "unable to find release ${strVersion} commit subject '${strSubject}' in list", \@stryReleaseCommitRemaining, $rhReleaseCommitRemaining); } } } # Append the rest of the text if (@rhyReleaseItemP > 1) { shift(@rhyReleaseItemP); push(@{$oReleaseItemText->{oDoc}{children}}, ' '); foreach my $rhReleaseItemP (@rhyReleaseItemP) { push(@{$oReleaseItemText->{oDoc}{children}}, @{$rhReleaseItemP->textGet()->{oDoc}{children}}); } } # Append contributor info my $strContributorText = $self->contributorTextGet($oReleaseFeature, $$hItemType{$strItemType}{type}); if (defined($strContributorText)) { push(@{$oReleaseItemText->{oDoc}{children}}, ' ('); push(@{$oReleaseItemText->{oDoc}{children}}, {name => 'i', value => $strContributorText}); push(@{$oReleaseItemText->{oDoc}{children}}, ')'); } # Add the list item $oList->nodeAdd('list-item')->textSet($oReleaseItemText); } } } } } # Error if there are commits left over # if ($bReleaseCheckCommit && @stryReleaseCommitRemaining != 0) # { # $self->commitError( # "unassigned commits for release ${strVersion}", \@stryReleaseCommitRemaining, $rhReleaseCommitRemaining); # } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oDoc', value => $oDoc} ); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Html/000077500000000000000000000000001500617037600222465ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Html/DocHtmlBuilder.pm000066400000000000000000000246751500617037600254630ustar00rootroot00000000000000#################################################################################################################################### # DOC HTML BUILDER MODULE #################################################################################################################################### package pgBackRestDoc::Html::DocHtmlBuilder; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Exporter qw(import); our @EXPORT = qw(); use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Html::DocHtmlElement; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; $self->{strClass} = $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{strName}, $self->{strTitle}, $self->{strFavicon}, $self->{strLogo}, $self->{strDescription}, $self->{bPretty}, $self->{bCompact}, $self->{strCss}, ) = logDebugParam ( __PACKAGE__ . 
####################################################################################################################################
# CONSTRUCTOR
####################################################################################################################################
sub new
{
    my $class = shift;       # Class name

    # Create the class hash
    my $self = {};
    bless $self, $class;

    $self->{strClass} = $class;

    # Assign function parameters, defaults, and log debug info
    (
        my $strOperation,
        $self->{strName},
        $self->{strTitle},
        $self->{strFavicon},
        $self->{strLogo},
        $self->{strDescription},
        $self->{bPretty},
        $self->{bCompact},
        $self->{strCss},
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->new', \@_,
            {name => 'strName'},
            {name => 'strTitle'},
            {name => 'strFavicon', required => false},
            {name => 'strLogo', required => false},
            {name => 'strDescription', required => false},
            {name => 'bPretty', default => false},
            {name => 'bCompact', default => false},
            {name => 'strCss', required => false},
        );

    $self->{oBody} = new pgBackRestDoc::Html::DocHtmlElement(HTML_BODY);

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'self', value => $self}
    );
}

####################################################################################################################################
# indent
#
# Indent html
####################################################################################################################################
sub indent
{
    my $self = shift;
    my $iDepth = shift;

    return $self->{bPretty} ? (' ' x $iDepth) : '';
}

####################################################################################################################################
# lf
#
# Add a linefeed.
####################################################################################################################################
sub lf
{
    my $self = shift;

    return $self->{bPretty} ? "\n" : '';
}

####################################################################################################################################
# bodyGet
#
# Get the body element.
####################################################################################################################################
sub bodyGet
{
    my $self = shift;

    return $self->{oBody};
}
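# A minimal usage sketch (hypothetical values; DocHtmlPage.pm below is the real caller):
#
#     my $oHtmlBuilder = new pgBackRestDoc::Html::DocHtmlBuilder('project', 'Example Title');
#
#     $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-body', {strContent => 'example content'});
#
#     my $strHtml = $oHtmlBuilder->htmlGet();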
####################################################################################################################################
# htmlRender
#
# Render each html element.
####################################################################################################################################
sub htmlRender
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
        $oElement,
        $iDepth
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->htmlRender', \@_,
            {name => 'oElement', trace => true},
            {name => 'iDepth', trace => true}
        );

    # If a pre tag add a linefeed before the tag unless the prior tag was also pre. This makes the output more diffable.
    my $strHtml = "";

    if ($oElement->{strType} eq HTML_PRE && !$self->{bPretty})
    {
        if (!$self->{bPrePrior})
        {
            $strHtml .= "\n";
        }

        $self->{bPrePrior} = true;
    }
    else
    {
        $self->{bPrePrior} = false;
    }

    # Build the tag
    $strHtml .=
        $self->indent($iDepth) . "<$oElement->{strType}" .
        (defined($oElement->{strClass}) ? " class=\"$oElement->{strClass}\"": '') .
        (defined($oElement->{strRef}) ? " href=\"$oElement->{strRef}\"": '') .
        (defined($oElement->{strId}) ? " id=\"$oElement->{strId}\"": '') .
        (defined($oElement->{strExtra}) ? " $oElement->{strExtra}": '') .
        '>';

    if (defined($oElement->{strContent}))
    {
        if (!defined($oElement->{bPre}) || !$oElement->{bPre})
        {
            $oElement->{strContent} = trim($oElement->{strContent});

            # Add a linefeed before the content if not pre. This makes the output more diffable.
            $strHtml .= "\n";
        }
        else
        {
            $oElement->{strContent} =~ s/\&/\&amp\;/g;
        }

        $strHtml .= $oElement->{strContent};

        # Add a linefeed after the content if not pre. This makes the output more diffable.
        if (!defined($oElement->{bPre}) || !$oElement->{bPre})
        {
            $strHtml .= "\n" . $self->indent($iDepth);
        }
    }
    else
    {
        if (!($oElement->{strType} eq HTML_A && @{$oElement->{oyElement}} == 0))
        {
            $strHtml .= $self->lf();
        }

        foreach my $oChildElement (@{$oElement->{oyElement}})
        {
            $strHtml .= $self->htmlRender($oChildElement, $iDepth + 1);
        }

        if (!($oElement->{strType} eq HTML_A && @{$oElement->{oyElement}} == 0))
        {
            $strHtml .= $self->indent($iDepth);
        }
    }

    $strHtml .= "</$oElement->{strType}>";

    # If a pre tag add an lf after the tag. This makes the output more diffable.
    $strHtml .= $oElement->{strType} eq HTML_PRE ? "\n" : $self->lf();

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'strHtml', value => $strHtml, trace => true}
    );
}

####################################################################################################################################
# escape
#
# Escape special characters for html.
####################################################################################################################################
sub escape
{
    my $self = shift;
    my $strBuffer = shift;

    $strBuffer =~ s/\&/\&amp\;/g;
    $strBuffer =~ s/\</\&lt\;/g;

    return $strBuffer;
}

####################################################################################################################################
# htmlGet
#
# Generate the HTML.
####################################################################################################################################
sub htmlGet
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
        $bAnalytics,
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->htmlGet', \@_,
            {name => 'bAnalytics', optional => true, default => false, trace => true},
        );

    # Build the header
    my $strHtml =
        $self->indent(0) . "" . $self->lf() .
        $self->indent(0) . "" . $self->lf() .
        $self->indent(0) . "" . $self->lf() .
        $self->indent(1) . "<title>\n" .
        $self->indent(2) . $self->escape($self->{strTitle}) . "\n" .
        $self->indent(1) . '</title>' . $self->lf() .
        $self->indent(1) . "\n";

    if (!$self->{bCompact})
    {
        $strHtml .=
            # $self->indent(1) . "\n" .
            $self->indent(1) . '\n" . $self->indent(1) . '\n" . $self->indent(1) . "\n";

        if (defined($self->{strFavicon}))
        {
            $strHtml .= $self->indent(1) . "<link rel=\"icon\" href=\"$self->{strFavicon}\" type=\"image/png\">\n";
        }

        if (defined($self->{strLogo}))
        {
            $strHtml .=
                $self->indent(1) . "\n" .
                $self->indent(1) . "{strLogo}\">\n";
        }

        if (defined($self->{strDescription}))
        {
            $strHtml .= $self->indent(1) . '\n" . $self->indent(1) . '\n";
        }
    }

    if (defined($self->{strCss}))
    {
        my $strCss = $self->{strCss};

        if (!$self->{bPretty})
        {
            $strCss =~ s/^\s+//mg;
            $strCss =~ s/\n//g;
            $strCss =~ s/\/\*.*?\*\///g;
        }

        $strHtml .= $self->indent(1) . "\n";
    }
    else
    {
        $strHtml .= $self->indent(1) . "\n";
    }

    if ($bAnalytics)
    {
        $strHtml .=
            $self->indent(1) . "\n" .
            $self->indent(1) . "\n";
    }

    $strHtml .= $self->indent(0) . "</head>" . $self->lf() . $self->htmlRender($self->bodyGet(), 0);

    # Complete the html
    $strHtml .= $self->indent(0) . "</html>" . $self->lf();
    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'strHtml', value => $strHtml, trace => true}
    );
}

1;
pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Html/DocHtmlElement.pm000066400000000000000000000125371500617037600254600ustar00rootroot00000000000000####################################################################################################################################
# DOC HTML ELEMENT MODULE
####################################################################################################################################
package pgBackRestDoc::Html::DocHtmlElement;

use strict;
use warnings FATAL => qw(all);
use Carp qw(confess);

use Exporter qw(import);
    our @EXPORT = qw();
use Scalar::Util qw(blessed);

use pgBackRestDoc::Common::Log;

####################################################################################################################################
# Html Element Types
####################################################################################################################################
use constant HTML_A => 'a'; push @EXPORT, qw(HTML_A);
use constant HTML_BODY => 'body'; push @EXPORT, qw(HTML_BODY);
use constant HTML_PRE => 'pre'; push @EXPORT, qw(HTML_PRE);
use constant HTML_DIV => 'div'; push @EXPORT, qw(HTML_DIV);
use constant HTML_SPAN => 'span'; push @EXPORT, qw(HTML_SPAN);
use constant HTML_TABLE => 'table'; push @EXPORT, qw(HTML_TABLE);
use constant HTML_TABLE_CAPTION => 'caption'; push @EXPORT, qw(HTML_TABLE_CAPTION);
use constant HTML_TD => 'td'; push @EXPORT, qw(HTML_TD);
use constant HTML_TH => 'th'; push @EXPORT, qw(HTML_TH);
use constant HTML_TR => 'tr'; push @EXPORT, qw(HTML_TR);
use constant HTML_UL => 'ul'; push @EXPORT, qw(HTML_UL);
use constant HTML_LI => 'li'; push @EXPORT, qw(HTML_LI);

####################################################################################################################################
# CONSTRUCTOR
####################################################################################################################################
sub new
{
    my $class = shift;       # Class name

    # Create the class hash
    my $self = {};
    bless $self, $class;

    $self->{strClass} = $class;

    # Assign function parameters, defaults, and log debug info
    (
        my $strOperation,
        $self->{strType},
        $self->{strClass},
        my $oParam
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->new', \@_,
            {name => 'strType', trace => true},
            {name => 'strClass', required => false, trace => true},
            {name => 'oParam', required => false, trace => true}
        );

    $self->{oyElement} = [];
    $self->{strContent} = $$oParam{strContent};
    $self->{strId} = $$oParam{strId};
    $self->{strRef} = $$oParam{strRef};
    $self->{strExtra} = $$oParam{strExtra};
    $self->{bPre} = $$oParam{bPre};

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'self', value => $self}
    );
}
####################################################################################################################################
# addNew
#
# Create a new element and add it.
####################################################################################################################################
sub addNew
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
        $strType,
        $strClass,
        $oParam
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->addNew', \@_,
            {name => 'strType', trace => true},
            {name => 'strClass', required => false, trace => true},
            {name => 'oParam', required => false, trace => true}
        );

    my $oElement = new pgBackRestDoc::Html::DocHtmlElement($strType, $strClass, $oParam);

    $self->add($oElement);

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'oElement', value => $oElement, trace => true}
    );
}

####################################################################################################################################
# add
#
# Add an element.
####################################################################################################################################
sub add
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
        $oElement
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->add', \@_,
            {name => 'oElement', trace => true}
        );

    if (!(blessed($oElement) && $oElement->isa('pgBackRestDoc::Html::DocHtmlElement')))
    {
        confess &log(ASSERT, 'oElement must be a valid element object');
    }

    push(@{$self->{oyElement}}, $oElement);

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'oElement', value => $oElement, trace => true}
    );
}
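# A minimal sketch of building an element tree with these helpers (hypothetical classes and content):
#
#     my $oTable = new pgBackRestDoc::Html::DocHtmlElement(HTML_TABLE, 'table');
#     my $oRow = $oTable->addNew(HTML_TR, 'table-row');
#
#     $oRow->addNew(HTML_TD, 'table-data', {strContent => 'cell text'});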
1;
pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Html/DocHtmlPage.pm000066400000000000000000000631021500617037600247350ustar00rootroot00000000000000####################################################################################################################################
# DOC HTML PAGE MODULE
####################################################################################################################################
package pgBackRestDoc::Html::DocHtmlPage;
    use parent 'pgBackRestDoc::Common::DocExecute';

use strict;
use warnings FATAL => qw(all);
use Carp qw(confess);

use Data::Dumper;
use Exporter qw(import);
    our @EXPORT = qw();

use pgBackRestDoc::Common::DocConfig;
use pgBackRestDoc::Common::DocManifest;
use pgBackRestDoc::Common::DocRender;
use pgBackRestDoc::Html::DocHtmlBuilder;
use pgBackRestDoc::Html::DocHtmlElement;
use pgBackRestDoc::Common::Log;
use pgBackRestDoc::Common::String;

####################################################################################################################################
# CONSTRUCTOR
####################################################################################################################################
sub new
{
    my $class = shift;       # Class name

    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
        $oManifest,
        $strRenderOutKey,
        $bMenu,
        $bExe,
        $bCompact,
        $strCss,
        $bPretty,
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->new', \@_,
            {name => 'oManifest'},
            {name => 'strRenderOutKey'},
            {name => 'bMenu'},
            {name => 'bExe'},
            {name => 'bCompact'},
            {name => 'strCss'},
            {name => 'bPretty'},
        );

    # Create the class hash
    my $self = $class->SUPER::new(RENDER_TYPE_HTML, $oManifest, $strRenderOutKey, $bExe);
    bless $self, $class;

    $self->{bMenu} = $bMenu;
    $self->{bCompact} = $bCompact;
    $self->{strCss} = $strCss;
    $self->{bPretty} = $bPretty;

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'self', value => $self}
    );
}
####################################################################################################################################
# process
#
# Generate the site html
####################################################################################################################################
sub process
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my $strOperation = logDebugParam(__PACKAGE__ . '->process');

    # Working variables
    my $oPage = $self->{oDoc};
    my $oRender = $self->{oManifest}->renderGet(RENDER_TYPE_HTML);

    # Initialize page
    my $strTitle = $oPage->paramGet('title');
    my $strSubTitle = $oPage->paramGet('subtitle', false);

    my $oHtmlBuilder = new pgBackRestDoc::Html::DocHtmlBuilder(
        $self->{oManifest}->variableReplace(
            '{[project]}' .
            (defined($self->{oManifest}->variableGet('project-tagline')) ?
                ' - ' . $self->{oManifest}->variableGet('project-tagline') : '')),
        $self->{oManifest}->variableReplace($strTitle . (defined($strSubTitle) ? " - ${strSubTitle}" : '')),
        $self->{oManifest}->variableGet('project-favicon'),
        $self->{oManifest}->variableGet('project-logo'),
        $self->{oManifest}->variableReplace(trim($self->{oDoc}->fieldGet('description'))),
        $self->{bPretty},
        $self->{bCompact},
        $self->{bCompact} ? $self->{strCss} : undef);

    # Generate header
    my $oPageHeader = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-header');

    # add the logo to the header
    if (defined($self->{oManifest}->variableGet('html-logo')))
    {
        $oPageHeader->addNew(HTML_DIV, 'page-header-logo', {strContent =>"{[html-logo]}"});
    }

    $oPageHeader->addNew(HTML_DIV, 'page-header-title', {strContent => $strTitle});

    if (defined($strSubTitle))
    {
        $oPageHeader->addNew(HTML_DIV, 'page-header-subtitle', {strContent => $strSubTitle});
    }

    # Generate menu
    if ($self->{bMenu})
    {
        my $oMenuBody = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-menu')->addNew(HTML_DIV, 'menu-body');

        # Get the menu in the order listed in the manifest.xml
        foreach my $strRenderOutKey (@{${$oRender}{stryOrder}})
        {
            # Do not output the menu item for the page the user is on (e.g. on Command page, the Command menu item will not appear)
            if ($strRenderOutKey ne $self->{strRenderOutKey})
            {
                my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_HTML, $strRenderOutKey);

                if (defined($$oRenderOut{menu}))
                {
                    $oMenuBody->addNew(HTML_DIV, 'menu')->addNew(
                        HTML_A, 'menu-link',
                        {strContent => $$oRenderOut{menu},
                            strRef => $strRenderOutKey eq 'index' ? '{[project-url-root]}' : "${strRenderOutKey}.html"});
                }
            }
        }
    }

    # Generate table of contents
    my $oPageTocBody;

    if ($self->{bToc})
    {
        my $oPageToc = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-toc');

        $oPageToc->addNew(HTML_DIV, 'page-toc-header')->addNew(HTML_DIV, 'page-toc-title', {strContent => "Table of Contents"});

        $oPageTocBody = $oPageToc->addNew(HTML_DIV, 'page-toc-body');
    }

    # Generate body
    my $oPageBody = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-body');
    my $iSectionNo = 1;

    # Render sections
    foreach my $oSection ($oPage->nodeList('section'))
    {
        my ($oChildSectionElement, $oChildSectionTocElement) =
            $self->sectionProcess($oSection, undef, "${iSectionNo}", 1);

        $oPageBody->add($oChildSectionElement);

        if (defined($oPageTocBody) && defined($oChildSectionTocElement))
        {
            $oPageTocBody->add($oChildSectionTocElement);
        }

        $iSectionNo++;
    }

    my $oPageFooter = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-footer', {strContent => '{[html-footer]}'});

    # Return from function and log return values if any
    return logDebugReturn
    (
        $strOperation,
        {name => 'strHtml', value =>
            $oHtmlBuilder->htmlGet(
                {bAnalytics =>
                    defined($self->{oManifest}->variableGet('analytics')) &&
                    $self->{oManifest}->variableGet('analytics') eq 'y'}),
         trace => true}
    );
}

####################################################################################################################################
# sectionProcess
####################################################################################################################################
sub sectionProcess
{
    my $self = shift;

    # Assign function parameters, defaults, and log debug info
    my
    (
        $strOperation,
        $oSection,
        $strAnchor,
        $strSectionNo,
        $iDepth
    ) =
        logDebugParam
        (
            __PACKAGE__ . '->sectionProcess', \@_,
            {name => 'oSection'},
            {name => 'strAnchor', required => false},
            {name => 'strSectionNo'},
            {name => 'iDepth'}
        );

    if ($oSection->paramGet('log'))
    {
        &log(INFO, (' ' x ($iDepth + 1)) . 'process section: ' . $oSection->paramGet('path'));
    }

    if ($iDepth > 3)
    {
        confess &log(ASSERT, "section depth of ${iDepth} exceeds maximum");
    }

    # Working variables
    $strAnchor =
        ($oSection->paramTest(XML_SECTION_PARAM_ANCHOR, XML_SECTION_PARAM_ANCHOR_VALUE_NOINHERIT) ?
            '' : (defined($strAnchor) ? "${strAnchor}/" : '')) .
        $oSection->paramGet('id');
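    # For illustration (hypothetical ids): a child section 'configure' nested in section 'quickstart' yields the anchor
    # 'quickstart/configure', unless the child is marked with the noinherit anchor param, in which case the anchor is just
    # 'configure'.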
    # Create the section toc element
    my $oSectionTocElement = new pgBackRestDoc::Html::DocHtmlElement(HTML_DIV, "section${iDepth}-toc");

    # Create the section element
    my $oSectionElement = new pgBackRestDoc::Html::DocHtmlElement(HTML_DIV, "section${iDepth}");

    # Add the section anchor
    $oSectionElement->addNew(HTML_A, undef, {strId => $strAnchor});

    # Add the section title to section and toc
    my $oSectionHeaderElement = $oSectionElement->addNew(HTML_DIV, "section${iDepth}-header");
    my $strSectionTitle = $self->processText($oSection->nodeGet('title')->textGet());

    if ($self->{bTocNumber})
    {
        $oSectionHeaderElement->addNew(HTML_DIV, "section${iDepth}-number");
    }

    $oSectionHeaderElement->addNew(HTML_DIV, "section${iDepth}-title", {strContent => $strSectionTitle});

    if ($self->{bTocNumber})
    {
        $oSectionTocElement->addNew(HTML_DIV, "section${iDepth}-toc-number");
    }

    my $oTocSectionTitleElement = $oSectionTocElement->addNew(HTML_DIV, "section${iDepth}-toc-title");

    $oTocSectionTitleElement->addNew(HTML_A, undef, {strContent => $strSectionTitle, strRef => "#${strAnchor}"});

    # Add the section intro if it exists
    if (defined($oSection->textGet(false)))
    {
        $oSectionElement->addNew(HTML_DIV, "section-intro", {strContent => $self->processText($oSection->textGet())});
    }

    # Add the section body
    my $oSectionBodyElement = $oSectionElement->addNew(HTML_DIV, "section-body");

    # Process each child
    my $iSectionNo = 1;

    foreach my $oChild ($oSection->nodeList())
    {
        &log(DEBUG, (' ' x ($iDepth + 2)) . 'process child ' . $oChild->nameGet());

        # Execute a command
        if ($oChild->nameGet() eq 'execute-list')
        {
            my $bShow = $oChild->paramTest('show', 'n') ? false : true;
            my $oExecuteBodyElement;
            my $bFirst = true;
            my $strHostName = $self->{oManifest}->variableReplace($oChild->paramGet('host'));

            if ($bShow)
            {
                my $oSectionBodyExecute = $oSectionBodyElement->addNew(HTML_DIV, "execute");

                $oSectionBodyExecute->addNew(
                    HTML_DIV, "execute-title",
                    {strContent => "${strHostName} " . $self->processText($oChild->nodeGet('title')->textGet())});

                $oExecuteBodyElement = $oSectionBodyExecute->addNew(HTML_DIV, "execute-body");
            }

            foreach my $oExecute ($oChild->nodeList('execute'))
            {
                my $bExeShow = !$oExecute->paramTest('show', 'n');
                my $bExeExpectedError = defined($oExecute->paramGet('err-expect', false));

                my ($strCommand, $strOutput) = $self->execute(
                    $oSection, $strHostName, $oExecute, {iIndent => $iDepth + 3, bShow => $bShow && $bExeShow});

                if ($bShow && $bExeShow)
                {
                    # Add continuation chars and proper spacing
                    $strCommand =~ s/\n/\n /smg;

                    $oExecuteBodyElement->addNew(HTML_PRE, "execute-body-cmd", {strContent => $strCommand, bPre => true});

                    my $strHighLight = $self->{oManifest}->variableReplace($oExecute->fieldGet('exe-highlight', false));
                    my $bHighLightFound = false;

                    if (defined($strOutput))
                    {
                        my $bHighLightOld;
                        my $strHighLightOutput;

                        if ($oExecute->fieldTest('exe-highlight-type', 'error'))
                        {
                            $bExeExpectedError = true;
                        }

                        foreach my $strLine (split("\n", $strOutput))
                        {
                            my $bHighLight = defined($strHighLight) && $strLine =~ /$strHighLight/;

                            if (defined($bHighLightOld) && $bHighLight != $bHighLightOld)
                            {
                                $oExecuteBodyElement->addNew(
                                    HTML_PRE,
                                    'execute-body-output' .
                                        ($bHighLightOld ? '-highlight' . ($bExeExpectedError ? '-error' : '') : ''),
                                    {strContent => $strHighLightOutput, bPre => true});

                                undef($strHighLightOutput);
                            }

                            $strHighLightOutput .= (defined($strHighLightOutput) ? "\n" : '') . $strLine;
                            $bHighLightOld = $bHighLight;

                            $bHighLightFound = $bHighLightFound ? true : $bHighLight ? true : false;
                        }

                        if (defined($bHighLightOld))
                        {
                            $oExecuteBodyElement->addNew(
                                HTML_PRE,
                                'execute-body-output' .
                                    ($bHighLightOld ? '-highlight' . ($bExeExpectedError ? '-error' : '') : ''),
                                {strContent => $strHighLightOutput, bPre => true});
                        }

                        $bFirst = true;
                    }

                    if ($self->{bExe} && $self->isRequired($oSection) && defined($strHighLight) && !$bHighLightFound)
                    {
                        confess &log(ERROR, "unable to find a match for highlight: ${strHighLight}");
                    }
                }

                $bFirst = false;
            }
        }
        # Add code block
        elsif ($oChild->nameGet() eq 'code-block')
        {
            my $strValue = $oChild->valueGet();

            # Trim linefeeds from the beginning and all whitespace from the end
            $strValue =~ s/^\n+|\s+$//g;

            # Find the line with the fewest leading spaces
            my $iSpaceMin = undef;

            foreach my $strLine (split("\n", $strValue))
            {
                $strLine =~ s/\s+$//;

                my $iSpaceMinTemp = length($strLine) - length(trim($strLine));

                if (!defined($iSpaceMin) || $iSpaceMinTemp < $iSpaceMin)
                {
                    $iSpaceMin = $iSpaceMinTemp;
                }
            }

            # Replace the leading spaces
            $strValue =~ s/^( ){$iSpaceMin}//smg;

            $oSectionBodyElement->addNew(HTML_PRE, 'code-block', {strContent => $strValue, bPre => true});
        }
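        # For example (hypothetical input): a code block whose lines are indented by 8 and 12 spaces has $iSpaceMin == 8, so
        # 8 leading spaces are stripped from every line above and the relative 4-space indent is preserved.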
$oyColumn[$iRowCellIdx]->paramGet("align", false, 'left') : 'left'; $oRowElement->addNew( HTML_TD, "table-data-${strAlign}", {strContent => $self->processText($oRowCell->textGet())}); } } } # Add descriptive text elsif ($oChild->nameGet() eq 'p') { $oSectionBodyElement-> addNew(HTML_DIV, 'section-body-text', {strContent => $self->processText($oChild->textGet())}); } # Add option descriptive text elsif ($oChild->nameGet() eq 'option-description') { my $strOption = $oChild->paramGet("key"); my $oDescription = ${$self->{oReference}->{oConfigHash}}{&CONFIG_HELP_OPTION}{$strOption}{&CONFIG_HELP_DESCRIPTION}; if (!defined($oDescription)) { confess &log(ERROR, "unable to find ${strOption} option in sections - try adding option?"); } $oSectionBodyElement-> addNew(HTML_DIV, 'section-body-text', {strContent => $self->processText($oDescription)}); } # Add cmd descriptive text elsif ($oChild->nameGet() eq 'cmd-description') { my $strCommand = $oChild->paramGet("key"); my $oDescription = ${$self->{oReference}->{oConfigHash}}{&CONFIG_HELP_COMMAND}{$strCommand}{&CONFIG_HELP_DESCRIPTION}; if (!defined($oDescription)) { confess &log(ERROR, "unable to find ${strCommand} command in sections - try adding command?"); } $oSectionBodyElement-> addNew(HTML_DIV, 'section-body-text', {strContent => $self->processText($oDescription)}); } # Add/remove backrest config options elsif ($oChild->nameGet() eq 'backrest-config') { my $oConfigElement = $self->backrestConfigProcess($oSection, $oChild, $iDepth + 3); if (defined($oConfigElement)) { $oSectionBodyElement->add($oConfigElement); } } # Add/remove postgres config options elsif ($oChild->nameGet() eq 'postgres-config') { my $oConfigElement = $self->postgresConfigProcess($oSection, $oChild, $iDepth + 3); if (defined($oConfigElement)) { $oSectionBodyElement->add($oConfigElement); } } # Add a list elsif ($oChild->nameGet() eq 'list') { my $oList = $oSectionBodyElement->addNew(HTML_UL, 'list-unordered'); foreach my $oListItem ($oChild->nodeList()) { $oList->addNew(HTML_LI, 'list-unordered', {strContent => $self->processText($oListItem->textGet())}); } } # Add a subtitle elsif ($oChild->nameGet() eq 'subtitle') { $oSectionBodyElement-> addNew(HTML_DIV, "section${iDepth}-subtitle", {strContent => $self->processText($oChild->textGet())}); } # Add a subsubtitle elsif ($oChild->nameGet() eq 'subsubtitle') { $oSectionBodyElement-> addNew(HTML_DIV, "section${iDepth}-subsubtitle", {strContent => $self->processText($oChild->textGet())}); } # Add a subsection elsif ($oChild->nameGet() eq 'section') { my ($oChildSectionElement, $oChildSectionTocElement) = $self->sectionProcess($oChild, $strAnchor, "${strSectionNo}.${iSectionNo}", $iDepth + 1); $oSectionBodyElement->add($oChildSectionElement); if (defined($oChildSectionTocElement)) { $oSectionTocElement->add($oChildSectionTocElement); } $iSectionNo++; } # Add an admonition (e.g. NOTE, WARNING, etc) elsif ($oChild->nameGet() eq 'admonition') { my $oAdmonition = $oSectionBodyElement->addNew(HTML_DIV, 'admonition'); $oAdmonition->addNew(HTML_DIV, $oChild->paramGet('type'), {strContent => uc($oChild->paramGet('type')) . ": "}); $oAdmonition->addNew(HTML_DIV, $oChild->paramGet('type') . 
'-text', {strContent => $self->processText($oChild->textGet())}); } # Check if the child can be processed by a parent else { $self->sectionChildProcess($oSection, $oChild, $iDepth + 1); } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oSectionElement', value => $oSectionElement, trace => true}, {name => 'oSectionTocElement', value => $oSection->paramTest('toc', 'n') ? undef : $oSectionTocElement, trace => true} ); } #################################################################################################################################### # backrestConfigProcess #################################################################################################################################### sub backrestConfigProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oConfig, $iDepth ) = logDebugParam ( __PACKAGE__ . '->backrestConfigProcess', \@_, {name => 'oSection'}, {name => 'oConfig'}, {name => 'iDepth'} ); # Generate the config my $oConfigElement; my ($strFile, $strConfig, $bShow) = $self->backrestConfig($oSection, $oConfig, $iDepth); if ($bShow) { my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); # Render the config $oConfigElement = new pgBackRestDoc::Html::DocHtmlElement(HTML_DIV, "config"); $oConfigElement-> addNew(HTML_DIV, "config-title", {strContent => "${strHostName}:${strFile}" . " " . $self->processText($oConfig->nodeGet('title')->textGet())}); my $oConfigBodyElement = $oConfigElement->addNew(HTML_DIV, "config-body"); # # $oConfigBodyElement-> # addNew(HTML_DIV, "config-body-title", # {strContent => "${strFile}:"}); # Convert linefeeds to br tags $strConfig =~ s/\n/<br\/>\n/g; $oConfigBodyElement-> addNew(HTML_DIV, "config-body-output", {strContent => $strConfig}); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oConfigElement', value => $oConfigElement, trace => true} ); } #################################################################################################################################### # postgresConfigProcess #################################################################################################################################### sub postgresConfigProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oConfig, $iDepth ) = logDebugParam ( __PACKAGE__ . '->postgresConfigProcess', \@_, {name => 'oSection'}, {name => 'oConfig'}, {name => 'iDepth'} ); # Generate the config my $oConfigElement; my ($strFile, $strConfig, $bShow) = $self->postgresConfig($oSection, $oConfig, $iDepth); if ($bShow) { # Render the config my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); $oConfigElement = new pgBackRestDoc::Html::DocHtmlElement(HTML_DIV, "config"); $oConfigElement-> addNew(HTML_DIV, "config-title", {strContent => "${strHostName}:${strFile}" . " " . $self->processText($oConfig->nodeGet('title')->textGet())}); my $oConfigBodyElement = $oConfigElement->addNew(HTML_DIV, "config-body"); # $oConfigBodyElement-> # addNew(HTML_DIV, "config-body-title", # {strContent => "append to ${strFile}:"}); # Convert linefeeds to br tags $strConfig =~ s/\n/<br\/>\n/g; $oConfigBodyElement-> addNew(HTML_DIV, "config-body-output", {strContent => defined($strConfig) ?
$strConfig : ''}); $oConfig->fieldSet('actual-config', $strConfig); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'oConfigElement', value => $oConfigElement, trace => true} ); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Html/DocHtmlSite.pm000066400000000000000000000127771500617037600250010ustar00rootroot00000000000000#################################################################################################################################### # DOC HTML SITE MODULE #################################################################################################################################### package pgBackRestDoc::Html::DocHtmlSite; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; use Data::Dumper; use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use File::Copy; use POSIX qw(strftime); use Storable qw(dclone); use pgBackRestTest::Common::ExecuteTest; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Html::DocHtmlPage; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; $self->{strClass} = $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{oManifest}, $self->{strXmlPath}, $self->{strHtmlPath}, $self->{strCssFile}, $self->{strFaviconFile}, $self->{strProjectLogoFile}, $self->{bExe} ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oManifest'}, {name => 'strXmlPath'}, {name => 'strHtmlPath'}, {name => 'strCssFile'}, {name => 'strFaviconFile', required => false}, {name => 'strProjectLogoFile', required => false}, {name => 'bExe'} ); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # process # # Generate the site html #################################################################################################################################### sub process { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->process'); # Get render options my $oRender = $self->{oManifest}->renderGet(RENDER_TYPE_HTML); my $bMenu = $$oRender{&RENDER_MENU}; my $bPretty = $$oRender{&RENDER_PRETTY}; my $bCompact = $$oRender{&RENDER_COMPACT}; if (!$bCompact) { # Copy the css file my $strCssFileDestination = "$self->{strHtmlPath}/default.css"; copy($self->{strCssFile}, $strCssFileDestination) or confess &log(ERROR, "unable to copy $self->{strCssFile} to ${strCssFileDestination}"); # Copy the favicon file if (defined($self->{strFaviconFile})) { my $strFaviconFileDestination = "$self->{strHtmlPath}/" . 
$self->{oManifest}->variableGet('project-favicon'); copy($self->{strFaviconFile}, $strFaviconFileDestination) or confess &log(ERROR, "unable to copy $self->{strFaviconFile} to ${strFaviconFileDestination}"); } # Copy the project logo file if (defined($self->{strProjectLogoFile})) { my $strProjectLogoFileDestination = "$self->{strHtmlPath}/" . $self->{oManifest}->variableGet('project-logo'); copy($self->{strProjectLogoFile}, $strProjectLogoFileDestination) or confess &log(ERROR, "unable to copy $self->{strProjectLogoFile} to ${strProjectLogoFileDestination}"); } } foreach my $strPageId ($self->{oManifest}->renderOutList(RENDER_TYPE_HTML)) { &log(INFO, " render out: ${strPageId}"); my $strHtml; my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_HTML, $strPageId); eval { $strHtml = $self->{oManifest}->variableReplace( new pgBackRestDoc::Html::DocHtmlPage( $self->{oManifest}, $strPageId, $bMenu, $self->{bExe}, $bCompact, ${$self->{oManifest}->storage()->get($self->{strCssFile})}, $bPretty)->process()); return true; } or do { my $oException = $@; if (exceptionCode($oException) == ERROR_FILE_INVALID) { my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_HTML, $strPageId); $self->{oManifest}->cacheReset($$oRenderOut{source}); $strHtml = $self->{oManifest}->variableReplace( new pgBackRestDoc::Html::DocHtmlPage( $self->{oManifest}, $strPageId, $bMenu, $self->{bExe}, $bCompact, ${$self->{oManifest}->storage()->get($self->{strCssFile})}, $bPretty)->process()); } else { confess $oException; } }; # Save the html page my $strFile = "$self->{strHtmlPath}/" . (defined($$oRenderOut{file}) ? $$oRenderOut{file} : "${strPageId}.html"); $self->{oManifest}->storage()->put($strFile, $strHtml); } # Return from function and log return values if any logDebugReturn($strOperation); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Latex/000077500000000000000000000000001500617037600224175ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Latex/DocLatex.pm000066400000000000000000000136271500617037600244710ustar00rootroot00000000000000#################################################################################################################################### # DOC LATEX MODULE #################################################################################################################################### package pgBackRestDoc::Latex::DocLatex; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; use Cwd qw(abs_path); use Data::Dumper; use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname basename); use File::Copy; use POSIX qw(strftime); use Storable qw(dclone); use pgBackRestTest::Common::ExecuteTest; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Latex::DocLatexSection; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; $self->{strClass} = $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{oManifest}, $self->{strXmlPath}, $self->{strLatexPath}, $self->{strPreambleFile}, $self->{bExe} 
) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oManifest'}, {name => 'strXmlPath'}, {name => 'strLatexPath'}, {name => 'strPreambleFile'}, {name => 'bExe'} ); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # process # # Generate the PDF #################################################################################################################################### sub process { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->process'); my $oRender = $self->{oManifest}->renderGet(RENDER_TYPE_PDF); my $strLogo = $self->{oManifest}->variableGet('pdf-resource-logo'); if (!defined($strLogo)) { $strLogo = 'blank.eps'; } my ($strExt) = $strLogo =~ /(\.[^.]+)$/; my $strLogoPath = defined($self->{oManifest}->variableGet('pdf-resource-path')) ? $self->{oManifest}->variableGet('pdf-resource-path') : "$self->{oManifest}{strDocPath}/resource/latex/"; # Copy the logo copy($strLogoPath . $strLogo, "$self->{strLatexPath}/logo$strExt") or confess &log(ERROR, "unable to copy logo"); my $strLatex = $self->{oManifest}->variableReplace( ${$self->{oManifest}->storage()->get($self->{strPreambleFile})}, 'latex') . "\n"; # ??? Temp hack for underscores in filename $strLatex =~ s/pgaudit\\\_doc/pgaudit\_doc/g; # Process the sources in the order listed in the manifest.xml foreach my $strPageId (@{${$self->{oManifest}->renderGet(RENDER_TYPE_PDF)}{stryOrder}}) { &log(INFO, " render out: ${strPageId}"); eval { my $oDocLatexSection = new pgBackRestDoc::Latex::DocLatexSection($self->{oManifest}, $strPageId, $self->{bExe}); # Append the latex for this page $strLatex .= $oDocLatexSection->process(); return true; } or do { my $oException = $EVAL_ERROR; if (exceptionCode($oException) == ERROR_FILE_INVALID) { my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_HTML, $strPageId); $self->{oManifest}->cacheReset($$oRenderOut{source}); my $oDocLatexSection = new pgBackRestDoc::Latex::DocLatexSection($self->{oManifest}, $strPageId, $self->{bExe}); # Append the latex for this page $strLatex .= $oDocLatexSection->process(); } else { confess $oException; } }; } $strLatex .= "\n% " . ('-' x 130) . "\n% End document\n% " . ('-' x 130) . "\n\\end{document}\n"; # Get base name of output file to use for processing (my $strLatexFileBase = basename($$oRender{file})) =~ s/\.[^.]+$//; $strLatexFileBase = $self->{oManifest}->variableReplace($strLatexFileBase); # Name of latex file to use for output and processing my $strLatexFileName = $self->{oManifest}->variableReplace("$self->{strLatexPath}/" . $strLatexFileBase . '.tex'); # Output latex and build PDF. pdflatex is run twice so the table of contents and cross-references resolve on the second pass. $self->{oManifest}->storage()->put($strLatexFileName, $strLatex); executeTest("pdflatex -output-directory=$self->{strLatexPath} -shell-escape $strLatexFileName", {bSuppressStdErr => true}); executeTest("pdflatex -output-directory=$self->{strLatexPath} -shell-escape $strLatexFileName", {bSuppressStdErr => true}); # Determine path of output file my $strLatexOutputName = $oRender->{file}; if ($strLatexOutputName !~ /^\//) { $strLatexOutputName = abs_path($self->{strLatexPath} . "/" . $oRender->{file}); } # Copy pdf file if it is not already in the correct place if ($strLatexOutputName ne "$self->{strLatexPath}/" . $strLatexFileBase . '.pdf') { copy("$self->{strLatexPath}/" . $strLatexFileBase .
'.pdf', $strLatexOutputName) or confess &log(ERROR, "unable to copy pdf to " . $strLatexOutputName); } # Return from function and log return values if any logDebugReturn($strOperation); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Latex/DocLatexSection.pm000066400000000000000000000375701500617037600260170ustar00rootroot00000000000000#################################################################################################################################### # DOC LATEX SECTION MODULE #################################################################################################################################### package pgBackRestDoc::Latex::DocLatexSection; use parent 'pgBackRestDoc::Common::DocExecute'; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Exporter qw(import); our @EXPORT = qw(); use pgBackRestDoc::Common::DocConfig; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Assign function parameters, defaults, and log debug info my ( $strOperation, $oManifest, $strRenderOutKey, $bExe ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oManifest'}, {name => 'strRenderOutKey'}, {name => 'bExe'} ); # Create the class hash my $self = $class->SUPER::new('latex', $oManifest, $strRenderOutKey, $bExe); bless $self, $class; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # process # # Generate the latex #################################################################################################################################### sub process { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->process'); # Working variables my $oPage = $self->{oDoc}; my $strLatex; # Initialize page my $strTitle = $oPage->paramGet('title'); my $strSubTitle = $oPage->paramGet('subtitle', false); # Render sections foreach my $oSection ($oPage->nodeList('section')) { $strLatex .= (defined($strLatex) ? "\n" : '') . $self->sectionProcess($oSection, undef, 1); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strLatex', value => $strLatex, trace => true} ); } #################################################################################################################################### # sectionProcess #################################################################################################################################### sub sectionProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $strSection, $iDepth ) = logDebugParam ( __PACKAGE__ . '->sectionProcess', \@_, {name => 'oSection'}, {name => 'strSection', required => false}, {name => 'iDepth'} ); if ($oSection->paramGet('log')) { &log(INFO, (' ' x ($iDepth + 1)) . 'process section: ' .
$oSection->paramGet('path')); } # Create section type my $strSectionTitle = $self->processText($oSection->nodeGet('title')->textGet()); $strSection .= (defined($strSection) ? ', ' : '') . "'${strSectionTitle}' " . ('Sub' x ($iDepth - 1)) . "Section"; # Create section comment my $strLatex = "% ${strSection}\n% " . ('-' x 130) . "\n"; # Exclude from table of contents if requested if ($iDepth <= 3 && $oSection->paramTest('toc', 'n')) { $strLatex .= '\\addtocontents{toc}{\\protect\\setcounter{tocdepth}{' . ($iDepth - 1) . "}}\n"; } # Create section name $strLatex .= '\\'; if ($iDepth <= 3) { $strLatex .= ($iDepth > 1 ? ('sub' x ($iDepth - 1)) : '') . "section"; } elsif ($iDepth == 4) { $strLatex .= 'paragraph'; } else { confess &log(ASSERT, "section depth of ${iDepth} exceeds maximum"); } $strLatex .= "\{${strSectionTitle}\}\\label{" . $oSection->paramGet('path', false) . "}\n"; # Reset table of contents numbering if the section was excluded if ($iDepth <= 3 && $oSection->paramTest('toc', 'n')) { $strLatex .= '\\addtocontents{toc}{\\protect\\setcounter{tocdepth}{' . $iDepth . "}}\n"; } foreach my $oChild ($oSection->nodeList()) { &log(DEBUG, (' ' x ($iDepth + 2)) . 'process child ' . $oChild->nameGet()); # Execute a command if ($oChild->nameGet() eq 'execute-list') { my $bShow = $oChild->paramTest('show', 'n') ? false : true; my $strHostName = $self->{oManifest}->variableReplace($oChild->paramGet('host')); if ($bShow) { $strLatex .= "\n\\begin\{lstlisting\}[title=\{\\textnormal{\\textbf\{${strHostName}}} --- " . $self->processText($oChild->nodeGet('title')->textGet()) . "}]\n"; } foreach my $oExecute ($oChild->nodeList('execute')) { my $bExeShow = !$oExecute->paramTest('show', 'n'); my ($strCommand, $strOutput) = $self->execute( $oSection, $self->{oManifest}->variableReplace($oChild->paramGet('host')), $oExecute, {iIndent => $iDepth + 3, bShow => $bShow && $bExeShow}); if ($bShow && $bExeShow) { $strLatex .= "${strCommand}\n"; if (defined($strOutput)) { $strLatex .= "\nOutput:\n\n${strOutput}\n"; } } } if ($bShow) { $strLatex .= "\\end{lstlisting}\n"; } } # Add code block elsif ($oChild->nameGet() eq 'code-block') { my $strTitle = $self->{oManifest}->variableReplace($oChild->paramGet("title", false), 'latex'); if (defined($strTitle) && $strTitle eq '') { undef($strTitle) } # Begin the code listing if (!defined($strTitle)) { $strLatex .= "\\vspace{.75em}\n"; } $strLatex .= "\\begin\{lstlisting\}"; # Add the title if one is provided if (defined($strTitle)) { $strLatex .= "[title=\{${strTitle}:\}]"; } # End the code listing $strLatex .= "\n" . trim($oChild->valueGet()) . "\n" . "\\end{lstlisting}\n"; } # Add table elsif ($oChild->nameGet() eq 'table') { my $oHeader; my @oyColumn; if ($oChild->nodeTest('table-header')) { $oHeader = $oChild->nodeGet('table-header'); @oyColumn = $oHeader->nodeList('table-column'); } my $strWidth = '{' . (defined($oHeader) && $oHeader->paramTest('width') ? ($oHeader->paramGet('width') / 100) . '\textwidth' : '\textwidth') . '}'; # Build the table $strLatex .= "\\vspace{1em}\\newline\n\\begin{table}\n\\begin{tabularx}${strWidth}{|"; # Build the table header foreach my $oColumn (@oyColumn) { my $strAlignCode; my $strAlign = $oColumn->paramGet("align", false); # If fill is specified then use X or the custom designed alignments in the preamble to fill and justify the columns. 
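# (tabularx itself only provides the left-justified fill column type X; the R and C codes emitted below are assumed to be custom column types declared in the LaTeX preamble, e.g. something like \newcolumntype{R}{>{\raggedleft\arraybackslash}X}, which keep the fill behavior of X while aligning content right or center.)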
if ($oColumn->paramTest('fill') && $oColumn->paramGet('fill', false) eq 'y') { if (!defined($strAlign) || $strAlign eq 'left') { $strAlignCode = 'X'; } elsif ($strAlign eq 'right') { $strAlignCode = 'R'; } elsif ($strAlign eq 'center') { $strAlignCode = 'C'; } else { confess &log(ERROR, "align '${strAlign}' not valid when fill=y"); } } else { if (!defined($strAlign) || $strAlign eq 'left') { $strAlignCode = 'l'; } elsif ($strAlign eq 'center') { $strAlignCode = 'c'; } elsif ($strAlign eq 'right') { $strAlignCode = 'r'; } else { confess &log(ERROR, "align '${strAlign}' not valid"); } } # $strLatex .= 'p{' . $oColumn->paramGet("width") . '} | '; $strLatex .= $strAlignCode . ' | '; } # If table-header not provided then default the column alignment and fill by using the number of columns in the 1st row if (!defined($oHeader)) { my @oyRow = $oChild->nodeGet('table-data')->nodeList('table-row'); foreach my $oRowCell ($oyRow[0]->nodeList('table-cell')) { $strLatex .= 'X|'; } } $strLatex .= "}\n"; my $strLine; if (defined($oHeader)) { $strLatex .= "\\hline"; $strLatex .= "\\rowcolor{ltgray}\n"; foreach my $oColumn (@oyColumn) { $strLine .= (defined($strLine) ? ' & ' : '') . '\textbf{' . $self->processText($oColumn->textGet()) . '}'; } $strLatex .= "${strLine}\\\\"; } # Build the rows foreach my $oRow ($oChild->nodeGet('table-data')->nodeList('table-row')) { $strLatex .= "\\hline\n"; undef($strLine); foreach my $oRowCell ($oRow->nodeList('table-cell')) { $strLine .= (defined($strLine) ? ' & ' : '') . $self->processText($oRowCell->textGet()); } $strLatex .= "${strLine}\\\\"; } $strLatex .= "\\hline\n\\end{tabularx}\n"; # If there is a title for the table, add it. Ignore the label since LaTex will automatically generate numbered labels. # e.g. Table 1: if ($oChild->nodeGet("title", false)) { $strLatex .= "\\caption{" . $self->processText($oChild->nodeGet("title")->textGet()) . "}\n"; } $strLatex .= "\\end{table}\n"; } # Add descriptive text elsif ($oChild->nameGet() eq 'p') { $strLatex .= "\n\\begin{sloppypar}" . $self->processText($oChild->textGet()) . "\\end{sloppypar}\n"; } # Add option descriptive text elsif ($oChild->nameGet() eq 'option-description') { my $strOption = $oChild->paramGet("key"); my $oDescription = ${$self->{oReference}->{oConfigHash}}{&CONFIG_HELP_OPTION}{$strOption}{&CONFIG_HELP_DESCRIPTION}; if (!defined($oDescription)) { confess &log(ERROR, "unable to find ${strOption} option in sections - try adding option?"); } $strLatex .= "\n\\begin{sloppypar}" . $self->processText($oDescription) . "\\end{sloppypar}\n"; } # Add cmd descriptive text elsif ($oChild->nameGet() eq 'cmd-description') { my $strCommand = $oChild->paramGet("key"); my $oDescription = ${$self->{oReference}->{oConfigHash}}{&CONFIG_HELP_COMMAND}{$strCommand}{&CONFIG_HELP_DESCRIPTION}; if (!defined($oDescription)) { confess &log(ERROR, "unable to find ${strCommand} command in sections - try adding command?"); } $strLatex .= "\n\\begin{sloppypar}" . $self->processText($oDescription) . "\\end{sloppypar}\n"; } # Add a list elsif ($oChild->nameGet() eq 'list') { $strLatex .= "\n\\begin{itemize}"; foreach my $oListItem ($oChild->nodeList()) { $strLatex .= "\n \\item " . $self->processText($oListItem->textGet()); } $strLatex .= "\n\\end{itemize}"; } # Add/remove config options elsif ($oChild->nameGet() eq 'backrest-config' || $oChild->nameGet() eq 'postgres-config') { $strLatex .= $self->configProcess($oSection, $oChild, $iDepth + 3); } # Add a subsection elsif ($oChild->nameGet() eq 'section') { $strLatex .= "\n" . 
$self->sectionProcess($oChild, $strSection, $iDepth + 1); } # Add an admonition (e.g. NOTE, WARNING, etc) elsif ($oChild->nameGet() eq 'admonition') { $strLatex .= "\n\\vspace{.5em}\\begin{leftbar}"; $strLatex .= "\n\\begin{sloppypar}\\textit{\\textbf{" . uc($oChild->paramGet('type')) . ": }"; $strLatex .= $self->processText($oChild->textGet()) . "}\\end{sloppypar}"; $strLatex .= "\n\\end{leftbar}\n"; } # Check if the child can be processed by a parent else { $self->sectionChildProcess($oSection, $oChild, $iDepth + 1); } } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strSection', value => $strLatex, trace => true} ); } #################################################################################################################################### # configProcess #################################################################################################################################### sub configProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oConfig, $iDepth ) = logDebugParam ( __PACKAGE__ . '->configProcess', \@_, {name => 'oSection'}, {name => 'oConfig'}, {name => 'iDepth'} ); # Working variables my $strLatex = ''; my $strFile; my $strConfig; my $bShow = true; # Generate the config if ($oConfig->nameGet() eq 'backrest-config') { ($strFile, $strConfig, $bShow) = $self->backrestConfig($oSection, $oConfig, $iDepth); } else { ($strFile, $strConfig, $bShow) = $self->postgresConfig($oSection, $oConfig, $iDepth); } if ($bShow) { my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); # Replace _ in filename $strFile = $self->variableReplace($strFile); # Render the config $strLatex = "\n\\begin\{lstlisting\}[title=\{\\textnormal{\\textbf\{${strHostName}}}:\\textnormal{\\texttt\{${strFile}}} --- " . $self->processText($oConfig->nodeGet('title')->textGet()) . "}]\n" . (defined($strConfig) ? $strConfig : '') . 
"\\end{lstlisting}\n"; } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strConfig', value => $strLatex, trace => true} ); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Markdown/000077500000000000000000000000001500617037600231245ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Markdown/DocMarkdown.pm000066400000000000000000000057521500617037600257030ustar00rootroot00000000000000#################################################################################################################################### # DOC MARKDOWN MODULE #################################################################################################################################### package pgBackRestDoc::Markdown::DocMarkdown; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Data::Dumper; use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use File::Copy; use POSIX qw(strftime); use Storable qw(dclone); use pgBackRestTest::Common::ExecuteTest; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Markdown::DocMarkdownRender; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Create the class hash my $self = {}; bless $self, $class; $self->{strClass} = $class; # Assign function parameters, defaults, and log debug info ( my $strOperation, $self->{oManifest}, $self->{strXmlPath}, $self->{strMarkdownPath}, $self->{bExe} ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oManifest'}, {name => 'strXmlPath'}, {name => 'strMarkdownPath'}, {name => 'bExe'} ); # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # process # # Generate the site html #################################################################################################################################### sub process { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->process'); foreach my $strRenderOutId ($self->{oManifest}->renderOutList(RENDER_TYPE_MARKDOWN)) { my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_MARKDOWN, $strRenderOutId); my $strFile = "$self->{strMarkdownPath}/" . (defined($$oRenderOut{file}) ? 
$$oRenderOut{file} : "${strRenderOutId}.md"); &log(INFO, " render out: ${strRenderOutId}"); # Save the markdown page $self->{oManifest}->storage()->put( $strFile, $self->{oManifest}->variableReplace((new pgBackRestDoc::Markdown::DocMarkdownRender($self->{oManifest}, $strRenderOutId, $self->{bExe}))->process())); } # Return from function and log return values if any logDebugReturn($strOperation); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/Markdown/DocMarkdownRender.pm000066400000000000000000000472551500617037600270470ustar00rootroot00000000000000#################################################################################################################################### # DOC MARKDOWN RENDER MODULE #################################################################################################################################### package pgBackRestDoc::Markdown::DocMarkdownRender; use parent 'pgBackRestDoc::Common::DocExecute'; use strict; use warnings FATAL => qw(all); use Carp qw(confess); use Data::Dumper; use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); use File::Copy; use Storable qw(dclone); use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; #################################################################################################################################### # CONSTRUCTOR #################################################################################################################################### sub new { my $class = shift; # Class name # Assign function parameters, defaults, and log debug info my ( $strOperation, $oManifest, $strRenderOutKey, $bExe ) = logDebugParam ( __PACKAGE__ . '->new', \@_, {name => 'oManifest'}, {name => 'strRenderOutKey'}, {name => 'bExe'} ); # Create the class hash my $self = $class->SUPER::new(RENDER_TYPE_MARKDOWN, $oManifest, $strRenderOutKey, $bExe); bless $self, $class; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'self', value => $self} ); } #################################################################################################################################### # process # # Generate the markdown #################################################################################################################################### sub process { my $self = shift; # Assign function parameters, defaults, and log debug info my $strOperation = logDebugParam(__PACKAGE__ . '->process'); # Working variables my $oPage = $self->{oDoc}; # Initialize page my $strMarkdown = "# " . $oPage->paramGet('title'); if (defined($oPage->paramGet('subtitle', false))) { $strMarkdown .= '
    ' . $oPage->paramGet('subtitle') . ''; } # my $oHtmlBuilder = new pgBackRestDoc::Html::DocHtmlBuilder("{[project]} - Reliable PostgreSQL Backup", # $strTitle . (defined($strSubTitle) ? " - ${strSubTitle}" : ''), # $self->{bPretty}); # # # Generate header # my $oPageHeader = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-header'); # # $oPageHeader-> # addNew(HTML_DIV, 'page-header-title', # {strContent => $strTitle}); # # if (defined($strSubTitle)) # { # $oPageHeader-> # addNew(HTML_DIV, 'page-header-subtitle', # {strContent => $strSubTitle}); # } # # # Generate menu # my $oMenuBody = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-menu')->addNew(HTML_DIV, 'menu-body'); # # if ($self->{strRenderOutKey} ne 'index') # { # my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_HTML, 'index'); # # $oMenuBody-> # addNew(HTML_DIV, 'menu')-> # addNew(HTML_A, 'menu-link', {strContent => $$oRenderOut{menu}, strRef => '{[project-url-root]}'}); # } # # foreach my $strRenderOutKey ($self->{oManifest}->renderOutList(RENDER_TYPE_HTML)) # { # if ($strRenderOutKey ne $self->{strRenderOutKey} && $strRenderOutKey ne 'index') # { # my $oRenderOut = $self->{oManifest}->renderOutGet(RENDER_TYPE_HTML, $strRenderOutKey); # # $oMenuBody-> # addNew(HTML_DIV, 'menu')-> # addNew(HTML_A, 'menu-link', {strContent => $$oRenderOut{menu}, strRef => "${strRenderOutKey}.html"}); # } # } # # # Generate table of contents # my $oPageTocBody; # # if (!defined($oPage->paramGet('toc', false)) || $oPage->paramGet('toc') eq 'y') # { # my $oPageToc = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-toc'); # # $oPageToc-> # addNew(HTML_DIV, 'page-toc-title', # {strContent => "Table of Contents"}); # # $oPageTocBody = $oPageToc-> # addNew(HTML_DIV, 'page-toc-body'); # } # # # Generate body # my $oPageBody = $oHtmlBuilder->bodyGet()->addNew(HTML_DIV, 'page-body'); # Render sections foreach my $oSection ($oPage->nodeList('section')) { $strMarkdown = trim($strMarkdown) . "\n\n" . $self->sectionProcess($oSection, 1); } $strMarkdown .= "\n"; # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strMarkdown', value => $strMarkdown, trace => true} ); } #################################################################################################################################### # sectionProcess #################################################################################################################################### sub sectionProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $iDepth ) = logDebugParam ( __PACKAGE__ . '->sectionProcess', \@_, {name => 'oSection'}, {name => 'iDepth'} ); if ($oSection->paramGet('log')) { &log(INFO, (' ' x ($iDepth + 1)) . 'process section: ' . $oSection->paramGet('path')); } if ($iDepth > 3) { confess &log(ASSERT, "section depth of ${iDepth} exceeds maximum"); } my $strMarkdown = '#' . ('#' x $iDepth) . ' ' . $self->processText($oSection->nodeGet('title')->textGet()); my $strLastChild = undef; foreach my $oChild ($oSection->nodeList()) { &log(DEBUG, (' ' x ($iDepth + 2)) . 'process child ' . $oChild->nameGet()); # Execute a command if ($oChild->nameGet() eq 'execute-list') { my $bShow = $oChild->paramTest('show', 'n') ? false : true; my $bFirst = true; my $strHostName = $self->{oManifest}->variableReplace($oChild->paramGet('host')); my $bOutput = false; if ($bShow) { $strMarkdown .= "\n\n${strHostName} => " . $self->processText($oChild->nodeGet('title')->textGet()) . 
"\n```\n"; } foreach my $oExecute ($oChild->nodeList('execute')) { my $bExeShow = !$oExecute->paramTest('show', 'n'); my $bExeExpectedError = defined($oExecute->paramGet('err-expect', false)); if ($bOutput) { confess &log(ERROR, "only the last command can have output"); } my ($strCommand, $strOutput) = $self->execute( $oSection, $strHostName, $oExecute, {iIndent => $iDepth + 3, bShow => $bShow && $bExeShow}); if ($bShow && $bExeShow) { # Add continuation chars and proper spacing $strCommand =~ s/\n/\n /smg; $strMarkdown .= "${strCommand}\n"; my $strHighLight = $self->{oManifest}->variableReplace($oExecute->fieldGet('exe-highlight', false)); my $bHighLightFound = false; if (defined($strOutput)) { $strMarkdown .= "\n--- output ---\n\n"; if ($oExecute->fieldTest('exe-highlight-type', 'error')) { $bExeExpectedError = true; } foreach my $strLine (split("\n", $strOutput)) { my $bHighLight = defined($strHighLight) && $strLine =~ /$strHighLight/; if ($bHighLight) { $strMarkdown .= $bExeExpectedError ? "ERR" : "-->"; } else { $strMarkdown .= " "; } $strMarkdown .= " ${strLine}\n"; $bHighLightFound = $bHighLightFound ? true : $bHighLight ? true : false; } $bFirst = true; } if ($self->{bExe} && $self->isRequired($oSection) && defined($strHighLight) && !$bHighLightFound) { confess &log(ERROR, "unable to find a match for highlight: ${strHighLight}"); } } $bFirst = false; } $strMarkdown .= "```"; } # Add code block elsif ($oChild->nameGet() eq 'code-block') { if ($oChild->paramTest('title')) { if (defined($strLastChild) && $strLastChild ne 'code-block') { $strMarkdown .= "\n"; } $strMarkdown .= "\n_" . $oChild->paramGet('title') . "_:"; } $strMarkdown .= "\n```"; if ($oChild->paramTest('type')) { $strMarkdown .= $oChild->paramGet('type'); } $strMarkdown .= "\n" . trim($oChild->valueGet()) . "\n```"; } # Add descriptive text elsif ($oChild->nameGet() eq 'p') { if (defined($strLastChild) && $strLastChild ne 'code-block' && $strLastChild ne 'table') { $strMarkdown .= "\n"; } $strMarkdown .= "\n" . $self->processText($oChild->textGet()); } # Add option descriptive text elsif ($oChild->nameGet() eq 'option-description') { # my $strOption = $oChild->paramGet("key"); # my $oDescription = ${$self->{oReference}->{oConfigHash}}{&CONFIG_HELP_OPTION}{$strOption}{&CONFIG_HELP_DESCRIPTION}; # # if (!defined($oDescription)) # { # confess &log(ERROR, "unable to find ${strOption} option in sections - try adding command?"); # } # # $oSectionBodyElement-> # addNew(HTML_DIV, 'section-body-text', # {strContent => $self->processText($oDescription)}); } # Add/remove backrest config options elsif ($oChild->nameGet() eq 'backrest-config') { # my $oConfigElement = $self->backrestConfigProcess($oSection, $oChild, $iDepth + 3); # # if (defined($oConfigElement)) # { # $oSectionBodyElement->add($oConfigElement); # } } # Add/remove postgres config options elsif ($oChild->nameGet() eq 'postgres-config') { # my $oConfigElement = $self->postgresConfigProcess($oSection, $oChild, $iDepth + 3); # # if (defined($oConfigElement)) # { # $oSectionBodyElement->add($oConfigElement); # } } # Add a list elsif ($oChild->nameGet() eq 'list') { foreach my $oListItem ($oChild->nodeList()) { $strMarkdown .= "\n\n- " . $self->processText($oListItem->textGet()); } } # Add a subsection elsif ($oChild->nameGet() eq 'section') { $strMarkdown = trim($strMarkdown) . "\n\n" . 
$self->sectionProcess($oChild, $iDepth + 1); } elsif ($oChild->nameGet() eq 'table') { my $oTableTitle; if ($oChild->nodeTest('title')) { $oTableTitle = $oChild->nodeGet('title'); } my $oHeader; my @oyColumn; if ($oChild->nodeTest('table-header')) { $oHeader = $oChild->nodeGet('table-header'); @oyColumn = $oHeader->nodeList('table-column'); } if (defined($oTableTitle)) { # Print the label (e.g. Table 1:) in front of the title if one exists $strMarkdown .= "\n\n**" . ($oTableTitle->paramTest('label') ? ($oTableTitle->paramGet('label') . ': ' . $self->processText($oTableTitle->textGet())) : $self->processText($oTableTitle->textGet())) . "**\n\n"; } else { $strMarkdown .= "\n\n"; } my $strHeaderText = "| "; my $strHeaderIndicator = "| "; for (my $iColCellIdx = 0; $iColCellIdx < @oyColumn; $iColCellIdx++) { my $strAlign = $oyColumn[$iColCellIdx]->paramGet("align", false, 'left'); $strHeaderText .= $self->processText($oyColumn[$iColCellIdx]->textGet()) . (($iColCellIdx < @oyColumn - 1) ? " | " : " |\n"); $strHeaderIndicator .= ($strAlign eq 'left' || $strAlign eq 'center') ? ":---" : "---"; $strHeaderIndicator .= ($strAlign eq 'right' || $strAlign eq 'center') ? "---:" : ""; $strHeaderIndicator .= ($iColCellIdx < @oyColumn - 1) ? " | " : " |\n"; } # Markdown requires a table header so if not provided then create an empty header row and default the column alignment # left by using the number of columns in the 1st row if (!defined($oHeader)) { my @oyRow = $oChild->nodeGet('table-data')->nodeList('table-row'); foreach my $oRowCell ($oyRow[0]->nodeList('table-cell')) { $strHeaderText .= " | "; $strHeaderIndicator .= ":--- | "; } $strHeaderText .= "\n"; $strHeaderIndicator .= "\n"; } $strMarkdown .= (defined($strHeaderText) ? $strHeaderText : '') . $strHeaderIndicator; # Build the rows foreach my $oRow ($oChild->nodeGet('table-data')->nodeList('table-row')) { my @oRowCellList = $oRow->nodeList('table-cell'); $strMarkdown .= "| "; for (my $iRowCellIdx = 0; $iRowCellIdx < @oRowCellList; $iRowCellIdx++) { my $oRowCell = $oRowCellList[$iRowCellIdx]; $strMarkdown .= $self->processText($oRowCell->textGet()) . (($iRowCellIdx < @oRowCellList -1) ? " | " : " |\n"); } } } # Add an admonition (e.g. NOTE, WARNING, etc) elsif ($oChild->nameGet() eq 'admonition') { $strMarkdown .= "\n> **" . uc($oChild->paramGet('type')) . ":** " . $self->processText($oChild->textGet()); } # Check if the child can be processed by a parent else { $self->sectionChildProcess($oSection, $oChild, $iDepth + 1); } $strLastChild = $oChild->nameGet(); } # Return from function and log return values if any return logDebugReturn ( $strOperation, {name => 'strMarkdown', value => $strMarkdown, trace => true} ); } #################################################################################################################################### # backrestConfigProcess #################################################################################################################################### sub backrestConfigProcess { my $self = shift; # Assign function parameters, defaults, and log debug info my ( $strOperation, $oSection, $oConfig, $iDepth ) = logDebugParam ( __PACKAGE__ . 
'->backrestConfigProcess', \@_, {name => 'oSection'}, {name => 'oConfig'}, {name => 'iDepth'} ); # # Generate the config # my $oConfigElement; # my ($strFile, $strConfig, $bShow) = $self->backrestConfig($oSection, $oConfig, $iDepth); # # if ($bShow) # { # my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); # # # Render the config # $oConfigElement = new pgBackRestDoc::Html::DocHtmlElement(HTML_DIV, "config"); # # $oConfigElement-> # addNew(HTML_DIV, "config-title", # {strContent => "${strHostName}:${strFile}" . # " " . $self->processText($oConfig->nodeGet('title')->textGet())}); # # my $oConfigBodyElement = $oConfigElement->addNew(HTML_DIV, "config-body"); # # # # $oConfigBodyElement-> # # addNew(HTML_DIV, "config-body-title", # # {strContent => "${strFile}:"}); # # $oConfigBodyElement-> # addNew(HTML_DIV, "config-body-output", # {strContent => $strConfig}); # } # # # Return from function and log return values if any # return logDebugReturn # ( # $strOperation, # {name => 'oConfigElement', value => $oConfigElement, trace => true} # ); } #################################################################################################################################### # postgresConfigProcess #################################################################################################################################### sub postgresConfigProcess { my $self = shift; # # Assign function parameters, defaults, and log debug info # my # ( # $strOperation, # $oSection, # $oConfig, # $iDepth # ) = # logDebugParam # ( # __PACKAGE__ . '->postgresConfigProcess', \@_, # {name => 'oSection'}, # {name => 'oConfig'}, # {name => 'iDepth'} # ); # # # Generate the config # my $oConfigElement; # my ($strFile, $strConfig, $bShow) = $self->postgresConfig($oSection, $oConfig, $iDepth); # # if ($bShow) # { # # Render the config # my $strHostName = $self->{oManifest}->variableReplace($oConfig->paramGet('host')); # $oConfigElement = new pgBackRestDoc::Html::DocHtmlElement(HTML_DIV, "config"); # # $oConfigElement-> # addNew(HTML_DIV, "config-title", # {strContent => "${strHostName}:${strFile}" . # " " . $self->processText($oConfig->nodeGet('title')->textGet())}); # # my $oConfigBodyElement = $oConfigElement->addNew(HTML_DIV, "config-body"); # # # $oConfigBodyElement-> # # addNew(HTML_DIV, "config-body-title", # # {strContent => "append to ${strFile}:"}); # # $oConfigBodyElement-> # addNew(HTML_DIV, "config-body-output", # {strContent => defined($strConfig) ? $strConfig : ''}); # # $oConfig->fieldSet('actual-config', $strConfig); # } # # # Return from function and log return values if any # return logDebugReturn # ( # $strOperation, # {name => 'oConfigElement', value => $oConfigElement, trace => true} # ); } 1; pgbackrest-release-2.55.1/doc/lib/pgBackRestDoc/ProjectInfo.pm000066400000000000000000000071601500617037600241260ustar00rootroot00000000000000#################################################################################################################################### # PROJECT INFO MODULE # # Contains project name, version and format. #################################################################################################################################### package pgBackRestDoc::ProjectInfo; use strict; use warnings FATAL => qw(all); use Cwd qw(abs_path); use Exporter qw(import); our @EXPORT = qw(); use File::Basename qw(dirname); # Project Name # # Defines the official project name, exe, and config file. 
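# These constants are not assigned literals here -- they are parsed from src/version.h below. As a sketch of what the parser expects (values illustrative, based on the patterns matched below), version.h should contain lines like: #define PROJECT_NAME "pgBackRest" and #define PROJECT_BIN "pgbackrest"; PROJECT_CONF is then derived by appending '.conf' to PROJECT_BIN.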
#----------------------------------------------------------------------------------------------------------------------------------- push @EXPORT, qw(PROJECT_NAME); push @EXPORT, qw(PROJECT_EXE); push @EXPORT, qw(PROJECT_CONF); # Project Version Number # # Defines the current version of the BackRest executable. The version number is used to track features but does not affect what # repositories or manifests can be read - that's the job of the format number. #----------------------------------------------------------------------------------------------------------------------------------- push @EXPORT, qw(PROJECT_VERSION_MAJOR); push @EXPORT, qw(PROJECT_VERSION_MINOR); push @EXPORT, qw(PROJECT_VERSION_PATCH); push @EXPORT, qw(PROJECT_VERSION_SUFFIX); push @EXPORT, qw(PROJECT_VERSION); # Repository Format Number # # Defines format for info and manifest files as well as on-disk structure. If this number changes then the repository will be # invalid unless migration functions are written. #----------------------------------------------------------------------------------------------------------------------------------- push @EXPORT, qw(REPOSITORY_FORMAT); #################################################################################################################################### # Load project info from src/version.h #################################################################################################################################### require pgBackRestTest::Common::Storage; require pgBackRestTest::Common::StoragePosix; my $strProjectInfo = ${new pgBackRestTest::Common::Storage( dirname(dirname(abs_path($0))), new pgBackRestTest::Common::StoragePosix())->get('src/version.h')}; foreach my $strLine (split("\n", $strProjectInfo)) { if ($strLine =~ /^#define PROJECT_NAME/) { eval("use constant PROJECT_NAME => " . (split(" ", $strLine))[-1]); } elsif ($strLine =~ /^#define PROJECT_BIN/) { eval("use constant PROJECT_EXE => " . (split(" ", $strLine))[-1]); eval("use constant PROJECT_CONF => " . (split(" ", $strLine))[-1] . " . \'.conf\'"); } elsif ($strLine =~ /^#define PROJECT_VERSION_MAJOR/) { eval("use constant PROJECT_VERSION_MAJOR => \"" . (split(" ", $strLine))[-1] . "\""); } elsif ($strLine =~ /^#define PROJECT_VERSION_MINOR/) { eval("use constant PROJECT_VERSION_MINOR => " . (split(" ", $strLine))[-1]); } elsif ($strLine =~ /^#define PROJECT_VERSION_PATCH/) { eval("use constant PROJECT_VERSION_PATCH => " . (split(" ", $strLine))[-1]); } elsif ($strLine =~ /^#define PROJECT_VERSION_SUFFIX/) { eval("use constant PROJECT_VERSION_SUFFIX => " . (split(" ", $strLine))[-1]); } elsif ($strLine =~ /^#define REPOSITORY_FORMAT/) { eval("use constant REPOSITORY_FORMAT => " . (split(" ", $strLine))[-1]); } } eval( 'use constant PROJECT_VERSION => "' . PROJECT_VERSION_MAJOR() . '.' . PROJECT_VERSION_MINOR() . '.' . PROJECT_VERSION_PATCH() . PROJECT_VERSION_SUFFIX() . 
'"'); 1; pgbackrest-release-2.55.1/doc/manifest.xml000066400000000000000000000135351500617037600204600ustar00rootroot00000000000000 pgBackRest Reliable PostgreSQL Backup & Restore use pgBackRestDoc::ProjectInfo; PROJECT_VERSION use pgBackRestDoc::Custom::DocCustomRelease; (new pgBackRestDoc::Custom::DocCustomRelease( new pgBackRestDoc::Common::Doc("{[doc-path]}/xml/release.xml")))->currentStableVersion(); pgbackrest / PostgreSQL - logo.png favicon.png n n n n use Time::Local; use pgBackRestDoc::Custom::DocCustomRelease; my ($second, $minute , $hour, $mday, $month, $year) = localtime(); $year += 1900; if ('{[release-date-static]}' eq 'y') { my $strDate = (new pgBackRestDoc::Custom::DocCustomRelease( new pgBackRestDoc::Common::Doc("{[doc-path]}/xml/release.xml")))->releaseLast()->paramGet('date'); if ($strDate eq 'XXXX-XX-XX') { confess &log(ERROR, 'not possible to use static release dates on a dev build'); } else { ($year, $month, $mday) = split(/[\s.\-]+/, $strDate); $month -= 1; } } my @stryMonth = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'); $stryMonth[$month] . ' ' . $mday . ', ' . $year; 'Copyright &copy; 2015' . '-' . substr('{[release-date]}', length('{[release-date]}') - 4) . ', The PostgreSQL Global Development Group, <a href="{[github-url-license]}">MIT License</a>. Updated ' . '{[release-date]}'; {[doc-path]}/output/latex/logo {[project]} User Guide Open Source PostgreSQL Backup and Restore Utility Version {[version]} Crunchy Data Solutions, Inc. cds-logo.eps {[pdf-title1]}\\{[pdf-title3]} \ \\-\ \thepage\ - {[pdf-organization]}\\\today CrunchyBackRest-UserGuide-{[version]} pgbackrest-release-2.55.1/doc/release.pl000077500000000000000000000313641500617037600201100ustar00rootroot00000000000000#!/usr/bin/perl #################################################################################################################################### # release.pl - PgBackRest Release Manager #################################################################################################################################### #################################################################################################################################### # Perl includes #################################################################################################################################### use strict; use warnings FATAL => qw(all); use Carp qw(confess); use English '-no_match_vars'; $SIG{__DIE__} = sub { Carp::confess @_ }; use Cwd qw(abs_path); use File::Basename qw(dirname); use Getopt::Long qw(GetOptions); use Pod::Usage qw(pod2usage); use Storable; use lib dirname($0) . '/lib'; use lib dirname(dirname($0)) . '/build/lib'; use lib dirname(dirname($0)) . '/lib'; use lib dirname(dirname($0)) . 
'/test/lib'; use pgBackRestTest::Common::ExecuteTest; use pgBackRestTest::Common::Storage; use pgBackRestTest::Common::StoragePosix; use pgBackRestTest::Common::VmTest; use pgBackRestDoc::Common::Doc; use pgBackRestDoc::Common::DocConfig; use pgBackRestDoc::Common::DocManifest; use pgBackRestDoc::Common::DocRender; use pgBackRestDoc::Common::Exception; use pgBackRestDoc::Common::Log; use pgBackRestDoc::Common::String; use pgBackRestDoc::Custom::DocCustomRelease; use pgBackRestDoc::Html::DocHtmlSite; use pgBackRestDoc::Latex::DocLatex; use pgBackRestDoc::Markdown::DocMarkdown; use pgBackRestDoc::ProjectInfo; #################################################################################################################################### # Usage #################################################################################################################################### =head1 NAME release.pl - pgBackRest Release Manager =head1 SYNOPSIS release.pl [options] General Options: --help Display usage and exit --version Display pgBackRest version --quiet Sets log level to ERROR --log-level Log level for execution (e.g. ERROR, WARN, INFO, DEBUG) Release Options: --build Build the cache before release (should be included in the release commit) --deploy Deploy documentation to website (can be done as docs are updated) --no-gen Don't auto-generate --vm vm to build documentation for =cut #################################################################################################################################### # Load command line parameters and config (see usage above for details) #################################################################################################################################### my $bHelp = false; my $bVersion = false; my $bQuiet = false; my $strLogLevel = 'info'; my $bBuild = false; my $bDeploy = false; my $bNoGen = false; my $strVm = undef; GetOptions ('help' => \$bHelp, 'version' => \$bVersion, 'quiet' => \$bQuiet, 'log-level=s' => \$strLogLevel, 'build' => \$bBuild, 'deploy' => \$bDeploy, 'no-gen' => \$bNoGen, 'vm=s' => \$strVm) or pod2usage(2); #################################################################################################################################### # Run in eval block to catch errors #################################################################################################################################### eval { # Display version and exit if requested if ($bHelp || $bVersion) { print PROJECT_NAME . ' ' . PROJECT_VERSION . " Release Manager\n"; if ($bHelp) { print "\n"; pod2usage(); } exit 0; } # If neither build nor deploy is requested then error if (!$bBuild && !$bDeploy) { confess &log(ERROR, 'neither --build nor --deploy requested, nothing to do'); } # Set console log level if ($bQuiet) { $strLogLevel = 'error'; } logLevelSet(undef, uc($strLogLevel), OFF); # Set the paths my $strDocPath = dirname(abs_path($0)); my $strDocHtml = "${strDocPath}/output/html"; my $strDocExe = "${strDocPath}/doc.pl"; my $strTestExe = dirname($strDocPath) . "/test/test.pl"; my $oStorageDoc = new pgBackRestTest::Common::Storage( $strDocPath, new pgBackRestTest::Common::StoragePosix({bFileSync => false, bPathSync => false})); # Determine if this is a dev release my $bDev = PROJECT_VERSION =~ /dev$/; my $strVersion = $bDev ? 'dev' : PROJECT_VERSION; # Make sure version number matches the latest release &log(INFO, "check version info"); my $strReleaseFile = dirname(dirname(abs_path($0))) . 
'/doc/xml/release.xml'; my $oRelease = (new pgBackRestDoc::Custom::DocCustomRelease(new pgBackRestDoc::Common::Doc($strReleaseFile)))->releaseLast(); if ($oRelease->paramGet('version') ne PROJECT_VERSION) { confess 'unable to find version ' . PROJECT_VERSION . " as the most recent release in ${strReleaseFile}"; } if ($bBuild) { if (!$bNoGen) { # Update git history my $strGitCommand = 'git -C ' . $strDocPath . ' log --pretty=format:\'{^^^^commit^^^^:^^^^%H^^^^,^^^^date^^^^:^^^^%ci^^^^,^^^^subject^^^^:^^^^%s^^^^,^^^^body^^^^:^^^^%b^^^^},\''; my $strGitLog = qx($strGitCommand); $strGitLog =~ s/\^\^\^\^\}\,\n/\#\#\#\#/mg; $strGitLog =~ s/\\/\\\\/g; $strGitLog =~ s/\n/\\n/mg; $strGitLog =~ s/\r/\\r/mg; $strGitLog =~ s/\t/\\t/mg; $strGitLog =~ s/\"/\\\"/g; $strGitLog =~ s/\^\^\^\^/\"/g; $strGitLog =~ s/\#\#\#\#/\"\}\,\n/mg; $strGitLog = '[' . substr($strGitLog, 0, length($strGitLog) - 1) . ']'; my @hyGitLog = @{(JSON::PP->new()->allow_nonref())->decode($strGitLog)}; # Load prior history my @hyGitLogPrior = @{(JSON::PP->new()->allow_nonref())->decode( ${$oStorageDoc->get("${strDocPath}/resource/git-history.cache")})}; # Add new commits for (my $iGitLogIdx = @hyGitLog - 1; $iGitLogIdx >= 0; $iGitLogIdx--) { my $rhGitLog = $hyGitLog[$iGitLogIdx]; my $bFound = false; foreach my $rhGitLogPrior (@hyGitLogPrior) { if ($rhGitLog->{commit} eq $rhGitLogPrior->{commit}) { $bFound = true; } } next if $bFound; $rhGitLog->{body} = trim($rhGitLog->{body}); if ($rhGitLog->{body} eq '') { delete($rhGitLog->{body}); } unshift(@hyGitLogPrior, $rhGitLog); } # Write git log $strGitLog = undef; foreach my $rhGitLog (@hyGitLogPrior) { $strGitLog .= (defined($strGitLog) ? ",\n" : '') . " {\n" . ' "commit": ' . trim((JSON::PP->new()->allow_nonref()->pretty())->encode($rhGitLog->{commit})) . ",\n" . ' "date": ' . trim((JSON::PP->new()->allow_nonref()->pretty())->encode($rhGitLog->{date})) . ",\n" . ' "subject": ' . trim((JSON::PP->new()->allow_nonref()->pretty())->encode($rhGitLog->{subject})); # Skip the body if it is empty or a release (since we already have the release note content) if ($rhGitLog->{subject} !~ /^v[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]+){0,1}\: /g && defined($rhGitLog->{body})) { $strGitLog .= ",\n" . ' "body": ' . trim((JSON::PP->new()->allow_nonref()->pretty())->encode($rhGitLog->{body})); } $strGitLog .= "\n" . 
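# Each new commit is prepended to resource/git-history.cache as a pretty-printed JSON
# object holding the commit hash, date, and subject, plus the body when it is non-empty
# and the subject is not a release (release notes already cover those). An entry looks
# roughly like this (values illustrative):
#   {
#       "commit": "0123456789abcdef0123456789abcdef01234567",
#       "date": "2025-05-05 15:00:00 +0000",
#       "subject": "Example commit subject."
#   }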
" }"; } $oStorageDoc->put("${strDocPath}/resource/git-history.cache", "[\n${strGitLog}\n]\n"); # Generate coverage summary &log(INFO, "Generate Coverage Summary"); executeTest("${strTestExe} --vm=u22 --no-valgrind --clean --coverage-summary", {bShowOutputAsync => true}); } # Remove permanent cache file $oStorageDoc->remove("${strDocPath}/resource/exe.cache", {bIgnoreMissing => true}); # Remove all docker containers to get consistent IP address assignments executeTest('docker rm -f $(docker ps -a -q)', {bSuppressError => true}); # Generate deployment docs for RHEL if (!defined($strVm) || $strVm eq VM_RH8) { &log(INFO, "Generate RHEL documentation"); executeTest("${strDocExe} --deploy --key-var=os-type=rhel --out=html", {bShowOutputAsync => true}); if (!defined($strVm)) { executeTest("${strDocExe} --deploy --cache-only --key-var=os-type=rhel --out=pdf"); } } # Generate deployment docs for Debian if (!defined($strVm) || $strVm eq VM_U20) { &log(INFO, "Generate Debian/Ubuntu documentation"); executeTest("${strDocExe} --deploy --out=man --out=html --out=markdown", {bShowOutputAsync => true}); } # Generate a full copy of the docs for review if (!defined($strVm)) { &log(INFO, "Generate full documentation for review"); executeTest( "${strDocExe} --deploy --out-preserve --cache-only --key-var=os-type=rhel --out=html" . " --var=project-url-root=index.html"); $oStorageDoc->move("$strDocHtml/user-guide.html", "$strDocHtml/user-guide-rhel.html"); executeTest( "${strDocExe} --deploy --out-preserve --cache-only --out=man --out=html --var=project-url-root=index.html"); } } if ($bDeploy) { my $strDeployPath = "${strDocPath}/site"; # Generate docs for the website history &log(INFO, 'Generate website ' . ($bDev ? 'dev' : 'history') . ' documentation'); my $strDocExeVersion = ${strDocExe} . ($bDev ? ' --dev' : ' --deploy --cache-only') . 
' --var=project-url-root=index.html --out=html'; executeTest("${strDocExeVersion} --out-preserve --key-var=os-type=rhel"); $oStorageDoc->move("$strDocHtml/user-guide.html", "$strDocHtml/user-guide-rhel.html"); $oStorageDoc->remove("$strDocHtml/release.html"); executeTest("${strDocExeVersion} --out-preserve --exclude=release"); # Deploy to repository &log(INFO, '...Deploy to repository'); executeTest("rm -rf ${strDeployPath}/prior/${strVersion}"); executeTest("mkdir ${strDeployPath}/prior/${strVersion}"); executeTest("cp ${strDocHtml}/* ${strDeployPath}/prior/${strVersion}"); # Generate docs for the main website if (!$bDev) { &log(INFO, "Generate website documentation"); executeTest("${strDocExe} --var=analytics=y --deploy --cache-only --key-var=os-type=rhel --out=html"); $oStorageDoc->move("$strDocHtml/user-guide.html", "$strDocHtml/user-guide-rhel.html"); executeTest("${strDocExe} --var=analytics=y --deploy --out-preserve --cache-only --out=html"); # Deploy to repository &log(INFO, '...Deploy to repository'); executeTest("rm -rf ${strDeployPath}/dev"); executeTest("find ${strDeployPath} -maxdepth 1 -type f -exec rm {} +"); executeTest("cp ${strDocHtml}/* ${strDeployPath}"); executeTest("cp ${strDocPath}/../README.md ${strDeployPath}"); executeTest("cp ${strDocPath}/../LICENSE ${strDeployPath}"); } # Update permissions executeTest("find ${strDeployPath} -path .git -prune -type d -exec chmod 750 {} +"); executeTest("find ${strDeployPath} -path .git -prune -type f -exec chmod 640 {} +"); } # Exit with success exit 0; } #################################################################################################################################### # Check for errors #################################################################################################################################### or do { # If a backrest exception then return the code exit $EVAL_ERROR->code() if (isException(\$EVAL_ERROR)); # Else output the unhandled error print $EVAL_ERROR; exit ERROR_UNHANDLED; }; # It shouldn't be possible to get here &log(ASSERT, 'execution reached invalid location in ' . __FILE__ . ', line ' . 
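# Unreachable: the eval block always exits 0 on success and the handler above exits with
# the exception code or ERROR_UNHANDLED, so reaching ERROR_ASSERT here flags a logic error.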
__LINE__); exit ERROR_ASSERT; pgbackrest-release-2.55.1/doc/resource/000077500000000000000000000000001500617037600177505ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/resource/exe.cache000066400000000000000000020747661500617037600215440ustar00rootroot00000000000000{ "default" : { "all" : { "contributing" : [ { "key" : { "id" : "contrib", "image" : "pgbackrest/doc:contrib", "name" : "pgbackrest-dev", "option" : "-v /var/run/docker.sock:/var/run/docker.sock -v /home/vagrant/test:/home/vagrant/test", "os" : "u20", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.10" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get update" ], "host" : "pgbackrest-dev", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install rsync git devscripts build-essential valgrind autoconf \\", " autoconf-archive libssl-dev zlib1g-dev libxml2-dev libpq-dev pkg-config \\", " libxml-checker-perl libyaml-perl libdbd-pg-perl liblz4-dev liblz4-tool \\", " zstd libzstd-dev bzip2 libbz2-dev libyaml-dev ccache python3-distutils meson" ], "cmd-extra" : "-y 2>&1", "host" : "pgbackrest-dev", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "pgbackrest/test/test.pl --clean-only" ], "host" : "pgbackrest-dev", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "curl -fsSL https://get.docker.com | sudo sh" ], "cmd-extra" : "2>&1", "host" : "pgbackrest-dev", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo usermod -aG docker `whoami`" ], "host" : "pgbackrest-dev", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 666 /var/run/docker.sock" ], "host" : "pgbackrest-dev", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "pgbackrest/test/test.pl --dry-run" ], "cmd-extra" : "--no-log-timestamp", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "[0-9]+ tests selected|DRY RUN COMPLETED SUCCESSFULLY" ] }, "host" : "pgbackrest-dev", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: test begin on x86_64 - log level info", "P00 INFO: clean autogenerate code", "P00 INFO: 83 tests selected", " ", "P00 INFO: P1-T01/83 - vm=none, module=common, test=error", " [filtered 80 lines of output]", "P00 INFO: P1-T82/83 - vm=none, module=performance, test=type", "P00 INFO: P1-T83/83 - vm=none, module=performance, test=storage", "P00 INFO: DRY RUN COMPLETED SUCCESSFULLY" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "pgbackrest/test/test.pl --vm-out --module=common --test=wait" ], "cmd-extra" : "--no-log-timestamp", "host" : "pgbackrest-dev", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: test begin on x86_64 - log level info", "P00 INFO: cleanup old data", "P00 INFO: autogenerate code", "P00 INFO: build for none (/home/vagrant/test/build/none)", "P00 INFO: 1 test selected", " ", "P00 INFO: P1-T1/1 - vm=none, module=common, test=wait", " ", " P00 INFO: test command begin 2.55.1: [common/wait] --log-level=info --no-log-timestamp --repo-path=/home/vagrant/test/repo --scale=1 --test-path=/home/vagrant/test --vm=none 
--vm-id=0", " P00 INFO: test command end: completed successfully", " run 1 - waitNew(), waitMore, and waitFree()", " L0018 expect AssertError: assertion 'waitTime <= 999999000' failed", " ", " run 1/1 ----- L0021 0ms wait", " L0025 new wait", " L0026 check wait time", " L0027 check sleep time", " L0028 check sleep prev time", " L0029 no wait more", " ", " run 1/2 ----- L0032 100ms with retries after time expired", " L0034 new wait", " L0037 time expired, first retry", " L0038 time expired, second retry", " L0039 time expired, retries expired", " ", " run 1/3 ----- L0042 200ms wait", " L0046 new wait = 0.2 sec", " L0047 check wait time", " L0048 check sleep time", " L0049 check sleep prev time", " L0050 check begin time", " L0052 first retry", " L0053 check retry", " L0055 second retry", " L0056 check retry", " L0058 still going because of time", " L0064 lower range check", " L0065 upper range check", " L0067 free wait", " ", " run 1/4 ----- L0070 1100ms wait", " L0074 new wait = 1.1 sec", " L0075 check wait time", " L0076 check sleep time", " L0077 check sleep prev time", " L0078 check begin time", " L0084 lower range check", " L0085 upper range check", " L0087 free wait", " ", " run 1/5 ----- L0090 waitRemainder()", " L0092 new wait = 500ms", " L0093 check initial wait remainder", " L0094 check initial wait remainder", " L0098 check updated wait remainder", " L0099 check updated wait remainder", " ", " TESTS COMPLETED SUCCESSFULLY", "", "P00 INFO: P1-T1/1 - vm=none, module=common, test=wait", "P00 INFO: tested modules have full coverage", "P00 INFO: TESTS COMPLETED SUCCESSFULLY" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "pgbackrest/test/test.pl --module=postgres" ], "cmd-extra" : "--no-log-timestamp", "host" : "pgbackrest-dev", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: test begin on x86_64 - log level info", "P00 INFO: cleanup old data", "P00 INFO: autogenerate code", "P00 INFO: build for none (/home/vagrant/test/build/none)", "P00 INFO: 2 tests selected", " ", "P00 INFO: P1-T1/2 - vm=none, module=postgres, test=client", "P00 INFO: P1-T2/2 - vm=none, module=postgres, test=interface", "P00 INFO: tested modules have full coverage", "P00 INFO: TESTS COMPLETED SUCCESSFULLY" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "pgbackrest/test/test.pl --vm-build --vm=u20" ], "cmd-extra" : "--no-log-timestamp", "host" : "pgbackrest-dev", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: test begin on x86_64 - log level info", "P00 INFO: Using cached pgbackrest/test:u20-base-x86_64-20250228A image (862159b4d2169a4752b106639ca0f47c1ebb1f86) ...", "P00 INFO: Building pgbackrest/test:u20-test-x86_64 image ...", "P00 INFO: Build Complete" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "pgbackrest/test/test.pl --vm=u20 --module=postgres --test=interface --run=2" ], "cmd-extra" : "--no-log-timestamp", "host" : "pgbackrest-dev", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: test begin on x86_64 - log level info", "P00 INFO: cleanup old data and containers", "P00 INFO: autogenerate code", "P00 INFO: clean build for u20 (/home/vagrant/test/build/u20)", "P00 INFO: 1 test selected", " ", "P00 INFO: P1-T1/1 - vm=u20, module=postgres, test=interface, run=2", "P00 INFO: TESTS COMPLETED SUCCESSFULLY" ] } } ], "user-guide" : [ { "key" : { "id" : "azure", "image" : "mcr.microsoft.com/azure-storage/azurite", 
"name" : "azure-server", "option" : "-v {[host-repo-path]}/doc/resource/fake-cert/azure-server.crt:/root/public.crt:ro -v {[host-repo-path]}/doc/resource/fake-cert/azure-server.key:/root/private.key:ro -e AZURITE_ACCOUNTS='pgbackrest:YXpLZXk='", "os" : "debian", "param" : "azurite-blob --blobPort 443 --blobHost 0.0.0.0 --cert=/root/public.crt --key=/root/private.key", "update-hosts" : false }, "type" : "host", "value" : { "ip" : "172.17.0.2" } }, { "key" : { "id" : "s3", "image" : "minio/minio", "name" : "s3-server", "option" : "-v {[host-repo-path]}/doc/resource/fake-cert/s3-server.crt:/root/.minio/certs/public.crt:ro -v {[host-repo-path]}/doc/resource/fake-cert/s3-server.key:/root/.minio/certs/private.key:ro -e MINIO_REGION=us-east-1 -e MINIO_DOMAIN=s3.us-east-1.amazonaws.com -e MINIO_BROWSER=off -e MINIO_ACCESS_KEY=accessKey1 -e MINIO_SECRET_KEY=verySecretKey1", "os" : "debian", "param" : "server /data --address :443", "update-hosts" : false }, "type" : "host", "value" : { "ip" : "172.17.0.3" } }, { "key" : { "id" : "sftp", "image" : "pgbackrest/doc:debian", "name" : "sftp-server", "os" : "debian", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.4" } }, { "key" : { "id" : "build", "image" : "pgbackrest/doc:debian", "name" : "build", "os" : "debian", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.5" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /build/pgbackrest-release-2.55.1" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cp -r /pgbackrest/* /build/pgbackrest-release-2.55.1" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown -R vagrant /build" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get update" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install python3-distutils meson gcc libpq-dev libssl-dev libxml2-dev \\", " pkg-config liblz4-dev libzstd-dev libbz2-dev libz-dev libyaml-dev libssh2-1-dev" ], "cmd-extra" : "-y 2>&1", "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "meson setup /build/pgbackrest /build/pgbackrest-release-2.55.1" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "ninja -C /build/pgbackrest" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "id" : "pg1", "image" : "pgbackrest/doc:debian", "name" : "pg-primary", "option" : "-m 512m", "os" : "debian", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.6" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install postgresql-client libxml2 libssh2-1" ], "cmd-extra" : "-y 2>&1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 
/usr/bin/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/log/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "pgBackRest 2.55.1 - General help", "", "Usage:", " pgbackrest [options] [command]", "", "Commands:", " annotate add or modify backup annotation", " archive-get get a WAL segment from the archive", " archive-push push a WAL segment to the archive", " backup backup a database cluster", " check check the configuration", " expire expire backups that exceed retention", " help get help", " info retrieve information about backups", " repo-get get a file from a repository", " repo-ls list files in a repository", " restore restore a database cluster", " server pgBackRest server", " server-ping ping pgBackRest server", " stanza-create create the required stanza data", " stanza-delete delete a stanza", " stanza-upgrade upgrade a stanza", " start allow pgBackRest processes to run", " stop stop pgBackRest processes from running", " verify verify contents of a repository", " version get version", "", "Use 'pgbackrest help [command]' for more information." 
] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres /usr/lib/postgresql/16/bin/initdb \\", " -D /var/lib/postgresql/16/demo -k -A peer" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_createcluster 16 demo" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "Configuring already existing cluster (configuration: /etc/postgresql/16/demo, data: /var/lib/postgresql/16/demo, owner: 102:103)", "Ver Cluster Port Status Owner Data directory Log file", "16 demo 5432 down postgres /var/lib/postgresql/16/demo /var/log/postgresql/postgresql-16-demo.log" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /etc/postgresql/16/demo/postgresql.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/postgresql/16/demo" } }, "global" : { "log-timestamp" : { "value" : "n" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres bash -c ' \\", " export PGBACKREST_LOG_PATH=/path/set/by/env && \\", " pgbackrest --log-level-console=error help backup log-path'" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "current\\: \\/path\\/set\\/by\\/env" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "pgBackRest 2.55.1 - 'backup' command - 'log-path' option help", "", "Path where log files are stored.", "", "The log path provides a location for pgBackRest to store log files. 
Note that", "if log-level-file=off then no log path is required.", "", "current: /path/set/by/env", "default: /var/log/pgbackrest" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /var/lib/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 750 /var/lib/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/lib/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-path" : { "value" : "/var/lib/pgbackrest" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-path=/var/lib/pgbackrest" ] } }, { "key" : { "file" : "/etc/postgresql/16/demo/postgresql.conf", "host" : "pg-primary", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo archive-push %p'" }, "archive_mode" : { "value" : "on" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo restart" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " create or replace function create_test_table(prefix int, scale int, data bool) returns void as \\$\\$ \\", " declare \\", " index int; \\", " begin \\", " for index in 1 .. 
scale loop \\", " execute 'create table test_' || prefix || '_' || index || ' (id int)'; \\", " \\", " if data then \\", " execute 'insert into test_' || prefix || '_' || index || ' values (' || (prefix * index) || ')'; \\", " end if; \\", " end loop; \\", " end \\$\\$ LANGUAGE plpgsql;\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global:archive-push" : { "compress-level" : { "value" : "3" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-path=/var/lib/pgbackrest", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-full" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-cipher-pass" : { "value" : "zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO" }, "repo1-cipher-type" : { "value" : "aes-256-cbc" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-create command begin 2.55.1: --exec-id=377-87743616 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --stanza=demo", "P00 INFO: stanza-create for stanza 'demo' on repo1", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ " successfully archived to " ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=385-c96e0ee9 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --stanza=demo", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000010000000000000001 successfully archived to '/var/lib/pgbackrest/archive/demo/16-1/0000000100000000/000000010000000000000001-ce9289f79867819258af79ffa4d150deb16ca2d3.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { 
"global" : { "start-fast" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=409-09d7370f --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-full=2 --stanza=demo --start-fast", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 000000010000000000000002, lsn = 0/2000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 000000010000000000000002:000000010000000000000003", "P00 INFO: new backup label = 20250505-153608F", "P00 INFO: full backup size = 22.0MB, file total = 963", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=409-09d7370f --log-level-console=info --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-full=2 --stanza=demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153608F" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "diff backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 7 lines of output]", "P00 INFO: check archive for segment(s) 000000010000000000000004:000000010000000000000005", "P00 INFO: new backup label = 20250505-153608F_20250505-153612D", "P00 INFO: diff backup size = 8.3KB, file total = 963", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=434-a44db86f --log-level-console=info --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-full=2 --stanza=demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "(full|incr|diff) backup" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: aes-256-cbc", "", " db (current)", " wal archive min/max (16): 000000010000000000000001/000000010000000000000005", "", " full backup: 20250505-153608F", " 
timestamp start/stop: 2025-05-05 15:36:08+00 / 2025-05-05 15:36:11+00", " wal start/stop: 000000010000000000000002 / 000000010000000000000003", " database size: 22.0MB, database backup size: 22.0MB", " repo1: backup set size: 2.9MB, backup size: 2.9MB", "", " diff backup: 20250505-153608F_20250505-153612D", " timestamp start/stop: 2025-05-05 15:36:12+00 / 2025-05-05 15:36:13+00", " wal start/stop: 000000010000000000000004 / 000000010000000000000005", " database size: 22.0MB, database backup size: 8.3KB", " repo1: backup set size: 2.9MB, backup size: 464B", " backup reference total: 1 full" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres rm /var/lib/postgresql/16/demo/global/pg_control" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "err-expect" : "1", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "could not find the database system" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "Error: /usr/lib/postgresql/16/bin/pg_ctl /usr/lib/postgresql/16/bin/pg_ctl start -D /var/lib/postgresql/16/demo -l /var/log/postgresql/postgresql-16-demo.log -s -o -c config_file=\"/etc/postgresql/16/demo/postgresql.conf\" exited with status 1: ", "postgres: could not find the database system", "Expected to find it in the directory \"/var/lib/postgresql/16/demo\",", "but could not open file \"/var/lib/postgresql/16/demo/global/pg_control\": No such file or directory", "Examine the log output." 
] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres find /var/lib/postgresql/16/demo -mindepth 1 -delete" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -p /var/lib/postgresql/pgbackrest/doc/example" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cp -r /pgbackrest/doc/example/* \\", " /var/lib/postgresql/pgbackrest/doc/example" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat \\", " /var/lib/postgresql/pgbackrest/doc/example/pgsql-pgbackrest-info.sql" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-- An example of monitoring pgBackRest from within PostgreSQL", "--", "-- Use copy to export data from the pgBackRest info command into the jsonb", "-- type so it can be queried directly by PostgreSQL.", "", "-- Create monitor schema", "create schema monitor;", "", "-- Get pgBackRest info in JSON format", "create function monitor.pgbackrest_info()", " returns jsonb AS $$", "declare", " data jsonb;", "begin", " -- Create a temp table to hold the JSON data", " create temp table temp_pgbackrest_data (data text);", "", " -- Copy data into the table directly from the pgBackRest info command", " copy temp_pgbackrest_data (data)", " from program", " 'pgbackrest --output=json info' (format text);", "", " select replace(temp_pgbackrest_data.data, E'\\n', '\\n')::jsonb", " into data", " from temp_pgbackrest_data;", "", " drop table temp_pgbackrest_data;", "", " return data;", "end $$ language plpgsql;" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -f \\", " /var/lib/postgresql/pgbackrest/doc/example/pgsql-pgbackrest-info.sql" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat \\", " /var/lib/postgresql/pgbackrest/doc/example/pgsql-pgbackrest-query.sql" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-- Get last successful backup for each stanza", "--", "-- Requires the monitor.pgbackrest_info function.", "with stanza as", "(", " select data->'name' as name,", " data->'backup'->(", " jsonb_array_length(data->'backup') - 1) as last_backup,", " data->'archive'->(", " jsonb_array_length(data->'archive') - 1) as current_archive", " from jsonb_array_elements(monitor.pgbackrest_info()) as data", ")", "select name,", " to_timestamp(", " (last_backup->'timestamp'->>'stop')::numeric) as last_successful_backup,", " current_archive->>'max' as last_archived_wal", " from stanza;" ] } }, { "key" : { 
"bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -f \\", " /var/lib/postgresql/pgbackrest/doc/example/pgsql-pgbackrest-query.sql" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " name | last_successful_backup | last_archived_wal ", "--------+------------------------+--------------------------", " \"demo\" | 2025-05-05 15:36:13+00 | 000000010000000000000005", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install jq" ], "cmd-extra" : "-y 2>&1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --output=json --stanza=demo info | \\", " jq '.[0] | .backup[-1] | .timestamp.stop'" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "1746459373" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --output=json --stanza=demo info | \\", " jq '.[0] | .archive[-1] | .max'" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "\"000000010000000000000005\"" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-bundle" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=full backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres find /var/lib/pgbackrest/backup/demo/latest/ -type f | wc -l" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "5" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-block" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --annotation=source=\"demo backup\" \\", " --annotation=key=value --type=full backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153628F" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --set=20250505-153628F info" ], "highlight" : { "filter" : false, 
"filter-context" : 2, "list" : [ "annotation" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: aes-256-cbc", "", " db (current)", " wal archive min/max (16): 000000020000000000000007/000000020000000000000009", "", " full backup: 20250505-153628F", " timestamp start/stop: 2025-05-05 15:36:28+00 / 2025-05-05 15:36:29+00", " wal start/stop: 000000020000000000000008 / 000000020000000000000009", " lsn start/stop: 0/8000028 / 0/9000050", " database size: 22.0MB, database backup size: 22.0MB", " repo1: backup size: 2.9MB", " database list: postgres (5)", " annotation(s)", " key: value", " source: demo backup" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --set=20250505-153628F \\", " --annotation=key= --annotation=new_key=new_value annotate" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --set=20250505-153628F info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "annotation" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: aes-256-cbc", "", " db (current)", " wal archive min/max (16): 000000020000000000000007/000000020000000000000009", "", " full backup: 20250505-153628F", " timestamp start/stop: 2025-05-05 15:36:28+00 / 2025-05-05 15:36:29+00", " wal start/stop: 000000020000000000000008 / 000000020000000000000009", " lsn start/stop: 0/8000028 / 0/9000050", " database size: 22.0MB, database backup size: 22.0MB", " repo1: backup size: 2.9MB", " database list: postgres (5)", " annotation(s)", " new_key: new_value", " source: demo backup" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-full" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=full \\", " --log-level-console=detail backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "archive retention on backup 20250505-153608F|remove archive" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 975 lines of output]", "P00 INFO: repo1: remove expired backup 20250505-153625F", "P00 DETAIL: repo1: 16-1 archive retention on backup 20250505-153628F, start = 000000020000000000000008", "P00 INFO: repo1: 16-1 remove archive, start = 000000020000000000000007, stop = 000000020000000000000007", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153631F" ] } }, { "key" : { "bash-wrap" : 
true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=full \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "expire full backup set 20250505-153608F|archive retention on backup 20250505-153631F|remove archive" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 11 lines of output]", "P00 INFO: repo1: expire full backup 20250505-153628F", "P00 INFO: repo1: remove expired backup 20250505-153628F", "P00 INFO: repo1: 16-1 remove archive, start = 000000020000000000000008, stop = 00000002000000000000000A", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-diff" : { "value" : "1" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=1", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153633F_20250505-153636D" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=incr backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "expire diff backup set 20250505-153633F_20250505-153636D" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 10 lines of output]", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=874-b831e96e --log-level-console=info --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-diff=1 --repo1-retention-full=2 --stanza=demo", "P00 INFO: repo1: expire diff backup set 20250505-153633F_20250505-153636D, 20250505-153633F_20250505-153638I", "P00 INFO: repo1: remove expired backup 20250505-153633F_20250505-153638I", "P00 INFO: repo1: remove expired backup 20250505-153633F_20250505-153636D", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-diff" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", 
"repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153633F_20250505-153640D" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select pg_create_restore_point('generate WAL'); select pg_switch_wal(); \\", " select pg_create_restore_point('generate WAL'); select pg_switch_wal();\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "new backup label" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 6 lines of output]", "P00 INFO: backup stop archive = 000000020000000000000017, lsn = 0/17000050", "P00 INFO: check archive for segment(s) 000000020000000000000016:000000020000000000000017", "P00 INFO: new backup label = 20250505-153633F_20250505-153643D", "P00 INFO: diff backup size = 8.3KB, file total = 963", "P00 INFO: backup command end: completed successfully", " [filtered 2 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153633F_20250505-153643D" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=detail \\", " --repo1-retention-archive-type=diff --repo1-retention-archive=1 expire" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "archive retention on backup 20250505-153633F_20250505-153640D|remove archive" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: expire command begin 2.55.1: --exec-id=950-c6af69bb --log-level-console=detail --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-archive=1 --repo1-retention-archive-type=diff --repo1-retention-diff=2 --repo1-retention-full=2 --stanza=demo", "P00 DETAIL: repo1: 16-1 archive retention on backup 20250505-153631F, start = 00000002000000000000000B, stop = 00000002000000000000000B", "P00 DETAIL: repo1: 16-1 archive retention on backup 20250505-153633F, start = 00000002000000000000000C, stop = 00000002000000000000000D", "P00 DETAIL: repo1: 16-1 archive retention on backup 20250505-153633F_20250505-153640D, start = 000000020000000000000012, stop = 000000020000000000000013", "P00 DETAIL: repo1: 16-1 archive retention on backup 20250505-153633F_20250505-153643D, start = 000000020000000000000016", "P00 INFO: repo1: 16-1 remove archive, start = 00000002000000000000000E, stop = 000000020000000000000011", "P00 INFO: repo1: 16-1 remove archive, start = 000000020000000000000014, stop = 000000020000000000000015", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo 
pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --log-level-console=detail restore" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "demo\\/PG_VERSION - exists and matches backup|remove invalid files|rename global\\/pg_control" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 2 lines of output]", "P00 DETAIL: check '/var/lib/postgresql/16/demo' exists", "P00 DETAIL: remove 'global/pg_control' so cluster will not start if restore does not complete", "P00 INFO: remove invalid files/links/paths from '/var/lib/postgresql/16/demo'", "P00 DETAIL: remove invalid file '/var/lib/postgresql/16/demo/backup_label.old'", "P00 DETAIL: remove invalid file '/var/lib/postgresql/16/demo/base/1/pg_internal.init'", " [filtered 13 lines of output]", "P00 DETAIL: remove invalid file '/var/lib/postgresql/16/demo/postmaster.opts'", "P01 DETAIL: restore file /var/lib/postgresql/16/demo/backup_label (260B, 0.00%) checksum ad24e14059cb8ecf184023253f8b5fca6f14af04", "P01 DETAIL: restore file /var/lib/postgresql/16/demo/PG_VERSION - exists and matches backup (bundle 20250505-153633F/1/0, 3B, 0.00%) checksum 3596ea087bfdaf52380eae441077572ed289d657", "P01 DETAIL: restore file /var/lib/postgresql/16/demo/pg_multixact/members/0000 - exists and matches backup (bundle 20250505-153633F/1/24, 8KB, 0.04%) checksum 0631457264ff7f8d5fb1edc2c0211992a67c73e6", "P01 DETAIL: restore file /var/lib/postgresql/16/demo/global/pg_filenode.map - exists and matches backup (bundle 20250505-153633F/1/64, 524B, 0.04%) checksum 3de878cf56cdb80345e88da11f91bc6d46f9f804", " [filtered 988 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create database test1;\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE DATABASE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create database test2;\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE DATABASE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create table test1_table (id int); \\", " insert into test1_table (id) values (1);\" test1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE TABLE", "INSERT 0 1" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create table test2_table (id int); \\", " insert into test2_table (id) values (2);\" test2" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE TABLE", "INSERT 0 1" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=incr backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, 
"run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -Atc \"select oid from pg_database where datname = 'test1'\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "32768" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres du -sh /var/lib/postgresql/16/demo/base/32768" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "7.4M\t/var/lib/postgresql/16/demo/base/32768" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153633F_20250505-153651I" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo \\", " --set=20250505-153633F_20250505-153651I info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "database list" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 12 lines of output]", " repo1: backup size: 2.0MB", " backup reference list: 20250505-153633F, 20250505-153633F_20250505-153643D", " database list: postgres (5), test1 (32768), test2 (32769)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --db-include=test2 --type=immediate --target-action=promote restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from test2_table;\" test2" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " id ", "----", " 2", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from test1_table;\" test1" ], "err-expect" : "2", "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "relation mapping file.*contains invalid data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "psql: error: connection to server on socket \"/var/run/postgresql/.s.PGSQL.5432\" failed: FATAL: relation mapping file \"base/32768/pg_filenode.map\" contains invalid data" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres du -sh /var/lib/postgresql/16/demo/base/32768" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "8.0K\t/var/lib/postgresql/16/demo/base/32768" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"drop database test1;\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, 
"run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "DROP DATABASE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select oid, datname from pg_database order by oid;\"" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "test2" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " oid | datname ", "-------+-----------", " 1 | template1", " 4 | template0", " 5 | postgres", " 32769 | test2", "(4 rows)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"begin; \\", " create table important_table (message text); \\", " insert into important_table values ('Important Data'); \\", " commit; \\", " select * from important_table;\"" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", " message ", "----------------", " Important Data", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 1" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -Atc \"select current_timestamp\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "2025-05-05 15:37:03.157376+00" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 1" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"begin; \\", " drop table important_table; \\", " commit; \\", " select * from important_table;\"" ], "err-expect" : "1", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "does not exist" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "BEGIN", "DROP TABLE", "COMMITERROR: relation \"important_table\" does not exist", "LINE 1: ...le important_table; commit; select * from important_...", " ^" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=incr backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-153633F_20250505-153704I" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "20250505-153633F_20250505-153704I" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 38 lines of output]", " backup reference total: 1 full, 1 diff", "", " incr backup: 20250505-153633F_20250505-153704I", " timestamp start/stop: 2025-05-05 15:37:04+00 / 2025-05-05 15:37:05+00", " wal start/stop: 00000004000000000000001A / 00000004000000000000001A", " [filtered 2 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : 
true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --set=20250505-153633F_20250505-153704I --target-timeline=current \\", " --type=time \"--target=2025-05-05 15:37:03.157376+00\" --target-action=promote restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/log/postgresql/postgresql-16-demo.log" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "err-expect" : "1", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "recovery ended before configured recovery target was reached" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 13 lines of output]", "LOG: database system is ready to accept read-only connections", "LOG: redo done at 0/1A000100 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.02 s", "FATAL: recovery ended before configured recovery target was reached", "LOG: startup process (PID 1321) exited with exit code 1", "LOG: terminating any other active server processes", "LOG: database system is shut down" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --type=time \"--target=2025-05-05 15:37:03.157376+00\" \\", " --target-action=promote restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/log/postgresql/postgresql-16-demo.log" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/postgresql/16/demo/postgresql.auto.conf" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "recovery_target_time" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 9 lines of output]", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:37:08", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "recovery_target_time = '2025-05-05 15:37:03.157376+00'", "recovery_target_action = 'promote'" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from important_table\"" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message ", "----------------", " Important Data", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/postgresql/postgresql-16-demo.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "recovery stopping before|last completed transaction|starting point-in-time recovery" ] }, "host" : 
"pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", "LOG: database system was interrupted; last known up at 2025-05-05 15:36:52 UTC", "LOG: restored log file \"00000004.history\" from archive", "LOG: starting point-in-time recovery to 2025-05-05 15:37:03.157376+00", "LOG: starting backup recovery with redo LSN 0/19000028, checkpoint LSN 0/19000060, on timeline ID 3", "LOG: restored log file \"00000004.history\" from archive", " [filtered 5 lines of output]", "LOG: database system is ready to accept read-only connections", "LOG: restored log file \"00000004000000000000001A\" from archive", "LOG: recovery stopping before commit of transaction 740, time 2025-05-05 15:37:04.47197+00", "LOG: redo done at 0/19026050 system usage: CPU: user: 0.00 s, system: 0.01 s, elapsed: 0.07 s", "LOG: last completed transaction was at log time 2025-05-05 15:37:01.902745+00", "LOG: restored log file \"000000040000000000000019\" from archive", "LOG: selected new timeline ID: 5", " [filtered 5 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stop" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stop command begin 2.55.1: --exec-id=1443-49e13bce --log-level-console=info --no-log-timestamp --stanza=demo", "P00 INFO: stop command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=1 \\", " --log-level-console=info stanza-delete" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-delete command begin 2.55.1: --exec-id=1450-c989c5ff --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo=1 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --stanza=demo", "P00 INFO: stanza-delete command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo2-azure-account" : { "value" : "pgbackrest" }, "repo2-azure-container" : { "value" : "demo-container" }, "repo2-azure-key" : { "value" : "YXpLZXk=" }, "repo2-path" : { "value" : "/demo-repo" }, "repo2-retention-full" : { "value" : "4" }, "repo2-type" : { "value" : "azure" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", 
"repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo \"172.17.0.2 pgbackrest.blob.core.windows.net\" | tee -a /etc/hosts" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres bash -c 'export AZURE_CLI_DISABLE_CONNECTION_VERIFICATION=1;az storage container create -n demo-container \\", " --connection-string \"DefaultEndpointsProtocol=https;AccountName=pgbackrest;AccountKey=YXpLZXk=\" \\", " 2>&1'" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-create command begin 2.55.1: --exec-id=1521-8d36a4a8 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo2-type=azure --stanza=demo", "P00 INFO: stanza-create for stanza 'demo' on repo1", "P00 INFO: stanza-create for stanza 'demo' on repo2", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=2 \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=1529-81f60a40 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo=2 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-block --repo1-bundle --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo2-type=azure --stanza=demo --start-fast", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000005000000000000001B, lsn = 0/1B000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 00000005000000000000001B:00000005000000000000001B", "P00 INFO: new backup label = 20250505-153719F", "P00 INFO: full backup size = 29.1MB, file total = 1265", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=1529-81f60a40 --log-level-console=info --no-log-timestamp --repo=2 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo2-type=azure --stanza=demo" ] } }, { "key" : { "file" : 
"/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo3-path" : { "value" : "/demo-repo" }, "repo3-retention-full" : { "value" : "4" }, "repo3-s3-bucket" : { "value" : "demo-bucket" }, "repo3-s3-endpoint" : { "value" : "s3.us-east-1.amazonaws.com" }, "repo3-s3-key" : { "value" : "accessKey1" }, "repo3-s3-key-secret" : { "value" : "verySecretKey1" }, "repo3-s3-region" : { "value" : "us-east-1" }, "repo3-type" : { "value" : "s3" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "repo3-path=/demo-repo", "repo3-retention-full=4", "repo3-s3-bucket=demo-bucket", "repo3-s3-endpoint=s3.us-east-1.amazonaws.com", "repo3-s3-key=accessKey1", "repo3-s3-key-secret=verySecretKey1", "repo3-s3-region=us-east-1", "repo3-type=s3", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo \"172.17.0.3 demo-bucket.s3.us-east-1.amazonaws.com s3.us-east-1.amazonaws.com\" | tee -a /etc/hosts" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mc --insecure alias set s3 https://127.0.0.1 accessKey1 verySecretKey1" ], "host" : "s3-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mc --insecure mb --with-versioning s3/demo-bucket" ], "host" : "s3-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", "P00 INFO: stanza 'demo' already exists on repo2 and is valid", "P00 INFO: stanza-create for stanza 'demo' on repo3", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=1577-45157b4a --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo=3 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-block --repo1-bundle --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= 
--repo3-s3-region=us-east-1 --repo2-type=azure --repo3-type=s3 --stanza=demo --start-fast", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000005000000000000001C, lsn = 0/1C000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 00000005000000000000001C:00000005000000000000001D", "P00 INFO: new backup label = 20250505-153728F", "P00 INFO: full backup size = 29.1MB, file total = 1265", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=1577-45157b4a --log-level-console=info --no-log-timestamp --repo=3 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo2-type=azure --repo3-type=s3 --stanza=demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -Atc \"select date_trunc('seconds', current_timestamp)\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "2025-05-05 15:37:33+00" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "process-max" : { "value" : "4" }, "repo4-bundle" : { "value" : "y" }, "repo4-path" : { "value" : "/demo-repo" }, "repo4-sftp-host" : { "value" : "sftp-server" }, "repo4-sftp-host-key-hash-type" : { "value" : "sha1" }, "repo4-sftp-host-user" : { "value" : "pgbackrest" }, "repo4-sftp-private-key-file" : { "value" : "/var/lib/postgresql/.ssh/id_rsa_sftp" }, "repo4-sftp-public-key-file" : { "value" : "/var/lib/postgresql/.ssh/id_rsa_sftp.pub" }, "repo4-type" : { "value" : "sftp" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "process-max=4", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "repo3-path=/demo-repo", "repo3-retention-full=4", "repo3-s3-bucket=demo-bucket", "repo3-s3-endpoint=s3.us-east-1.amazonaws.com", "repo3-s3-key=accessKey1", "repo3-s3-key-secret=verySecretKey1", "repo3-s3-region=us-east-1", "repo3-type=s3", "repo4-bundle=y", "repo4-path=/demo-repo", "repo4-sftp-host=sftp-server", "repo4-sftp-host-key-hash-type=sha1", "repo4-sftp-host-user=pgbackrest", "repo4-sftp-private-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp", "repo4-sftp-public-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp.pub", "repo4-type=sftp", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -m 750 -p /var/lib/postgresql/.ssh" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : 
true, "cmd" : [ "sudo -u postgres ssh-keygen -f /var/lib/postgresql/.ssh/id_rsa_sftp \\", " -t rsa -b 4096 -N \"\" -m PEM" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "id -u pgbackrest > /dev/null 2>&1 || adduser --disabled-password --gecos \"\" pgbackrest" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mkdir -m 750 -p /demo-repo && chown pgbackrest /demo-repo" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest mkdir -m 750 -p /home/pgbackrest/.ssh" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(sudo ssh root@pg-primary cat /var/lib/postgresql/.ssh/id_rsa_sftp.pub) | \\", " sudo -u pgbackrest tee -a /home/pgbackrest/.ssh/authorized_keys" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "ssh-keyscan -H sftp-server >> /var/lib/postgresql/.ssh/known_hosts 2>/dev/null" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "postgres" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 6 lines of output]", "P00 INFO: stanza 'demo' already exists on repo3 and is valid", "P00 INFO: stanza-create for stanza 'demo' on repo4", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=4 \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=1656-747a1781 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --process-max=4 --repo=4 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-block --repo1-bundle --repo4-bundle --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo4-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo4-sftp-host=sftp-server --repo4-sftp-host-key-hash-type=sha1 --repo4-sftp-host-user=pgbackrest --repo4-sftp-private-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp --repo4-sftp-public-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp.pub --repo2-type=azure --repo3-type=s3 --repo4-type=sftp --stanza=demo --start-fast", "P00 WARN: option 'repo4-retention-full' is not set for 'repo4-retention-full-type=count', the repository may run out of space", " HINT: to retain full backups indefinitely 
(without warning), set option 'repo4-retention-full' to the maximum.", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000005000000000000001E, lsn = 0/1E000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 00000005000000000000001E:00000005000000000000001F", "P00 INFO: new backup label = 20250505-153736F", "P00 INFO: full backup size = 29.1MB, file total = 1265", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=1656-747a1781 --log-level-console=info --no-log-timestamp --repo=4 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo4-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo4-sftp-host=sftp-server --repo4-sftp-host-key-hash-type=sha1 --repo4-sftp-host-user=pgbackrest --repo4-sftp-private-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp --repo4-sftp-public-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp.pub --repo2-type=azure --repo3-type=s3 --repo4-type=sftp --stanza=demo", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo5-gcs-bucket" : { "value" : "demo-bucket" }, "repo5-gcs-key" : { "value" : "/etc/pgbackrest/gcs-key.json" }, "repo5-path" : { "value" : "/demo-repo" }, "repo5-type" : { "value" : "gcs" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "process-max=4", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "repo3-path=/demo-repo", "repo3-retention-full=4", "repo3-s3-bucket=demo-bucket", "repo3-s3-endpoint=s3.us-east-1.amazonaws.com", "repo3-s3-key=accessKey1", "repo3-s3-key-secret=verySecretKey1", "repo3-s3-region=us-east-1", "repo3-type=s3", "repo4-bundle=y", "repo4-path=/demo-repo", "repo4-sftp-host=sftp-server", "repo4-sftp-host-key-hash-type=sha1", "repo4-sftp-host-user=pgbackrest", "repo4-sftp-private-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp", "repo4-sftp-public-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp.pub", "repo4-type=sftp", "repo5-gcs-bucket=demo-bucket", "repo5-gcs-key=/etc/pgbackrest/gcs-key.json", "repo5-path=/demo-repo", "repo5-type=gcs", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : 
"exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 1" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 stanza-delete" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "missing stanza data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: error (missing stanza data)", " cipher: none" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "mc ls --versions s3/demo-bucket/demo-repo/backup/demo/backup.info" ], "cmd-extra" : "--insecure", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "backup\\.info$" ] }, "host" : "s3-server", "load-env" : true, "output" : true, "run-as-user" : "root" }, "type" : "exe", "value" : { "output" : [ "[2025-05-05 15:37:42 UTC] 0B STANDARD 4b8a6087-660b-4454-b54b-f23b4c99166d v3 DEL backup.info", "[2025-05-05 15:37:33 UTC] 1.0KiB STANDARD ff3f0b2c-36f0-4a39-bd98-95e231304bf7 v2 PUT backup.info", "[2025-05-05 15:37:27 UTC] 372B STANDARD 3665e998-7670-4201-be92-72f1f448dbea v1 PUT backup.info", "[2025-05-05 15:37:42 UTC] 0B STANDARD 15063baa-a307-498c-95df-0c2c6c0e74f6 v3 DEL backup.info.copy", "[2025-05-05 15:37:33 UTC] 1.0KiB STANDARD 487bbb33-04f6-4b6d-81d7-84bb3521b2f5 v2 PUT backup.info.copy" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 \\", " --repo-target-time=\"2025-05-05 15:37:33+00\" info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "full backup" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 5 lines of output]", " wal archive min/max (16): 00000005000000000000001C/00000005000000000000001D", "", " full backup: 20250505-153728F", " timestamp start/stop: 2025-05-05 15:37:28+00 / 2025-05-05 15:37:33+00", " wal start/stop: 00000005000000000000001C / 00000005000000000000001D", " repo3: backup set size: 3.9MB, backup size: 3.9MB" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 --delta \\", " --repo-target-time=\"2025-05-05 15:37:33+00\" --log-level-console=info restore" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ " restore backup set " ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: restore command begin 2.55.1: --delta --exec-id=1733-c6bc056e --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --process-max=4 --repo=3 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo5-gcs-bucket=demo-bucket --repo5-gcs-key= --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo4-path=/demo-repo --repo5-path=/demo-repo --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo4-sftp-host=sftp-server --repo4-sftp-host-key-hash-type=sha1 --repo4-sftp-host-user=pgbackrest 
--repo4-sftp-private-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp --repo4-sftp-public-key-file=/var/lib/postgresql/.ssh/id_rsa_sftp.pub --repo-target-time=\"2025-05-05 15:37:33+00\" --repo2-type=azure --repo3-type=s3 --repo4-type=sftp --repo5-type=gcs --stanza=demo", "P00 INFO: repo3: restore backup set 20250505-153728F, recovery will start at 2025-05-05 15:37:28", "P00 INFO: remove invalid files/links/paths from '/var/lib/postgresql/16/demo'", "P00 INFO: write updated /var/lib/postgresql/16/demo/postgresql.auto.conf", " [filtered 2 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "id" : "repo1", "image" : "pgbackrest/doc:debian", "name" : "repository", "option" : "-m 512m", "os" : "debian", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.7" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo adduser --disabled-password --gecos \"\" pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install postgresql-client libxml2 libssh2-1" ], "cmd-extra" : "-y 2>&1", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown pgbackrest:pgbackrest /var/log/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown pgbackrest:pgbackrest /etc/pgbackrest/pgbackrest.conf" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /var/lib/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 750 /var/lib/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : 
true, "cmd" : [ "sudo chown pgbackrest:pgbackrest /var/lib/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest mkdir -m 750 /home/pgbackrest/.ssh" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest ssh-keygen -f /home/pgbackrest/.ssh/id_rsa \\", " -t rsa -b 4096 -N \"\"" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -m 750 -p /var/lib/postgresql/.ssh" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh-keygen -f /var/lib/postgresql/.ssh/id_rsa \\", " -t rsa -b 4096 -N \"\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' && \\", " echo -n 'command=\"/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }\" ' && \\", " sudo ssh root@pg-primary cat /var/lib/postgresql/.ssh/id_rsa.pub) | \\", " sudo -u pgbackrest tee -a /home/pgbackrest/.ssh/authorized_keys" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' && \\", " echo -n 'command=\"/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }\" ' && \\", " sudo ssh root@repository cat /home/pgbackrest/.ssh/id_rsa.pub) | \\", " sudo -u postgres tee -a /var/lib/postgresql/.ssh/authorized_keys" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest ssh postgres@pg-primary" ], "cmd-extra" : "-o StrictHostKeyChecking=no", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh pgbackrest@repository" ], "cmd-extra" : "-o StrictHostKeyChecking=no", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "global" : { "repo1-path" : { "value" : "/var/lib/pgbackrest" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[global]", "repo1-path=/var/lib/pgbackrest" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo" : { "pg1-host" : { "value" : "pg-primary" }, "pg1-path" : { "value" : "/var/lib/postgresql/16/demo" } }, "global" : { "log-timestamp" : { "value" : "n" }, "repo1-retention-full" : { "value" : "2" }, "start-fast" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/postgresql/16/demo" } }, "global" : { "log-level-file" : { "value" : "detail" }, "log-timestamp" : { "value" : "n" }, "repo1-host" : { "value" : 
"repository" } } }, "reset" : true }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "log-level-file=detail", "repo1-host=repository" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo stanza-create" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo check" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo check" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo backup" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: no prior backup exists, incr backup has been changed to full" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --type=full backup" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "global" : { "process-max" : { "value" : "3" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --type=full backup" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "timestamp start/stop" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: none", "", " db (current)", " wal archive min/max (16): 000000070000000000000023/000000070000000000000025", "", " full backup: 20250505-153818F", " timestamp start/stop: 2025-05-05 15:38:18+00 / 2025-05-05 15:38:21+00", " wal start/stop: 000000070000000000000023 / 000000070000000000000023", " database size: 29.1MB, database backup size: 29.1MB", " repo1: backup set size: 3.9MB, backup size: 3.9MB", "", " full backup: 20250505-153823F", " timestamp start/stop: 2025-05-05 15:38:23+00 / 2025-05-05 15:38:28+00", " wal start/stop: 
000000070000000000000024 / 000000070000000000000025", " database size: 29.1MB, database backup size: 29.1MB", " repo1: backup set size: 3.9MB, backup size: 3.9MB" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo backup" ], "err-expect" : "56", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "\\: stop file exists for all stanzas" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: unable to check pg1: [StopError] raised from remote-0 ssh protocol on 'pg-primary': stop file exists for all stanzas", "P00 ERROR: [056]: unable to find primary cluster - cannot proceed", " HINT: are all available clusters in recovery?" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest stop" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: stop file already exists for all stanzas" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo backup" ], "err-expect" : "56", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "\\: stop file exists for stanza demo" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: unable to check pg1: [StopError] raised from remote-0 ssh protocol on 'pg-primary': stop file exists for stanza demo", "P00 ERROR: [056]: unable to find primary cluster - cannot proceed", " HINT: are all available clusters in recovery?" 
] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "id" : "pg2", "image" : "pgbackrest/doc:debian", "name" : "pg-standby", "option" : "-m 512m", "os" : "debian", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.8" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install postgresql-client libxml2 libssh2-1" ], "cmd-extra" : "-y 2>&1", "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/log/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -m 750 -p /var/lib/postgresql/.ssh" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh-keygen -f /var/lib/postgresql/.ssh/id_rsa \\", " -t rsa -b 4096 -N \"\"" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' && \\", " echo -n 'command=\"/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }\" ' && \\", " sudo ssh root@pg-standby cat /var/lib/postgresql/.ssh/id_rsa.pub) | \\", " sudo -u pgbackrest tee -a /home/pgbackrest/.ssh/authorized_keys" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' && \\", " echo -n 'command=\"/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }\" ' && \\", " sudo ssh root@repository cat 
/home/pgbackrest/.ssh/id_rsa.pub) | \\", " sudo -u postgres tee -a /var/lib/postgresql/.ssh/authorized_keys" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest ssh postgres@pg-standby" ], "cmd-extra" : "-o StrictHostKeyChecking=no", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh pgbackrest@repository" ], "cmd-extra" : "-o StrictHostKeyChecking=no", "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/postgresql/16/demo" } }, "global" : { "log-level-file" : { "value" : "detail" }, "log-timestamp" : { "value" : "n" }, "repo1-host" : { "value" : "repository" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "log-level-file=detail", "repo1-host=repository" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_createcluster 16 demo" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta --type=standby restore" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/postgresql/16/demo/postgresql.auto.conf" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "# Do not edit this file manually!", "# It will be overwritten by the ALTER SYSTEM command.", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:36:15", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:36:45", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:37:08", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "# Removed by pgBackRest restore on 2025-05-05 15:37:46 # recovery_target_time = '2025-05-05 15:37:03.157376+00'", "# Removed by pgBackRest restore on 2025-05-05 15:37:46 # recovery_target_action = 'promote'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:37:46", "restore_command = 'pgbackrest --repo=3 --repo-target-time=\"2025-05-05 15:37:33+00\" --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:38:11", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:38:43", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /etc/postgresql/16/demo/postgresql.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/etc/postgresql/16/demo/postgresql.conf", "host" : "pg-standby", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo archive-push %p'" }, "archive_mode" : { "value" : "on" }, "hot_standby" : { 
"value" : "on" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "hot_standby = on", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/log/postgresql/postgresql-16-demo.log" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/postgresql/postgresql-16-demo.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "entering standby mode|database system is ready to accept read only connections" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 3 lines of output]", "LOG: listening on Unix socket \"/var/run/postgresql/.s.PGSQL.5432\"", "LOG: database system was interrupted; last known up at 2025-05-05 15:38:23 UTC", "LOG: entering standby mode", "LOG: starting backup recovery with redo LSN 0/24000028, checkpoint LSN 0/24000060, on timeline ID 7", "LOG: restored log file \"00000007.history\" from archive", " [filtered 6 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " begin; \\", " create table replicated_table (message text); \\", " insert into replicated_table values ('Important Data'); \\", " commit; \\", " select * from replicated_table\";" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", " message ", "----------------", " Important Data", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from replicated_table;\"" ], "err-expect" : "1", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "does not exist" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "ERROR: relation \"replicated_table\" does not exist", "LINE 1: select * from replicated_table;", " ^" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select *, current_timestamp from pg_switch_wal()\";" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " pg_switch_wal | current_timestamp ", "---------------+-------------------------------", " 0/2601A838 | 2025-05-05 15:38:50.596767+00", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select *, current_timestamp from replicated_table\"" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message | current_timestamp ", "----------------+-------------------------------", " Important Data | 
2025-05-05 15:38:56.632927+00", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "because this is a standby" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=586-efbb5ee1 --log-level-console=info --log-level-file=detail --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo1-host=repository --stanza=demo", "P00 INFO: check repo1 (standby)", "P00 INFO: switch wal not performed because this is a standby", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " create user replicator password 'jw8s0F4' replication\";" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE ROLE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sh -c 'echo \\", " \"host replication replicator 172.17.0.8/32 md5\" \\", " >> /etc/postgresql/16/demo/pg_hba.conf'" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo reload" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "demo" : { "recovery-option" : { "value" : "primary_conninfo=host=172.17.0.6 port=5432 user=replicator" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "recovery-option=primary_conninfo=host=172.17.0.6 port=5432 user=replicator", "", "[global]", "log-level-file=detail", "repo1-host=repository" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sh -c 'echo \\", " \"172.17.0.6:*:replication:replicator:jw8s0F4\" \\", " >> /var/lib/postgresql/.pgpass'" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres chmod 600 /var/lib/postgresql/.pgpass" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta --type=standby restore" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/postgresql/16/demo/postgresql.auto.conf" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "# Do not edit this file manually!", "# It will be overwritten by the ALTER SYSTEM command.", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:36:15", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:36:45", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 
2025-05-05 15:37:08", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "# Removed by pgBackRest restore on 2025-05-05 15:37:46 # recovery_target_time = '2025-05-05 15:37:03.157376+00'", "# Removed by pgBackRest restore on 2025-05-05 15:37:46 # recovery_target_action = 'promote'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:37:46", "restore_command = 'pgbackrest --repo=3 --repo-target-time=\"2025-05-05 15:37:33+00\" --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:38:11", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:38:59", "primary_conninfo = 'host=172.17.0.6 port=5432 user=replicator'", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/log/postgresql/postgresql-16-demo.log" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo start" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/postgresql/postgresql-16-demo.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "started streaming WAL from primary" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 13 lines of output]", "LOG: consistent recovery state reached at 0/25000088", "LOG: database system is ready to accept read-only connections", "LOG: started streaming WAL from primary at 0/27000000 on timeline 7" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " begin; \\", " create table stream_table (message text); \\", " insert into stream_table values ('Important Data'); \\", " commit; \\", " select *, current_timestamp from stream_table\";" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", " message | current_timestamp ", "----------------+-------------------------------", " Important Data | 2025-05-05 15:39:06.086007+00", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select *, current_timestamp from stream_table\"" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message | current_timestamp ", "----------------+-------------------------------", " Important Data | 2025-05-05 15:39:06.483319+00", "(1 row)" ] } }, { "key" : { "id" : "pgalt", "image" : "pgbackrest/doc:debian", "name" : "pg-alt", "option" : "-m 512m", "os" : "debian", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.9" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo apt-get install postgresql-client libxml2 libssh2-1" ], "cmd-extra" : "-y 2>&1", "host" : "pg-alt", "load-env" : true, "output" 
: false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/log/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -m 750 -p /var/lib/postgresql/.ssh" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh-keygen -f /var/lib/postgresql/.ssh/id_rsa \\", " -t rsa -b 4096 -N \"\"" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' && \\", " echo -n 'command=\"/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }\" ' && \\", " sudo ssh root@pg-alt cat /var/lib/postgresql/.ssh/id_rsa.pub) | \\", " sudo -u pgbackrest tee -a /home/pgbackrest/.ssh/authorized_keys" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(echo -n 'no-agent-forwarding,no-X11-forwarding,no-port-forwarding,' && \\", " echo -n 'command=\"/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }\" ' && \\", " sudo ssh root@repository cat /home/pgbackrest/.ssh/id_rsa.pub) | \\", " sudo -u postgres tee -a /var/lib/postgresql/.ssh/authorized_keys" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest ssh postgres@pg-alt" ], "cmd-extra" : "-o StrictHostKeyChecking=no", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh pgbackrest@repository" ], "cmd-extra" : "-o StrictHostKeyChecking=no", "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, 
"type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-alt", "option" : { "demo-alt" : { "pg1-path" : { "value" : "/var/lib/postgresql/16/demo" } }, "global" : { "log-level-file" : { "value" : "detail" }, "log-timestamp" : { "value" : "n" }, "repo1-host" : { "value" : "repository" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo-alt]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "log-level-file=detail", "repo1-host=repository" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo-alt" : { "pg1-host" : { "value" : "pg-alt" }, "pg1-path" : { "value" : "/var/lib/postgresql/16/demo" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-path=/var/lib/postgresql/16/demo", "", "[demo-alt]", "pg1-host=pg-alt", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres /usr/lib/postgresql/16/bin/initdb \\", " -D /var/lib/postgresql/16/demo -k -A peer" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_createcluster 16 demo" ], "host" : "pg-alt", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "Configuring already existing cluster (configuration: /etc/postgresql/16/demo, data: /var/lib/postgresql/16/demo, owner: 102:103)", "Ver Cluster Port Status Owner Data directory Log file", "16 demo 5432 down postgres /var/lib/postgresql/16/demo /var/log/postgresql/postgresql-16-demo.log" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /etc/postgresql/16/demo/postgresql.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/etc/postgresql/16/demo/postgresql.conf", "host" : "pg-alt", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo-alt archive-push %p'" }, "archive_mode" : { "value" : "on" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo-alt archive-push %p'", "archive_mode = on", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo restart" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo-alt --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-alt", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-create command begin 2.55.1: --exec-id=350-5f466612 --log-level-console=info --log-level-file=detail --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo1-host=repository --stanza=demo-alt", "P00 INFO: stanza-create for stanza 'demo-alt' on repo1", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : 
true, "cmd" : [ "sudo -u postgres pgbackrest --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "check stanza | successfully archived to " ] }, "host" : "pg-alt", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=359-3c8eb968 --log-level-console=info --log-level-file=detail --no-log-timestamp --repo1-host=repository", "P00 INFO: check stanza 'demo-alt'", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000010000000000000001 successfully archived to '/var/lib/pgbackrest/archive/demo-alt/16-1/0000000100000000/000000010000000000000001-d7ba3d079e9c0ecc7d33e16d7b45a0a2f58adc95.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "check stanza | successfully archived to " ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=1241-54620747 --log-level-console=info --no-log-timestamp --repo1-path=/var/lib/pgbackrest", "P00 INFO: check stanza 'demo'", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000070000000000000027 successfully archived to '/var/lib/pgbackrest/archive/demo/16-1/0000000700000000/000000070000000000000027-14a933858863107b129b7f0e18a08d88ef43be6a.gz' on repo1", "P00 INFO: check stanza 'demo-alt'", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000010000000000000002 successfully archived to '/var/lib/pgbackrest/archive/demo-alt/16-1/0000000100000000/000000010000000000000002-c47a526be6ff2c84659563d1bb7e1b73eac35985.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 750 /var/spool/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/spool/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 750 /var/spool/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/spool/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "archive-async" : { "value" : "y" }, "spool-path" : { "value" : "/var/spool/pgbackrest" } }, "global:archive-get" : { "process-max" : { "value" : "2" } }, "global:archive-push" : { "process-max" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "spool-path=/var/spool/pgbackrest", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } 
}, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "global" : { "archive-async" : { "value" : "y" }, "spool-path" : { "value" : "/var/spool/pgbackrest" } }, "global:archive-get" : { "process-max" : { "value" : "2" } }, "global:archive-push" : { "process-max" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/16/demo", "recovery-option=primary_conninfo=host=172.17.0.6 port=5432 user=replicator", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "spool-path=/var/spool/pgbackrest", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"alter user replicator password 'bogus'\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "ALTER ROLE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo restart" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres rm -f /var/log/pgbackrest/demo-archive-push-async.log" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal();\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "WAL segment" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=2341-bdea8c52 --log-level-console=info --log-level-file=detail --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --repo1-host=repository --stanza=demo", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 00000007000000000000002D successfully archived to '/var/lib/pgbackrest/archive/demo/16-1/0000000700000000/00000007000000000000002D-7d798d0ab5870d10a755a412464de99fb662b910.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/pgbackrest/demo-archive-push-async.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ " WAL file\\(s\\) to archive|pushed WAL file \\'0000000" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-------------------PROCESS START-------------------", "P00 INFO: archive-push:async command begin 2.55.1: [/var/lib/postgresql/16/demo/pg_wal] --archive-async --exec-id=2327-27143da5 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp 
--pg1-path=/var/lib/postgresql/16/demo --process-max=2 --repo1-host=repository --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: push 1 WAL file(s) to archive: 000000070000000000000028", "P01 DETAIL: pushed WAL file '000000070000000000000028' to the archive", "P00 INFO: archive-push:async command end: completed successfully", "", "-------------------PROCESS START-------------------", "P00 INFO: archive-push:async command begin 2.55.1: [/var/lib/postgresql/16/demo/pg_wal] --archive-async --exec-id=2345-a01809d1 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --process-max=2 --repo1-host=repository --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: push 5 WAL file(s) to archive: 000000070000000000000029...00000007000000000000002D", "P02 DETAIL: pushed WAL file '00000007000000000000002A' to the archive", "P01 DETAIL: pushed WAL file '000000070000000000000029' to the archive", "P02 DETAIL: pushed WAL file '00000007000000000000002B' to the archive", "P01 DETAIL: pushed WAL file '00000007000000000000002C' to the archive", "P02 DETAIL: pushed WAL file '00000007000000000000002D' to the archive", "P00 INFO: archive-push:async command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 5" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/pgbackrest/demo-archive-get-async.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "found [0-F]{24} in the .* archive" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-------------------PROCESS START-------------------", "P00 INFO: archive-get:async command begin 2.55.1: [000000070000000000000024, 000000070000000000000025, 000000070000000000000026, 000000070000000000000027, 000000070000000000000028, 000000070000000000000029, 00000007000000000000002A, 00000007000000000000002B] --archive-async --exec-id=790-af06cf41 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --process-max=2 --repo1-host=repository --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: get 8 WAL file(s) from archive: 000000070000000000000024...00000007000000000000002B", "P02 DETAIL: found 000000070000000000000025 in the repo1: 16-1 archive", "P01 DETAIL: found 000000070000000000000024 in the repo1: 16-1 archive", "P02 DETAIL: found 000000070000000000000026 in the repo1: 16-1 archive", "P01 DETAIL: found 000000070000000000000027 in the repo1: 16-1 archive", "P00 DETAIL: unable to find 000000070000000000000028 in the archive", "P00 INFO: archive-get:async command end: completed successfully", " [filtered 14 lines of output]", "P00 INFO: archive-get:async command begin 2.55.1: [000000070000000000000028, 000000070000000000000029, 00000007000000000000002A, 00000007000000000000002B, 00000007000000000000002C, 00000007000000000000002D, 00000007000000000000002E, 00000007000000000000002F] --archive-async --exec-id=833-0b7bea8c --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --process-max=2 --repo1-host=repository --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: get 8 WAL file(s) from archive: 000000070000000000000028...00000007000000000000002F", 
"P01 DETAIL: found 000000070000000000000028 in the repo1: 16-1 archive", "P02 DETAIL: found 000000070000000000000029 in the repo1: 16-1 archive", "P01 DETAIL: found 00000007000000000000002A in the repo1: 16-1 archive", "P00 DETAIL: unable to find 00000007000000000000002B in the archive", "P00 INFO: archive-get:async command end: completed successfully", " [filtered 2 lines of output]", "P00 INFO: archive-get:async command begin 2.55.1: [00000007000000000000002B, 00000007000000000000002C, 00000007000000000000002D, 00000007000000000000002E, 00000007000000000000002F, 000000070000000000000030, 000000070000000000000031, 000000070000000000000032] --archive-async --exec-id=846-ea70e4e6 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/postgresql/16/demo --process-max=2 --repo1-host=repository --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: get 8 WAL file(s) from archive: 00000007000000000000002B...000000070000000000000032", "P02 DETAIL: found 00000007000000000000002C in the repo1: 16-1 archive", "P01 DETAIL: found 00000007000000000000002B in the repo1: 16-1 archive", "P02 DETAIL: found 00000007000000000000002D in the repo1: 16-1 archive", "P00 DETAIL: unable to find 00000007000000000000002E in the archive", "P00 INFO: archive-get:async command end: completed successfully", " [filtered 17 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"alter user replicator password 'jw8s0F4'\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "ALTER ROLE" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo" : { "pg2-host" : { "value" : "pg-standby" }, "pg2-path" : { "value" : "/var/lib/postgresql/16/demo" } }, "global" : { "backup-standby" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-path=/var/lib/postgresql/16/demo", "pg2-host=pg-standby", "pg2-path=/var/lib/postgresql/16/demo", "", "[demo-alt]", "pg1-host=pg-alt", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "backup-standby=y", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --log-level-console=detail backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "backup file pg-primary|replay on the standby" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 2 lines of output]", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000007000000000000002F, lsn = 0/2F000028", "P00 INFO: wait for replay on the standby to reach 0/2F000028", "P00 INFO: replay on the standby reached 0/2F000028", "P00 INFO: check archive for prior segment 00000007000000000000002E", "P01 DETAIL: backup file pg-primary:/var/lib/postgresql/16/demo/global/pg_control (8KB, 0.53%) checksum 20a516191e9d0ce661163067b70f348cd0c478e7", "P01 DETAIL: match file from prior backup pg-primary:/var/lib/postgresql/16/demo/pg_logical/replorigin_checkpoint (8B, 0.53%) checksum 347fc8f2df71bd4436e38bd1516ccd7ea0d46532", "P02 DETAIL: backup file pg-standby:/var/lib/postgresql/16/demo/base/5/1249 (464KB, 31.38%) checksum 
ad21eff1041d8672b2fbccf2b8e4af4c4d4cf7b2", " [filtered 1278 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 16 demo stop" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres /usr/lib/postgresql/17/bin/initdb \\", " -D /var/lib/postgresql/17/demo -k -A peer" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_createcluster 17 demo" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sh -c 'cd /var/lib/postgresql && \\", " /usr/lib/postgresql/17/bin/pg_upgrade \\", " --old-bindir=/usr/lib/postgresql/16/bin \\", " --new-bindir=/usr/lib/postgresql/17/bin \\", " --old-datadir=/var/lib/postgresql/16/demo \\", " --new-datadir=/var/lib/postgresql/17/demo \\", " --old-options=\" -c config_file=/etc/postgresql/16/demo/postgresql.conf\" \\", " --new-options=\" -c config_file=/etc/postgresql/17/demo/postgresql.conf\"'" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Upgrade Complete" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 40 lines of output]", "Checking for extension updates ok", "", "Upgrade Complete", "----------------", "Optimizer statistics are not transferred by pg_upgrade.", " [filtered 3 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /etc/postgresql/17/demo/postgresql.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/etc/postgresql/17/demo/postgresql.conf", "host" : "pg-primary", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo archive-push %p'" }, "archive_mode" : { "value" : "on" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/postgresql/17/demo" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/17/demo", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "spool-path=/var/spool/pgbackrest", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/postgresql/17/demo" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/postgresql/17/demo", "recovery-option=primary_conninfo=host=172.17.0.6 port=5432 user=replicator", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "spool-path=/var/spool/pgbackrest", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", 
"process-max=2" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/postgresql/17/demo" }, "pg2-path" : { "value" : "/var/lib/postgresql/17/demo" } }, "global" : { "backup-standby" : { "value" : "n" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-path=/var/lib/postgresql/17/demo", "pg2-host=pg-standby", "pg2-path=/var/lib/postgresql/17/demo", "", "[demo-alt]", "pg1-host=pg-alt", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "backup-standby=n", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cp /etc/postgresql/16/demo/pg_hba.conf \\", " /etc/postgresql/17/demo/pg_hba.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --no-online \\", " --log-level-console=info stanza-upgrade" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-upgrade command begin 2.55.1: --exec-id=2734-020283ec --log-level-console=info --log-level-file=detail --no-log-timestamp --no-online --pg1-path=/var/lib/postgresql/17/demo --repo1-host=repository --stanza=demo", "P00 INFO: stanza-upgrade for stanza 'demo' on repo1", "P00 INFO: stanza-upgrade command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 17 demo start" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_lsclusters" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo check" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_dropcluster 16 demo" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_dropcluster 16 demo" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_createcluster 17 demo" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo check" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: unable to check pg2: [DbConnectError] raised from remote-0 ssh protocol on 'pg-standby': unable to connect to 'dbname='postgres' port=5432': connection to server on socket \"/var/run/postgresql/.s.PGSQL.5432\" failed: No such file or directory", " \tIs the server running locally and accepting connections on that socket?" 
] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --type=full backup" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta --type=standby restore" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/postgresql/17/demo/postgresql.conf", "host" : "pg-standby", "option" : { "hot_standby" : { "value" : "on" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "hot_standby = on" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo pg_ctlcluster 17 demo start" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo check" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "global" : { "backup-standby" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-path=/var/lib/postgresql/17/demo", "pg2-host=pg-standby", "pg2-path=/var/lib/postgresql/17/demo", "", "[demo-alt]", "pg1-host=pg-alt", "pg1-path=/var/lib/postgresql/16/demo", "", "[global]", "backup-standby=y", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y" ] } } ] } }, "{\"os-type\":\"rhel\"}" : { "all" : { "user-guide" : [ { "key" : { "id" : "azure", "image" : "mcr.microsoft.com/azure-storage/azurite", "name" : "azure-server", "option" : "-v {[host-repo-path]}/doc/resource/fake-cert/azure-server.crt:/root/public.crt:ro -v {[host-repo-path]}/doc/resource/fake-cert/azure-server.key:/root/private.key:ro -e AZURITE_ACCOUNTS='pgbackrest:YXpLZXk='", "os" : "rhel", "param" : "azurite-blob --blobPort 443 --blobHost 0.0.0.0 --cert=/root/public.crt --key=/root/private.key", "update-hosts" : false }, "type" : "host", "value" : { "ip" : "172.17.0.2" } }, { "key" : { "id" : "s3", "image" : "minio/minio", "name" : "s3-server", "option" : "-v {[host-repo-path]}/doc/resource/fake-cert/s3-server.crt:/root/.minio/certs/public.crt:ro -v {[host-repo-path]}/doc/resource/fake-cert/s3-server.key:/root/.minio/certs/private.key:ro -e MINIO_REGION=us-east-1 -e MINIO_DOMAIN=s3.us-east-1.amazonaws.com -e MINIO_BROWSER=off -e MINIO_ACCESS_KEY=accessKey1 -e MINIO_SECRET_KEY=verySecretKey1", "os" : "rhel", "param" : "server /data --address :443", "update-hosts" : false }, "type" : "host", "value" : { "ip" : "172.17.0.3" } }, { "key" : { "id" : "sftp", "image" : "pgbackrest/doc:rhel", "name" : "sftp-server", "os" : "rhel", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.4" } }, { "key" : { "id" : "build", "image" : "pgbackrest/doc:rhel", "name" : "build", "os" : "rhel", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.5" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /build/pgbackrest-release-2.55.1" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cp -r /pgbackrest/* 
/build/pgbackrest-release-2.55.1" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown -R vagrant /build" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo yum install meson gcc postgresql13-devel openssl-devel \\", " libxml2-devel lz4-devel libzstd-devel bzip2-devel libyaml-devel libssh2-devel" ], "cmd-extra" : "-y 2>&1", "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "meson setup /build/pgbackrest /build/pgbackrest-release-2.55.1" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "ninja -C /build/pgbackrest" ], "host" : "build", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "id" : "pg1", "image" : "pgbackrest/doc:rhel", "name" : "pg-primary", "option" : "-m 512m", "os" : "rhel", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.6" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo yum install postgresql-libs libssh2" ], "cmd-extra" : "-y 2>&1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/log/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "pgBackRest 2.55.1 - General help", "", "Usage:", " pgbackrest [options] [command]", "", "Commands:", " annotate add or modify backup annotation", " archive-get get a WAL segment 
from the archive", " archive-push push a WAL segment to the archive", " backup backup a database cluster", " check check the configuration", " expire expire backups that exceed retention", " help get help", " info retrieve information about backups", " repo-get get a file from a repository", " repo-ls list files in a repository", " restore restore a database cluster", " server pgBackRest server", " server-ping ping pgBackRest server", " stanza-create create the required stanza data", " stanza-delete delete a stanza", " stanza-upgrade upgrade a stanza", " start allow pgBackRest processes to run", " stop stop pgBackRest processes from running", " verify verify contents of a repository", " version get version", "", "Use 'pgbackrest help [command]' for more information." ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres /usr/pgsql-13/bin/initdb \\", " -D /var/lib/pgsql/13/data -k -A peer" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /var/lib/pgsql/13/data/postgresql.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/var/lib/pgsql/13/data/postgresql.conf", "host" : "pg-primary", "option" : { "log_filename" : { "value" : "'postgresql.log'" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "log_filename = 'postgresql.log'" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/pgsql/13/data" } }, "global" : { "log-timestamp" : { "value" : "n" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres bash -c ' \\", " export PGBACKREST_LOG_PATH=/path/set/by/env && \\", " pgbackrest --log-level-console=error help backup log-path'" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "current\\: \\/path\\/set\\/by\\/env" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "pgBackRest 2.55.1 - 'backup' command - 'log-path' option help", "", "Path where log files are stored.", "", "The log path provides a location for pgBackRest to store log files. 
Note that", "if log-level-file=off then no log path is required.", "", "current: /path/set/by/env", "default: /var/log/pgbackrest" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /var/lib/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 750 /var/lib/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/lib/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-path" : { "value" : "/var/lib/pgbackrest" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-path=/var/lib/pgbackrest" ] } }, { "key" : { "file" : "/var/lib/pgsql/13/data/postgresql.conf", "host" : "pg-primary", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo archive-push %p'" }, "archive_mode" : { "value" : "on" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "log_filename = 'postgresql.log'", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl restart postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " create or replace function create_test_table(prefix int, scale int, data bool) returns void as \\$\\$ \\", " declare \\", " index int; \\", " begin \\", " for index in 1 .. 
scale loop \\", " execute 'create table test_' || prefix || '_' || index || ' (id int)'; \\", " \\", " if data then \\", " execute 'insert into test_' || prefix || '_' || index || ' values (' || (prefix * index) || ')'; \\", " end if; \\", " end loop; \\", " end \\$\\$ LANGUAGE plpgsql;\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global:archive-push" : { "compress-level" : { "value" : "3" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-path=/var/lib/pgbackrest", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-full" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-cipher-pass" : { "value" : "zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO" }, "repo1-cipher-type" : { "value" : "aes-256-cbc" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-create command begin 2.55.1: --exec-id=1144-a6cb75c7 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --stanza=demo", "P00 INFO: stanza-create for stanza 'demo' on repo1", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ " successfully archived to " ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=1171-efe8fd39 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --stanza=demo", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000010000000000000001 successfully archived to '/var/lib/pgbackrest/archive/demo/13-1/0000000100000000/000000010000000000000001-ccfaed8bf95406170361480900c17de474a073fc.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "start-fast" 
: { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=1241-ab2876e1 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-full=2 --stanza=demo --start-fast", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 000000010000000000000002, lsn = 0/2000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 000000010000000000000002:000000010000000000000003", "P00 INFO: new backup label = 20250505-152734F", "P00 INFO: full backup size = 23.2MB, file total = 936", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=1241-ab2876e1 --log-level-console=info --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-full=2 --stanza=demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152734F" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "diff backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 7 lines of output]", "P00 INFO: check archive for segment(s) 000000010000000000000004:000000010000000000000005", "P00 INFO: new backup label = 20250505-152734F_20250505-152737D", "P00 INFO: diff backup size = 9.1KB, file total = 936", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=1301-ccbde6df --log-level-console=info --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-full=2 --stanza=demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "(full|incr|diff) backup" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: aes-256-cbc", "", " db (current)", " wal archive min/max (13): 000000010000000000000001/000000010000000000000005", "", " full backup: 20250505-152734F", " timestamp start/stop: 2025-05-05 
15:27:34+00 / 2025-05-05 15:27:36+00", " wal start/stop: 000000010000000000000002 / 000000010000000000000003", " database size: 23.2MB, database backup size: 23.2MB", " repo1: backup set size: 2.9MB, backup size: 2.9MB", "", " diff backup: 20250505-152734F_20250505-152737D", " timestamp start/stop: 2025-05-05 15:27:37+00 / 2025-05-05 15:27:38+00", " wal start/stop: 000000010000000000000004 / 000000010000000000000005", " database size: 23.2MB, database backup size: 9.1KB", " repo1: backup set size: 2.9MB, backup size: 864B", " backup reference total: 1 full" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres rm /var/lib/pgsql/13/data/global/pg_control" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "err-expect" : "1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl status postgresql-13.service" ], "err-expect" : "3", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "failed" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "postgresql-13.service - PostgreSQL 13 database server", " Loaded: loaded (/usr/lib/systemd/system/postgresql-13.service, disabled)", " Active: failed (failed)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres find /var/lib/pgsql/13/data -mindepth 1 -delete" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -p /var/lib/pgsql/pgbackrest/doc/example" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cp -r /pgbackrest/doc/example/* \\", " /var/lib/pgsql/pgbackrest/doc/example" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat \\", " /var/lib/pgsql/pgbackrest/doc/example/pgsql-pgbackrest-info.sql" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-- An example of monitoring pgBackRest from within PostgreSQL", "--", "-- Use copy to export data from the pgBackRest info command into the jsonb", "-- type so it can be queried directly by PostgreSQL.", "", "-- Create monitor schema", "create schema monitor;", "", "-- Get pgBackRest info in JSON format", "create function monitor.pgbackrest_info()", " returns jsonb AS $$", "declare", " 
data jsonb;", "begin", " -- Create a temp table to hold the JSON data", " create temp table temp_pgbackrest_data (data text);", "", " -- Copy data into the table directly from the pgBackRest info command", " copy temp_pgbackrest_data (data)", " from program", " 'pgbackrest --output=json info' (format text);", "", " select replace(temp_pgbackrest_data.data, E'\\n', '\\n')::jsonb", " into data", " from temp_pgbackrest_data;", "", " drop table temp_pgbackrest_data;", "", " return data;", "end $$ language plpgsql;" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -f \\", " /var/lib/pgsql/pgbackrest/doc/example/pgsql-pgbackrest-info.sql" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat \\", " /var/lib/pgsql/pgbackrest/doc/example/pgsql-pgbackrest-query.sql" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-- Get last successful backup for each stanza", "--", "-- Requires the monitor.pgbackrest_info function.", "with stanza as", "(", " select data->'name' as name,", " data->'backup'->(", " jsonb_array_length(data->'backup') - 1) as last_backup,", " data->'archive'->(", " jsonb_array_length(data->'archive') - 1) as current_archive", " from jsonb_array_elements(monitor.pgbackrest_info()) as data", ")", "select name,", " to_timestamp(", " (last_backup->'timestamp'->>'stop')::numeric) as last_successful_backup,", " current_archive->>'max' as last_archived_wal", " from stanza;" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -f \\", " /var/lib/pgsql/pgbackrest/doc/example/pgsql-pgbackrest-query.sql" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " name | last_successful_backup | last_archived_wal ", "--------+------------------------+--------------------------", " \"demo\" | 2025-05-05 15:27:38+00 | 000000010000000000000005", "(1 row)" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-bundle" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=full backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres find /var/lib/pgbackrest/backup/demo/latest/ -type f | wc -l" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "5" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-block" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", 
"repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --annotation=source=\"demo backup\" \\", " --annotation=key=value --type=full backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152752F" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --set=20250505-152752F info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "annotation" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: aes-256-cbc", "", " db (current)", " wal archive min/max (13): 000000020000000000000007/000000020000000000000009", "", " full backup: 20250505-152752F", " timestamp start/stop: 2025-05-05 15:27:52+00 / 2025-05-05 15:27:53+00", " wal start/stop: 000000020000000000000008 / 000000020000000000000009", " lsn start/stop: 0/8000028 / 0/9000050", " database size: 23.2MB, database backup size: 23.2MB", " repo1: backup size: 2.9MB", " database list: postgres (13383)", " annotation(s)", " key: value", " source: demo backup" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --set=20250505-152752F \\", " --annotation=key= --annotation=new_key=new_value annotate" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --set=20250505-152752F info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "annotation" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: aes-256-cbc", "", " db (current)", " wal archive min/max (13): 000000020000000000000007/000000020000000000000009", "", " full backup: 20250505-152752F", " timestamp start/stop: 2025-05-05 15:27:52+00 / 2025-05-05 15:27:53+00", " wal start/stop: 000000020000000000000008 / 000000020000000000000009", " lsn start/stop: 0/8000028 / 0/9000050", " database size: 23.2MB, database backup size: 23.2MB", " repo1: backup size: 2.9MB", " database list: postgres (13383)", " annotation(s)", " new_key: new_value", " source: demo backup" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-full" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=full \\", " --log-level-console=detail backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "archive retention on backup 20250505-152734F|remove 
archive" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 948 lines of output]", "P00 INFO: repo1: remove expired backup 20250505-152750F", "P00 DETAIL: repo1: 13-1 archive retention on backup 20250505-152752F, start = 000000020000000000000008", "P00 INFO: repo1: 13-1 remove archive, start = 000000020000000000000007, stop = 000000020000000000000007", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152755F" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=full \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "expire full backup set 20250505-152734F|archive retention on backup 20250505-152755F|remove archive" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 11 lines of output]", "P00 INFO: repo1: expire full backup 20250505-152752F", "P00 INFO: repo1: remove expired backup 20250505-152752F", "P00 INFO: repo1: 13-1 remove archive, start = 000000020000000000000008, stop = 000000020000000000000009", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-diff" : { "value" : "1" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=1", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152757F_20250505-152759D" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=incr backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "expire diff backup set 20250505-152757F_20250505-152759D" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 10 lines of output]", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=2302-3a19f28b --log-level-console=info --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest 
--repo1-retention-diff=1 --repo1-retention-full=2 --stanza=demo", "P00 INFO: repo1: expire diff backup set 20250505-152757F_20250505-152759D, 20250505-152757F_20250505-152800I", "P00 INFO: repo1: remove expired backup 20250505-152757F_20250505-152800I", "P00 INFO: repo1: remove expired backup 20250505-152757F_20250505-152759D", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo1-retention-diff" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152757F_20250505-152801D" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select pg_create_restore_point('generate WAL'); select pg_switch_wal(); \\", " select pg_create_restore_point('generate WAL'); select pg_switch_wal();\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=diff \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "new backup label" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 6 lines of output]", "P00 INFO: backup stop archive = 000000020000000000000017, lsn = 0/17000050", "P00 INFO: check archive for segment(s) 000000020000000000000016:000000020000000000000017", "P00 INFO: new backup label = 20250505-152757F_20250505-152803D", "P00 INFO: diff backup size = 11.6KB, file total = 936", "P00 INFO: backup command end: completed successfully", " [filtered 2 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152757F_20250505-152803D" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=detail \\", " --repo1-retention-archive-type=diff --repo1-retention-archive=1 expire" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "archive retention on backup 20250505-152757F_20250505-152801D|remove archive" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: expire command begin 2.55.1: --exec-id=2492-06555233 --log-level-console=detail --no-log-timestamp --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo1-retention-archive=1 --repo1-retention-archive-type=diff --repo1-retention-diff=2 --repo1-retention-full=2 --stanza=demo", "P00 DETAIL: repo1: 13-1 archive retention on backup 
20250505-152755F, start = 00000002000000000000000A, stop = 00000002000000000000000B", "P00 DETAIL: repo1: 13-1 archive retention on backup 20250505-152757F, start = 00000002000000000000000C, stop = 00000002000000000000000D", "P00 DETAIL: repo1: 13-1 archive retention on backup 20250505-152757F_20250505-152801D, start = 000000020000000000000012, stop = 000000020000000000000013", "P00 DETAIL: repo1: 13-1 archive retention on backup 20250505-152757F_20250505-152803D, start = 000000020000000000000016", "P00 INFO: repo1: 13-1 remove archive, start = 00000002000000000000000E, stop = 000000020000000000000011", "P00 INFO: repo1: 13-1 remove archive, start = 000000020000000000000014, stop = 000000020000000000000015", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --log-level-console=detail restore" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "demo\\/PG_VERSION - exists and matches backup|remove invalid files|rename global\\/pg_control" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 2 lines of output]", "P00 DETAIL: check '/var/lib/pgsql/13/data' exists", "P00 DETAIL: remove 'global/pg_control' so cluster will not start if restore does not complete", "P00 INFO: remove invalid files/links/paths from '/var/lib/pgsql/13/data'", "P00 DETAIL: remove invalid file '/var/lib/pgsql/13/data/backup_label.old'", "P00 DETAIL: remove invalid file '/var/lib/pgsql/13/data/base/13383/pg_internal.init'", " [filtered 981 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create database test1;\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE DATABASE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create database test2;\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE DATABASE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create table test1_table (id int); \\", " insert into test1_table (id) values (1);\" test1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "INSERT 0 1" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"create table test2_table (id int); \\", " insert into test2_table (id) values (2);\" test2" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "INSERT 0 1" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=incr backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" 
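The expire output above shows count-based retention at work: with repo1-retention-full=2, the oldest full backup is removed together with any diff/incr backups that depend on it, and WAL older than the oldest retained full is expired. A toy sketch of that selection rule, assuming only the documented label format (<full>F, <full>F_<label>D, <full>F_<label>I); the function is illustrative and ignores diff retention:

# Keep the newest `retention_full` full backups; a diff/incr survives only
# if the full backup it depends on (the label prefix before '_') survives.
def expire_full(labels, retention_full=2):
    fulls = sorted(l for l in labels if l.endswith("F"))
    keep = set(fulls[-retention_full:])
    return [l for l in sorted(labels) if l.split("_")[0] in keep]

print(expire_full([
    "20250505-152734F",
    "20250505-152734F_20250505-152737D",
    "20250505-152752F",
    "20250505-152755F",
]))  # -> ['20250505-152752F', '20250505-152755F']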
}, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -Atc \"select oid from pg_database where datname = 'test1'\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "32768" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres du -sh /var/lib/pgsql/13/data/base/32768" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "7.8M\t/var/lib/pgsql/13/data/base/32768" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152757F_20250505-152813I" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo \\", " --set=20250505-152757F_20250505-152813I info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "database list" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 12 lines of output]", " repo1: backup size: 1.9MB", " backup reference list: 20250505-152757F, 20250505-152757F_20250505-152803D", " database list: postgres (13383), test1 (32768), test2 (32769)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --db-include=test2 --type=immediate --target-action=promote restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from test2_table;\" test2" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " id ", "----", " 2", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from test1_table;\" test1" ], "err-expect" : "2", "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "relation mapping file.*contains invalid data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "psql: error: connection to server on socket \"/run/postgresql/.s.PGSQL.5432\" failed: FATAL: relation mapping file \"base/32768/pg_filenode.map\" contains invalid data" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres du -sh /var/lib/pgsql/13/data/base/32768" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "8.0K\t/var/lib/pgsql/13/data/base/32768" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"drop database test1;\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", 
"value" : { "output" : [ "DROP DATABASE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select oid, datname from pg_database order by oid;\"" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "test2" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " oid | datname ", "-------+-----------", " 1 | template1", " 13382 | template0", " 13383 | postgres", " 32769 | test2", "(4 rows)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"begin; \\", " create table important_table (message text); \\", " insert into important_table values ('Important Data'); \\", " commit; \\", " select * from important_table;\"" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message ", "----------------", " Important Data", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 1" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -Atc \"select current_timestamp\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "2025-05-05 15:28:25.620667+00" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 1" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"begin; \\", " drop table important_table; \\", " commit; \\", " select * from important_table;\"" ], "err-expect" : "1", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "does not exist" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "ERROR: relation \"important_table\" does not exist", "LINE 1: ...le important_table; commit; select * from important_...", " ^" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=incr backup" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest repo-ls backup/demo --filter=\"(F|D|I)$\" --sort=desc | head -1" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "20250505-152757F_20250505-152827I" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "20250505-152757F_20250505-152827I" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 38 lines of output]", " backup reference total: 1 full, 1 diff", "", " incr backup: 20250505-152757F_20250505-152827I", " timestamp start/stop: 2025-05-05 15:28:27+00 / 2025-05-05 15:28:28+00", " wal start/stop: 00000004000000000000001A / 00000004000000000000001A", " [filtered 2 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { 
"bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --set=20250505-152757F_20250505-152827I --target-timeline=current \\", " --type=time \"--target=2025-05-05 15:28:25.620667+00\" --target-action=promote restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/lib/pgsql/13/data/log/postgresql.log" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "err-expect" : "1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/log/postgresql.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "recovery ended before configured recovery target was reached" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 11 lines of output]", "LOG: database system is ready to accept read only connections", "LOG: redo done at 0/1A000100", "FATAL: recovery ended before configured recovery target was reached", "LOG: startup process (PID 3455) exited with exit code 1", "LOG: terminating any other active server processes" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta \\", " --type=time \"--target=2025-05-05 15:28:25.620667+00\" \\", " --target-action=promote restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/lib/pgsql/13/data/log/postgresql.log" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/postgresql.auto.conf" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "recovery_target_time" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 9 lines of output]", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:28:33", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "recovery_target_time = '2025-05-05 15:28:25.620667+00'", "recovery_target_action = 'promote'" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from important_table\"" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message ", "----------------", " Important Data", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/log/postgresql.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "recovery stopping before|last completed 
transaction|starting point-in-time recovery" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 5 lines of output]", "LOG: database system was interrupted; last known up at 2025-05-05 15:28:13 UTC", "LOG: restored log file \"00000004.history\" from archive", "LOG: starting point-in-time recovery to 2025-05-05 15:28:25.620667+00", "LOG: restored log file \"00000004.history\" from archive", "LOG: restored log file \"000000040000000000000019\" from archive", " [filtered 2 lines of output]", "LOG: consistent recovery state reached at 0/19000100", "LOG: database system is ready to accept read only connections", "LOG: recovery stopping before commit of transaction 495, time 2025-05-05 15:28:26.982491+00", "LOG: redo done at 0/1901E348", "LOG: last completed transaction was at log time 2025-05-05 15:28:24.226566+00", "LOG: selected new timeline ID: 5", "LOG: archive recovery complete", "LOG: database system is ready to accept connections" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stop" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stop command begin 2.55.1: --exec-id=3747-c882f699 --log-level-console=info --no-log-timestamp --stanza=demo", "P00 INFO: stop command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=1 \\", " --log-level-console=info stanza-delete" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-delete command begin 2.55.1: --exec-id=3773-0ed7e517 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo=1 --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --stanza=demo", "P00 INFO: stanza-delete command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo2-azure-account" : { "value" : "pgbackrest" }, "repo2-azure-container" : { "value" : "demo-container" }, "repo2-azure-key" : { "value" : "YXpLZXk=" }, "repo2-path" : { "value" : "/demo-repo" }, "repo2-retention-full" : { "value" : "4" }, "repo2-type" : { "value" : "azure" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", 
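A time-based restore only succeeds if the requested target is reachable from the chosen backup set, which is why the first attempt above, pinned with --set to an incremental taken after the target time, fails with "recovery ended before configured recovery target was reached", while the retry without --set lets pgBackRest choose a suitable backup. Once the restore completes, the recovery settings it wrote can be confirmed before starting PostgreSQL; an illustrative check of postgresql.auto.conf (path from the demo):

# Confirm the restore wrote the expected recovery settings, as shown in the
# cat output above.
wanted = {"restore_command", "recovery_target_time", "recovery_target_action"}
settings = {}

with open("/var/lib/pgsql/13/data/postgresql.auto.conf") as f:
    for line in f:
        line = line.strip()
        if "=" in line and not line.startswith("#"):
            key, _, value = line.partition("=")
            settings[key.strip()] = value.strip().strip("'")

print("missing:", wanted - settings.keys() or "none")
print("target:", settings.get("recovery_target_time"))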
"repo2-type=azure", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo \"172.17.0.2 pgbackrest.blob.core.windows.net\" | tee -a /etc/hosts" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres bash -c 'export AZURE_CLI_DISABLE_CONNECTION_VERIFICATION=1;az storage container create -n demo-container \\", " --connection-string \"DefaultEndpointsProtocol=https;AccountName=pgbackrest;AccountKey=YXpLZXk=\" \\", " 2>&1'" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-create command begin 2.55.1: --exec-id=3939-d3c96342 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo2-type=azure --stanza=demo", "P00 INFO: stanza-create for stanza 'demo' on repo1", "P00 INFO: stanza-create for stanza 'demo' on repo2", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=2 \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=3966-a598a23b --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo=2 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-block --repo1-bundle --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo2-type=azure --stanza=demo --start-fast", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000005000000000000001B, lsn = 0/1B000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 00000005000000000000001B:00000005000000000000001B", "P00 INFO: new backup label = 20250505-152851F", "P00 INFO: full backup size = 30.8MB, file total = 1229", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=3966-a598a23b --log-level-console=info --no-log-timestamp --repo=2 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo2-type=azure --stanza=demo" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : 
{ "repo3-path" : { "value" : "/demo-repo" }, "repo3-retention-full" : { "value" : "4" }, "repo3-s3-bucket" : { "value" : "demo-bucket" }, "repo3-s3-endpoint" : { "value" : "s3.us-east-1.amazonaws.com" }, "repo3-s3-key" : { "value" : "accessKey1" }, "repo3-s3-key-secret" : { "value" : "verySecretKey1" }, "repo3-s3-region" : { "value" : "us-east-1" }, "repo3-type" : { "value" : "s3" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "repo3-path=/demo-repo", "repo3-retention-full=4", "repo3-s3-bucket=demo-bucket", "repo3-s3-endpoint=s3.us-east-1.amazonaws.com", "repo3-s3-key=accessKey1", "repo3-s3-key-secret=verySecretKey1", "repo3-s3-region=us-east-1", "repo3-type=s3", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo \"172.17.0.3 demo-bucket.s3.us-east-1.amazonaws.com s3.us-east-1.amazonaws.com\" | tee -a /etc/hosts" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mc --insecure alias set s3 https://127.0.0.1 accessKey1 verySecretKey1" ], "host" : "s3-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mc --insecure mb --with-versioning s3/demo-bucket" ], "host" : "s3-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", "P00 INFO: stanza 'demo' already exists on repo2 and is valid", "P00 INFO: stanza-create for stanza 'demo' on repo3", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=4089-773fbc3c --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo=3 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-block --repo1-bundle --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo2-type=azure --repo3-type=s3 --stanza=demo --start-fast", 
"P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000005000000000000001C, lsn = 0/1C000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 00000005000000000000001C:00000005000000000000001D", "P00 INFO: new backup label = 20250505-152901F", "P00 INFO: full backup size = 30.8MB, file total = 1229", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=4089-773fbc3c --log-level-console=info --no-log-timestamp --repo=3 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo2-type=azure --repo3-type=s3 --stanza=demo" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -Atc \"select date_trunc('seconds', current_timestamp)\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "2025-05-05 15:29:07+00" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "process-max" : { "value" : "4" }, "repo4-bundle" : { "value" : "y" }, "repo4-path" : { "value" : "/demo-repo" }, "repo4-sftp-host" : { "value" : "sftp-server" }, "repo4-sftp-host-key-hash-type" : { "value" : "sha1" }, "repo4-sftp-host-user" : { "value" : "pgbackrest" }, "repo4-sftp-private-key-file" : { "value" : "/var/lib/pgsql/.ssh/id_rsa_sftp" }, "repo4-sftp-public-key-file" : { "value" : "/var/lib/pgsql/.ssh/id_rsa_sftp.pub" }, "repo4-type" : { "value" : "sftp" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "process-max=4", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "repo3-path=/demo-repo", "repo3-retention-full=4", "repo3-s3-bucket=demo-bucket", "repo3-s3-endpoint=s3.us-east-1.amazonaws.com", "repo3-s3-key=accessKey1", "repo3-s3-key-secret=verySecretKey1", "repo3-s3-region=us-east-1", "repo3-type=s3", "repo4-bundle=y", "repo4-path=/demo-repo", "repo4-sftp-host=sftp-server", "repo4-sftp-host-key-hash-type=sha1", "repo4-sftp-host-user=pgbackrest", "repo4-sftp-private-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp", "repo4-sftp-public-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp.pub", "repo4-type=sftp", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -m 750 -p /var/lib/pgsql/.ssh" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres ssh-keygen -f /var/lib/pgsql/.ssh/id_rsa_sftp \\", " -t rsa -b 4096 -N \"\" -m PEM" ], 
"host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "id -u pgbackrest > /dev/null 2>&1 || adduser -n pgbackrest" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mkdir -m 750 -p /demo-repo && chown pgbackrest /demo-repo" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest mkdir -m 750 -p /home/pgbackrest/.ssh" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "(sudo ssh root@pg-primary cat /var/lib/pgsql/.ssh/id_rsa_sftp.pub) | \\", " sudo -u pgbackrest tee -a /home/pgbackrest/.ssh/authorized_keys" ], "host" : "sftp-server", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "ssh-keyscan -H sftp-server >> /var/lib/pgsql/.ssh/known_hosts 2>/dev/null" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "postgres" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 6 lines of output]", "P00 INFO: stanza 'demo' already exists on repo3 and is valid", "P00 INFO: stanza-create for stanza 'demo' on repo4", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=4 \\", " --log-level-console=info backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "no prior backup exists|full backup size" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: backup command begin 2.55.1: --exec-id=4322-888f0cdd --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --process-max=4 --repo=4 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-block --repo1-bundle --repo4-bundle --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo4-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo4-sftp-host=sftp-server --repo4-sftp-host-key-hash-type=sha1 --repo4-sftp-host-user=pgbackrest --repo4-sftp-private-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp --repo4-sftp-public-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp.pub --repo2-type=azure --repo3-type=s3 --repo4-type=sftp --stanza=demo --start-fast", "P00 WARN: option 'repo4-retention-full' is not set for 'repo4-retention-full-type=count', the repository may run out of space", " HINT: to retain full backups indefinitely (without warning), set option 'repo4-retention-full' to the maximum.", "P00 WARN: no prior backup exists, incr backup has been changed to full", "P00 INFO: execute non-exclusive backup 
start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000005000000000000001E, lsn = 0/1E000028", " [filtered 3 lines of output]", "P00 INFO: check archive for segment(s) 00000005000000000000001E:00000005000000000000001F", "P00 INFO: new backup label = 20250505-152911F", "P00 INFO: full backup size = 30.8MB, file total = 1229", "P00 INFO: backup command end: completed successfully", "P00 INFO: expire command begin 2.55.1: --exec-id=4322-888f0cdd --log-level-console=info --no-log-timestamp --repo=4 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo4-path=/demo-repo --repo1-retention-diff=2 --repo1-retention-full=2 --repo2-retention-full=4 --repo3-retention-full=4 --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo4-sftp-host=sftp-server --repo4-sftp-host-key-hash-type=sha1 --repo4-sftp-host-user=pgbackrest --repo4-sftp-private-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp --repo4-sftp-public-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp.pub --repo2-type=azure --repo3-type=s3 --repo4-type=sftp --stanza=demo", "P00 INFO: expire command end: completed successfully" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "repo5-gcs-bucket" : { "value" : "demo-bucket" }, "repo5-gcs-key" : { "value" : "/etc/pgbackrest/gcs-key.json" }, "repo5-path" : { "value" : "/demo-repo" }, "repo5-type" : { "value" : "gcs" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "process-max=4", "repo1-block=y", "repo1-bundle=y", "repo1-cipher-pass=zWaf6XtpjIVZC5444yXB+cgFDFl7MxGlgkZSaoPvTGirhPygu4jOKOXf9LO4vjfO", "repo1-cipher-type=aes-256-cbc", "repo1-path=/var/lib/pgbackrest", "repo1-retention-diff=2", "repo1-retention-full=2", "repo2-azure-account=pgbackrest", "repo2-azure-container=demo-container", "repo2-azure-key=YXpLZXk=", "repo2-path=/demo-repo", "repo2-retention-full=4", "repo2-type=azure", "repo3-path=/demo-repo", "repo3-retention-full=4", "repo3-s3-bucket=demo-bucket", "repo3-s3-endpoint=s3.us-east-1.amazonaws.com", "repo3-s3-key=accessKey1", "repo3-s3-key-secret=verySecretKey1", "repo3-s3-region=us-east-1", "repo3-type=s3", "repo4-bundle=y", "repo4-path=/demo-repo", "repo4-sftp-host=sftp-server", "repo4-sftp-host-key-hash-type=sha1", "repo4-sftp-host-user=pgbackrest", "repo4-sftp-private-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp", "repo4-sftp-public-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp.pub", "repo4-type=sftp", "repo5-gcs-bucket=demo-bucket", "repo5-gcs-key=/etc/pgbackrest/gcs-key.json", "repo5-path=/demo-repo", "repo5-type=gcs", "start-fast=y", "", "[global:archive-push]", "compress-level=3" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 1" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { 
"bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 stanza-delete" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "missing stanza data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: error (missing stanza data)", " cipher: none" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "mc ls --versions s3/demo-bucket/demo-repo/backup/demo/backup.info" ], "cmd-extra" : "--insecure", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "backup\\.info$" ] }, "host" : "s3-server", "load-env" : true, "output" : true, "run-as-user" : "root" }, "type" : "exe", "value" : { "output" : [ "[2025-05-05 15:29:18 UTC] 0B STANDARD c89dad87-2067-4ef3-bcc6-b10dc276ab3f v3 DEL backup.info", "[2025-05-05 15:29:07 UTC] 1.0KiB STANDARD 81c92a67-8cf3-479e-9d98-ed6bb81a5c89 v2 PUT backup.info", "[2025-05-05 15:29:01 UTC] 372B STANDARD b7196686-d823-4b6b-83fc-c5c6532d221c v1 PUT backup.info", "[2025-05-05 15:29:18 UTC] 0B STANDARD e61860f5-f8b6-4c70-95cb-d3532e97124f v3 DEL backup.info.copy", "[2025-05-05 15:29:07 UTC] 1.0KiB STANDARD 70489080-5a15-48f2-be78-80ab11f54118 v2 PUT backup.info.copy" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 \\", " --repo-target-time=\"2025-05-05 15:29:07+00\" info" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "full backup" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 5 lines of output]", " wal archive min/max (13): 00000005000000000000001C/00000005000000000000001D", "", " full backup: 20250505-152901F", " timestamp start/stop: 2025-05-05 15:29:01+00 / 2025-05-05 15:29:06+00", " wal start/stop: 00000005000000000000001C / 00000005000000000000001D", " repo3: backup set size: 3.8MB, backup size: 3.8MB" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --repo=3 --delta \\", " --repo-target-time=\"2025-05-05 15:29:07+00\" --log-level-console=info restore" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ " restore backup set " ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: restore command begin 2.55.1: --delta --exec-id=4555-c667fd71 --log-level-console=info --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --process-max=4 --repo=3 --repo2-azure-account= --repo2-azure-container=demo-container --repo2-azure-key= --repo1-cipher-pass= --repo1-cipher-type=aes-256-cbc --repo5-gcs-bucket=demo-bucket --repo5-gcs-key= --repo1-path=/var/lib/pgbackrest --repo2-path=/demo-repo --repo3-path=/demo-repo --repo4-path=/demo-repo --repo5-path=/demo-repo --repo3-s3-bucket=demo-bucket --repo3-s3-endpoint=s3.us-east-1.amazonaws.com --repo3-s3-key= --repo3-s3-key-secret= --repo3-s3-region=us-east-1 --repo4-sftp-host=sftp-server --repo4-sftp-host-key-hash-type=sha1 --repo4-sftp-host-user=pgbackrest --repo4-sftp-private-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp --repo4-sftp-public-key-file=/var/lib/pgsql/.ssh/id_rsa_sftp.pub --repo-target-time=\"2025-05-05 15:29:07+00\" --repo2-type=azure --repo3-type=s3 --repo4-type=sftp 
--repo5-type=gcs --stanza=demo", "P00 INFO: repo3: restore backup set 20250505-152901F, recovery will start at 2025-05-05 15:29:01", "P00 INFO: remove invalid files/links/paths from '/var/lib/pgsql/13/data'", "P00 INFO: write updated /var/lib/pgsql/13/data/postgresql.auto.conf", " [filtered 2 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "id" : "repo1", "image" : "pgbackrest/doc:rhel", "name" : "repository", "option" : "-m 512m", "os" : "rhel", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.7" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo groupadd pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo adduser -gpgbackrest -n pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo yum install postgresql-libs libssh2" ], "cmd-extra" : "-y 2>&1", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown pgbackrest:pgbackrest /var/log/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown pgbackrest:pgbackrest /etc/pgbackrest/pgbackrest.conf" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /var/lib/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 750 /var/lib/pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown pgbackrest:pgbackrest /var/lib/pgbackrest" ], "host" : "repository", 
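The sequence above provisions the dedicated repository host: install the pgbackrest binary, then create the log directory (770), the configuration directory and file (640), and the repository path (750), all owned by pgbackrest:pgbackrest. The same layout in an illustrative Python form (run as root, like the shell commands; modes copied from the chmod calls above):

import os
import shutil

# /etc/pgbackrest and conf.d keep their default creation mode, as in the demo.
os.makedirs("/etc/pgbackrest/conf.d", exist_ok=True)

for path, mode in (("/var/log/pgbackrest", 0o770),
                   ("/var/lib/pgbackrest", 0o750)):
    os.makedirs(path, exist_ok=True)
    os.chmod(path, mode)
    shutil.chown(path, "pgbackrest", "pgbackrest")

conf = "/etc/pgbackrest/pgbackrest.conf"
open(conf, "a").close()               # touch
os.chmod(conf, 0o640)
shutil.chown(conf, "pgbackrest", "pgbackrest")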
"load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "global" : { "repo1-path" : { "value" : "/var/lib/pgbackrest" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[global]", "repo1-path=/var/lib/pgbackrest" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo" : { "pg1-host" : { "value" : "pg-primary" }, "pg1-host-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "pg1-host-cert-file" : { "value" : "/etc/pgbackrest/cert/client.crt" }, "pg1-host-key-file" : { "value" : "/etc/pgbackrest/cert/client.key" }, "pg1-host-type" : { "value" : "tls" }, "pg1-path" : { "value" : "/var/lib/pgsql/13/data" } }, "global" : { "log-timestamp" : { "value" : "n" }, "repo1-retention-full" : { "value" : "2" }, "start-fast" : { "value" : "y" }, "tls-server-address" : { "value" : "*" }, "tls-server-auth" : { "value" : "pgbackrest-client=*" }, "tls-server-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "tls-server-cert-file" : { "value" : "/etc/pgbackrest/cert/server.crt" }, "tls-server-key-file" : { "value" : "/etc/pgbackrest/cert/server.key" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "tls-server-address=*", "tls-server-auth=pgbackrest-client=*", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/pgsql/13/data" } }, "global" : { "log-level-file" : { "value" : "detail" }, "log-timestamp" : { "value" : "n" }, "repo1-host" : { "value" : "repository" }, "repo1-host-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "repo1-host-cert-file" : { "value" : "/etc/pgbackrest/cert/client.crt" }, "repo1-host-key-file" : { "value" : "/etc/pgbackrest/cert/client.key" }, "repo1-host-type" : { "value" : "tls" }, "tls-server-address" : { "value" : "*" }, "tls-server-auth" : { "value" : "pgbackrest-client=demo" }, "tls-server-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "tls-server-cert-file" : { "value" : "/etc/pgbackrest/cert/server.crt" }, "tls-server-key-file" : { "value" : "/etc/pgbackrest/cert/server.key" } } }, "reset" : true }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "mkdir -p -m 770 /etc/pgbackrest/cert && \\", " cp /pgbackrest/doc/resource/fake-cert/ca.crt \\", " /etc/pgbackrest/cert/ca.crt && \\", " \\", " openssl genrsa -out 
/etc/pgbackrest/cert/server.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/server.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/server.csr \\", " -key /etc/pgbackrest/cert/server.key -subj \"/CN=repository\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/server.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/server.crt -days 9 2>&1 && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/client.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/client.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/client.csr \\", " -key /etc/pgbackrest/cert/client.key -subj \"/CN=pgbackrest-client\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/client.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/client.crt -days 9 2>&1 && \\", " \\", " chown -R pgbackrest /etc/pgbackrest/cert" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo '[Unit]' | tee /etc/systemd/system/pgbackrest.service && \\", " echo 'Description=pgBackRest Server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'After=network.target' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'StartLimitIntervalSec=0' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Service]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Type=simple' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Restart=always' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'RestartSec=1' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'User=pgbackrest' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStart=/usr/bin/pgbackrest server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/sleep 3' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/bash -c \"[ ! -z $MAINPID ]\"' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecReload=/bin/kill -HUP $MAINPID' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Install]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'WantedBy=multi-user.target' | tee -a /etc/systemd/system/pgbackrest.service" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cat /etc/systemd/system/pgbackrest.service" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "[Unit]", "Description=pgBackRest Server", "After=network.target", "StartLimitIntervalSec=0", "", "[Service]", "Type=simple", "Restart=always", "RestartSec=1", "User=pgbackrest", "ExecStart=/usr/bin/pgbackrest server", "ExecStartPost=/bin/sleep 3", "ExecStartPost=/bin/bash -c \"[ ! 
-z $MAINPID ]\"", "ExecReload=/bin/kill -HUP $MAINPID", "", "[Install]", "WantedBy=multi-user.target" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl enable pgbackrest" ], "cmd-extra" : "2>&1", "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start pgbackrest" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "mkdir -p -m 770 /etc/pgbackrest/cert && \\", " cp /pgbackrest/doc/resource/fake-cert/ca.crt \\", " /etc/pgbackrest/cert/ca.crt && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/server.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/server.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/server.csr \\", " -key /etc/pgbackrest/cert/server.key -subj \"/CN=pg-primary\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/server.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/server.crt -days 9 2>&1 && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/client.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/client.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/client.csr \\", " -key /etc/pgbackrest/cert/client.key -subj \"/CN=pgbackrest-client\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/client.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/client.crt -days 9 2>&1 && \\", " \\", " chown -R postgres /etc/pgbackrest/cert" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo '[Unit]' | tee /etc/systemd/system/pgbackrest.service && \\", " echo 'Description=pgBackRest Server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'After=network.target' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'StartLimitIntervalSec=0' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Service]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Type=simple' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Restart=always' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'RestartSec=1' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'User=postgres' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStart=/usr/bin/pgbackrest server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/sleep 3' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/bash -c \"[ ! 
-z $MAINPID ]\"' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecReload=/bin/kill -HUP $MAINPID' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Install]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'WantedBy=multi-user.target' | tee -a /etc/systemd/system/pgbackrest.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cat /etc/systemd/system/pgbackrest.service" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "[Unit]", "Description=pgBackRest Server", "After=network.target", "StartLimitIntervalSec=0", "", "[Service]", "Type=simple", "Restart=always", "RestartSec=1", "User=postgres", "ExecStart=/usr/bin/pgbackrest server", "ExecStartPost=/bin/sleep 3", "ExecStartPost=/bin/bash -c \"[ ! -z $MAINPID ]\"", "ExecReload=/bin/kill -HUP $MAINPID", "", "[Install]", "WantedBy=multi-user.target" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl enable pgbackrest" ], "cmd-extra" : "2>&1", "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo stanza-create" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo check" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo check" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo backup" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: no prior backup exists, incr backup has been changed to full" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta restore" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --type=full backup" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "global" : { "process-max" : { "value" : "3" } } } }, "type" : "cfg-pgbackrest", 
"value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "tls-server-address=*", "tls-server-auth=pgbackrest-client=*", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --type=full backup" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest info" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "timestamp start/stop" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "stanza: demo", " status: ok", " cipher: none", "", " db (current)", " wal archive min/max (13): 000000070000000000000023/000000070000000000000025", "", " full backup: 20250505-153034F", " timestamp start/stop: 2025-05-05 15:30:34+00 / 2025-05-05 15:30:37+00", " wal start/stop: 000000070000000000000023 / 000000070000000000000023", " database size: 30.8MB, database backup size: 30.8MB", " repo1: backup set size: 3.8MB, backup size: 3.8MB", "", " full backup: 20250505-153038F", " timestamp start/stop: 2025-05-05 15:30:38+00 / 2025-05-05 15:30:41+00", " wal start/stop: 000000070000000000000024 / 000000070000000000000025", " database size: 30.8MB, database backup size: 30.8MB", " repo1: backup set size: 3.8MB, backup size: 3.8MB" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo backup" ], "err-expect" : "56", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "\\: stop file exists for all stanzas" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: unable to check pg1: [StopError] raised from remote-0 tls protocol on 'pg-primary': stop file exists for all stanzas", "P00 ERROR: [056]: unable to find primary cluster - cannot proceed", " HINT: are all available clusters in recovery?" 
] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest stop" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: stop file already exists for all stanzas" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo stop" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo backup" ], "err-expect" : "56", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "\\: stop file exists for stanza demo" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: unable to check pg1: [StopError] raised from remote-0 tls protocol on 'pg-primary': stop file exists for stanza demo", "P00 ERROR: [056]: unable to find primary cluster - cannot proceed", " HINT: are all available clusters in recovery?" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo start" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "id" : "pg2", "image" : "pgbackrest/doc:rhel", "name" : "pg-standby", "option" : "-m 512m", "os" : "rhel", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.8" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo yum install postgresql-libs libssh2" ], "cmd-extra" : "-y 2>&1", "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/log/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-standby", 
"load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/pgsql/13/data" } }, "global" : { "log-level-file" : { "value" : "detail" }, "log-timestamp" : { "value" : "n" }, "repo1-host" : { "value" : "repository" }, "repo1-host-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "repo1-host-cert-file" : { "value" : "/etc/pgbackrest/cert/client.crt" }, "repo1-host-key-file" : { "value" : "/etc/pgbackrest/cert/client.key" }, "repo1-host-type" : { "value" : "tls" }, "tls-server-address" : { "value" : "*" }, "tls-server-auth" : { "value" : "pgbackrest-client=demo" }, "tls-server-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "tls-server-cert-file" : { "value" : "/etc/pgbackrest/cert/server.crt" }, "tls-server-key-file" : { "value" : "/etc/pgbackrest/cert/server.key" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "mkdir -p -m 770 /etc/pgbackrest/cert && \\", " cp /pgbackrest/doc/resource/fake-cert/ca.crt \\", " /etc/pgbackrest/cert/ca.crt && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/server.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/server.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/server.csr \\", " -key /etc/pgbackrest/cert/server.key -subj \"/CN=pg-standby\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/server.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/server.crt -days 9 2>&1 && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/client.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/client.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/client.csr \\", " -key /etc/pgbackrest/cert/client.key -subj \"/CN=pgbackrest-client\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/client.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/client.crt -days 9 2>&1 && \\", " \\", " chown -R postgres /etc/pgbackrest/cert" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo '[Unit]' | tee /etc/systemd/system/pgbackrest.service && \\", " echo 'Description=pgBackRest Server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'After=network.target' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'StartLimitIntervalSec=0' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Service]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Type=simple' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Restart=always' | tee -a 
/etc/systemd/system/pgbackrest.service && \\", " echo 'RestartSec=1' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'User=postgres' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStart=/usr/bin/pgbackrest server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/sleep 3' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/bash -c \"[ ! -z $MAINPID ]\"' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecReload=/bin/kill -HUP $MAINPID' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Install]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'WantedBy=multi-user.target' | tee -a /etc/systemd/system/pgbackrest.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cat /etc/systemd/system/pgbackrest.service" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "[Unit]", "Description=pgBackRest Server", "After=network.target", "StartLimitIntervalSec=0", "", "[Service]", "Type=simple", "Restart=always", "RestartSec=1", "User=postgres", "ExecStart=/usr/bin/pgbackrest server", "ExecStartPost=/bin/sleep 3", "ExecStartPost=/bin/bash -c \"[ ! -z $MAINPID ]\"", "ExecReload=/bin/kill -HUP $MAINPID", "", "[Install]", "WantedBy=multi-user.target" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl enable pgbackrest" ], "cmd-extra" : "2>&1", "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -p -m 700 /var/lib/pgsql/13/data" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=standby restore" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/postgresql.auto.conf" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "# Do not edit this file manually!", "# It will be overwritten by the ALTER SYSTEM command.", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:27:43", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:28:07", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:28:33", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "# Removed by pgBackRest restore on 2025-05-05 15:29:21 # recovery_target_time = '2025-05-05 15:28:25.620667+00'", "# Removed by pgBackRest restore on 2025-05-05 15:29:21 # recovery_target_action = 'promote'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:29:21", "restore_command = 'pgbackrest --repo=3 --repo-target-time=\"2025-05-05 15:29:07+00\" --stanza=demo archive-get %f 
\"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:30:29", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:31:33", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /var/lib/pgsql/13/data/postgresql.conf" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/var/lib/pgsql/13/data/postgresql.conf", "host" : "pg-standby", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo archive-push %p'" }, "archive_mode" : { "value" : "on" }, "hot_standby" : { "value" : "on" }, "log_filename" : { "value" : "'postgresql.log'" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "hot_standby = on", "log_filename = 'postgresql.log'", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/lib/pgsql/13/data/log/postgresql.log" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/log/postgresql.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "entering standby mode|database system is ready to accept read only connections" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 4 lines of output]", "LOG: listening on Unix socket \"/tmp/.s.PGSQL.5432\"", "LOG: database system was interrupted; last known up at 2025-05-05 15:30:38 UTC", "LOG: entering standby mode", "LOG: restored log file \"00000007.history\" from archive", "LOG: restored log file \"000000070000000000000024\" from archive", "LOG: redo starts at 0/24000028", "LOG: restored log file \"000000070000000000000025\" from archive", "LOG: consistent recovery state reached at 0/25000050", "LOG: database system is ready to accept read only connections" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " begin; \\", " create table replicated_table (message text); \\", " insert into replicated_table values ('Important Data'); \\", " commit; \\", " select * from replicated_table\";" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message ", "----------------", " Important Data", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select * from replicated_table;\"" ], "err-expect" : "1", "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "does not exist" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { 
"output" : [ "ERROR: relation \"replicated_table\" does not exist", "LINE 1: select * from replicated_table;", " ^" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"select *, current_timestamp from pg_switch_wal()\";" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " pg_switch_wal | current_timestamp ", "---------------+-------------------------------", " 0/26017738 | 2025-05-05 15:31:39.445111+00", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select *, current_timestamp from replicated_table\"" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message | current_timestamp ", "----------------+-------------------------------", " Important Data | 2025-05-05 15:31:40.659536+00", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "because this is a standby" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=1197-5567e589 --log-level-console=info --log-level-file=detail --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --stanza=demo", "P00 INFO: check repo1 (standby)", "P00 INFO: switch wal not performed because this is a standby", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " create user replicator password 'jw8s0F4' replication\";" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "CREATE ROLE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sh -c 'echo \\", " \"host replication replicator 172.17.0.8/32 md5\" \\", " >> /var/lib/pgsql/13/data/pg_hba.conf'" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl reload postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "demo" : { "recovery-option" : { "value" : "primary_conninfo=host=172.17.0.6 port=5432 user=replicator" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "recovery-option=primary_conninfo=host=172.17.0.6 port=5432 user=replicator", "", "[global]", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { 
"bash-wrap" : true, "cmd" : [ "sudo -u postgres sh -c 'echo \\", " \"172.17.0.6:*:replication:replicator:jw8s0F4\" \\", " >> /var/lib/pgsql/.pgpass'" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres chmod 600 /var/lib/pgsql/.pgpass" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --delta --type=standby restore" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/postgresql.auto.conf" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "# Do not edit this file manually!", "# It will be overwritten by the ALTER SYSTEM command.", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:27:43", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:28:07", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:28:33", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "# Removed by pgBackRest restore on 2025-05-05 15:29:21 # recovery_target_time = '2025-05-05 15:28:25.620667+00'", "# Removed by pgBackRest restore on 2025-05-05 15:29:21 # recovery_target_action = 'promote'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:29:21", "restore_command = 'pgbackrest --repo=3 --repo-target-time=\"2025-05-05 15:29:07+00\" --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:30:29", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'", "", "# Recovery settings generated by pgBackRest restore on 2025-05-05 15:31:46", "primary_conninfo = 'host=172.17.0.6 port=5432 user=replicator'", "restore_command = 'pgbackrest --stanza=demo archive-get %f \"%p\"'" ] } }, { "key" : { "file" : "/var/lib/pgsql/13/data/postgresql.conf", "host" : "pg-standby", "option" : { "hot_standby" : { "value" : "on" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "hot_standby = on", "log_filename = 'postgresql.log'", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm /var/lib/pgsql/13/data/log/postgresql.log" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-13.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/lib/pgsql/13/data/log/postgresql.log" ], "highlight" : { "filter" : true, "filter-context" 
: 2, "list" : [ "started streaming WAL from primary" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 12 lines of output]", "LOG: database system is ready to accept read only connections", "LOG: restored log file \"000000070000000000000026\" from archive", "LOG: started streaming WAL from primary at 0/27000000 on timeline 7" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " begin; \\", " create table stream_table (message text); \\", " insert into stream_table values ('Important Data'); \\", " commit; \\", " select *, current_timestamp from stream_table\";" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message | current_timestamp ", "----------------+-------------------------------", " Important Data | 2025-05-05 15:31:52.190109+00", "(1 row)" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select *, current_timestamp from stream_table\"" ], "highlight" : { "filter" : false, "filter-context" : 2, "list" : [ "Important Data" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " message | current_timestamp ", "----------------+-------------------------------", " Important Data | 2025-05-05 15:31:52.512491+00", "(1 row)" ] } }, { "key" : { "id" : "pgalt", "image" : "pgbackrest/doc:rhel", "name" : "pg-alt", "option" : "-m 512m", "os" : "rhel", "update-hosts" : true }, "type" : "host", "value" : { "ip" : "172.17.0.9" } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo yum install postgresql-libs libssh2" ], "cmd-extra" : "-y 2>&1", "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo scp build:/build/pgbackrest/src/pgbackrest /usr/bin" ], "cmd-extra" : "2>&1", "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 755 /usr/bin/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 770 /var/log/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/log/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p /etc/pgbackrest/conf.d" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo touch /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chmod 640 /etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres 
/etc/pgbackrest/pgbackrest.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-alt", "option" : { "demo-alt" : { "pg1-path" : { "value" : "/var/lib/pgsql/13/data" } }, "global" : { "log-level-file" : { "value" : "detail" }, "log-timestamp" : { "value" : "n" }, "repo1-host" : { "value" : "repository" }, "repo1-host-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "repo1-host-cert-file" : { "value" : "/etc/pgbackrest/cert/client.crt" }, "repo1-host-key-file" : { "value" : "/etc/pgbackrest/cert/client.key" }, "repo1-host-type" : { "value" : "tls" }, "tls-server-address" : { "value" : "*" }, "tls-server-auth" : { "value" : "pgbackrest-client=demo-alt" }, "tls-server-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "tls-server-cert-file" : { "value" : "/etc/pgbackrest/cert/server.crt" }, "tls-server-key-file" : { "value" : "/etc/pgbackrest/cert/server.key" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo-alt]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo-alt", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo-alt" : { "pg1-host" : { "value" : "pg-alt" }, "pg1-host-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "pg1-host-cert-file" : { "value" : "/etc/pgbackrest/cert/client.crt" }, "pg1-host-key-file" : { "value" : "/etc/pgbackrest/cert/client.key" }, "pg1-host-type" : { "value" : "tls" }, "pg1-path" : { "value" : "/var/lib/pgsql/13/data" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[demo-alt]", "pg1-host=pg-alt", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "tls-server-address=*", "tls-server-auth=pgbackrest-client=*", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "mkdir -p -m 770 /etc/pgbackrest/cert && \\", " cp /pgbackrest/doc/resource/fake-cert/ca.crt \\", " /etc/pgbackrest/cert/ca.crt && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/server.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/server.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/server.csr \\", " -key /etc/pgbackrest/cert/server.key -subj \"/CN=pg-alt\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/server.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key 
-CAcreateserial \\", " -out /etc/pgbackrest/cert/server.crt -days 9 2>&1 && \\", " \\", " openssl genrsa -out /etc/pgbackrest/cert/client.key 2048 2>&1 && \\", " chmod 600 /etc/pgbackrest/cert/client.key && \\", " openssl req -new -sha256 -nodes -out /etc/pgbackrest/cert/client.csr \\", " -key /etc/pgbackrest/cert/client.key -subj \"/CN=pgbackrest-client\" 2>&1 && \\", " openssl x509 -req -in /etc/pgbackrest/cert/client.csr \\", " -CA /etc/pgbackrest/cert/ca.crt \\", " -CAkey /pgbackrest/doc/resource/fake-cert/ca.key -CAcreateserial \\", " -out /etc/pgbackrest/cert/client.crt -days 9 2>&1 && \\", " \\", " chown -R postgres /etc/pgbackrest/cert" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "echo '[Unit]' | tee /etc/systemd/system/pgbackrest.service && \\", " echo 'Description=pgBackRest Server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'After=network.target' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'StartLimitIntervalSec=0' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Service]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Type=simple' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'Restart=always' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'RestartSec=1' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'User=postgres' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStart=/usr/bin/pgbackrest server' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/sleep 3' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecStartPost=/bin/bash -c \"[ ! -z $MAINPID ]\"' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'ExecReload=/bin/kill -HUP $MAINPID' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo '[Install]' | tee -a /etc/systemd/system/pgbackrest.service && \\", " echo 'WantedBy=multi-user.target' | tee -a /etc/systemd/system/pgbackrest.service" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cat /etc/systemd/system/pgbackrest.service" ], "host" : "pg-alt", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "[Unit]", "Description=pgBackRest Server", "After=network.target", "StartLimitIntervalSec=0", "", "[Service]", "Type=simple", "Restart=always", "RestartSec=1", "User=postgres", "ExecStart=/usr/bin/pgbackrest server", "ExecStartPost=/bin/sleep 3", "ExecStartPost=/bin/bash -c \"[ ! 
-z $MAINPID ]\"", "ExecReload=/bin/kill -HUP $MAINPID", "", "[Install]", "WantedBy=multi-user.target" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl enable pgbackrest" ], "cmd-extra" : "2>&1", "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start pgbackrest" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres /usr/pgsql-13/bin/initdb \\", " -D /var/lib/pgsql/13/data -k -A peer" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /var/lib/pgsql/13/data/postgresql.conf" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/var/lib/pgsql/13/data/postgresql.conf", "host" : "pg-alt", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo-alt archive-push %p'" }, "archive_mode" : { "value" : "on" }, "log_filename" : { "value" : "'postgresql.log'" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo-alt archive-push %p'", "archive_mode = on", "log_filename = 'postgresql.log'", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl restart postgresql-13.service" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-alt", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo-alt --log-level-console=info stanza-create" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-alt", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-create command begin 2.55.1: --exec-id=960-376c0d96 --log-level-console=info --log-level-file=detail --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --stanza=demo-alt", "P00 INFO: stanza-create for stanza 'demo-alt' on repo1", "P00 INFO: stanza-create command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "check stanza | successfully archived to " ] }, "host" : "pg-alt", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=987-2341c8d3 --log-level-console=info --log-level-file=detail --no-log-timestamp --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls", "P00 INFO: check stanza 'demo-alt'", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL 
(primary)", "P00 INFO: WAL segment 000000010000000000000001 successfully archived to '/var/lib/pgbackrest/archive/demo-alt/13-1/0000000100000000/000000010000000000000001-fdc4d128665b12cd6451f2a34646fc503687864c.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "check stanza | successfully archived to " ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=1331-582c5602 --log-level-console=info --no-log-timestamp --repo1-path=/var/lib/pgbackrest", "P00 INFO: check stanza 'demo'", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000070000000000000027 successfully archived to '/var/lib/pgbackrest/archive/demo/13-1/0000000700000000/000000070000000000000027-6ec3f77c103df27e1c5c2176ba20a25be52f4c59.gz' on repo1", "P00 INFO: check stanza 'demo-alt'", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 000000010000000000000002 successfully archived to '/var/lib/pgbackrest/archive/demo-alt/13-1/0000000100000000/000000010000000000000002-1ca03294e18dcc858dcb227abf90a4f2a6bb094d.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 750 /var/spool/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/spool/pgbackrest" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo mkdir -p -m 750 /var/spool/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo chown postgres:postgres /var/spool/pgbackrest" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "global" : { "archive-async" : { "value" : "y" }, "spool-path" : { "value" : "/var/spool/pgbackrest" } }, "global:archive-get" : { "process-max" : { "value" : "2" } }, "global:archive-push" : { "process-max" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "spool-path=/var/spool/pgbackrest", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "global" : { "archive-async" : { "value" : "y" }, "spool-path" : { "value" : 
"/var/spool/pgbackrest" } }, "global:archive-get" : { "process-max" : { "value" : "2" } }, "global:archive-push" : { "process-max" : { "value" : "2" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/13/data", "recovery-option=primary_conninfo=host=172.17.0.6 port=5432 user=replicator", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "spool-path=/var/spool/pgbackrest", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"alter user replicator password 'bogus'\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "ALTER ROLE" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl restart postgresql-13.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres rm -f /var/log/pgbackrest/demo-archive-push-async.log" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \" \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal(); \\", " select pg_create_restore_point('test async push'); select pg_switch_wal();\"" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --log-level-console=info check" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "WAL segment" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: check command begin 2.55.1: --exec-id=5523-a294109a --log-level-console=info --log-level-file=detail --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --stanza=demo", "P00 INFO: check repo1 configuration (primary)", "P00 INFO: check repo1 archive for WAL (primary)", "P00 INFO: WAL segment 00000007000000000000002D successfully archived to '/var/lib/pgbackrest/archive/demo/13-1/0000000700000000/00000007000000000000002D-aeeff865302e5d3d88bc6400d7bc492b147e06ee.gz' on repo1", "P00 INFO: check command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/pgbackrest/demo-archive-push-async.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ " WAL file\\(s\\) to archive|pushed WAL file \\'0000000" ] }, 
"host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-------------------PROCESS START-------------------", "P00 INFO: archive-push:async command begin 2.55.1: [/var/lib/pgsql/13/data/pg_wal] --archive-async --exec-id=5493-218290f9 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --process-max=2 --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: push 1 WAL file(s) to archive: 000000070000000000000028", "P01 DETAIL: pushed WAL file '000000070000000000000028' to the archive", "P00 DETAIL: statistics: {\"socket.client\":{\"total\":1},\"socket.session\":{\"total\":1},\"tls.client\":{\"total\":1},\"tls.session\":{\"total\":1}}", "P00 INFO: archive-push:async command end: completed successfully", "", "-------------------PROCESS START-------------------", "P00 INFO: archive-push:async command begin 2.55.1: [/var/lib/pgsql/13/data/pg_wal] --archive-async --exec-id=5525-183ecf58 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --process-max=2 --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: push 5 WAL file(s) to archive: 000000070000000000000029...00000007000000000000002D", "P01 DETAIL: pushed WAL file '000000070000000000000029' to the archive", "P02 DETAIL: pushed WAL file '00000007000000000000002A' to the archive", "P01 DETAIL: pushed WAL file '00000007000000000000002B' to the archive", "P02 DETAIL: pushed WAL file '00000007000000000000002C' to the archive", "P01 DETAIL: pushed WAL file '00000007000000000000002D' to the archive", "P00 DETAIL: statistics: {\"socket.client\":{\"total\":1},\"socket.session\":{\"total\":1},\"tls.client\":{\"total\":1},\"tls.session\":{\"total\":1}}", "P00 INFO: archive-push:async command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 5" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres cat /var/log/pgbackrest/demo-archive-get-async.log" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "found [0-F]{24} in the .* archive" ] }, "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "-------------------PROCESS START-------------------", "P00 INFO: archive-get:async command begin 2.55.1: [000000070000000000000024, 000000070000000000000025, 000000070000000000000026, 000000070000000000000027, 000000070000000000000028, 000000070000000000000029, 00000007000000000000002A, 00000007000000000000002B] --archive-async --exec-id=1728-47faf303 --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --process-max=2 --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls 
--spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: get 8 WAL file(s) from archive: 000000070000000000000024...00000007000000000000002B", "P02 DETAIL: found 000000070000000000000025 in the repo1: 13-1 archive", "P01 DETAIL: found 000000070000000000000024 in the repo1: 13-1 archive", "P01 DETAIL: found 000000070000000000000027 in the repo1: 13-1 archive", "P02 DETAIL: found 000000070000000000000026 in the repo1: 13-1 archive", "P00 DETAIL: unable to find 000000070000000000000028 in the archive", "P00 DETAIL: statistics: {\"socket.client\":{\"total\":1},\"socket.session\":{\"total\":1},\"tls.client\":{\"total\":1},\"tls.session\":{\"total\":1}}", " [filtered 24 lines of output]", "P00 INFO: archive-get:async command begin 2.55.1: [000000070000000000000028, 000000070000000000000029, 00000007000000000000002A, 00000007000000000000002B, 00000007000000000000002C, 00000007000000000000002D, 00000007000000000000002E, 00000007000000000000002F] --archive-async --exec-id=1777-98b8e35e --log-level-console=off --log-level-file=detail --log-level-stderr=off --no-log-timestamp --pg1-path=/var/lib/pgsql/13/data --process-max=2 --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --spool-path=/var/spool/pgbackrest --stanza=demo", "P00 INFO: get 8 WAL file(s) from archive: 000000070000000000000028...00000007000000000000002F", "P01 DETAIL: found 000000070000000000000028 in the repo1: 13-1 archive", "P02 DETAIL: found 000000070000000000000029 in the repo1: 13-1 archive", "P01 DETAIL: found 00000007000000000000002A in the repo1: 13-1 archive", "P02 DETAIL: found 00000007000000000000002B in the repo1: 13-1 archive", "P01 DETAIL: found 00000007000000000000002C in the repo1: 13-1 archive", "P02 DETAIL: found 00000007000000000000002D in the repo1: 13-1 archive", "P00 DETAIL: unable to find 00000007000000000000002E in the archive", "P00 DETAIL: statistics: {\"socket.client\":{\"total\":1},\"socket.session\":{\"total\":1},\"tls.client\":{\"total\":1},\"tls.session\":{\"total\":1}}", " [filtered 7 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres psql -c \"alter user replicator password 'jw8s0F4'\"" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "ALTER ROLE" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo" : { "pg2-host" : { "value" : "pg-standby" }, "pg2-host-ca-file" : { "value" : "/etc/pgbackrest/cert/ca.crt" }, "pg2-host-cert-file" : { "value" : "/etc/pgbackrest/cert/client.crt" }, "pg2-host-key-file" : { "value" : "/etc/pgbackrest/cert/client.key" }, "pg2-host-type" : { "value" : "tls" }, "pg2-path" : { "value" : "/var/lib/pgsql/13/data" } }, "global" : { "backup-standby" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "pg2-host=pg-standby", "pg2-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg2-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg2-host-key-file=/etc/pgbackrest/cert/client.key", "pg2-host-type=tls", "pg2-path=/var/lib/pgsql/13/data", "", "[demo-alt]", "pg1-host=pg-alt", 
"pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "backup-standby=y", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "tls-server-address=*", "tls-server-auth=pgbackrest-client=*", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --log-level-console=detail backup" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "backup file pg-primary|replay on the standby" ] }, "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 2 lines of output]", "P00 INFO: execute non-exclusive backup start: backup begins after the requested immediate checkpoint completes", "P00 INFO: backup start archive = 00000007000000000000002F, lsn = 0/2F000028", "P00 INFO: wait for replay on the standby to reach 0/2F000028", "P00 INFO: replay on the standby reached 0/2F000028", "P00 INFO: check archive for prior segment 00000007000000000000002E", "P01 DETAIL: backup file pg-primary:/var/lib/pgsql/13/data/log/postgresql.log (11KB, 0.47%) checksum a243e99af415b81bf4d9d0ec91af46983eed09df", "P01 DETAIL: backup file pg-primary:/var/lib/pgsql/13/data/global/pg_control (8KB, 0.82%) checksum b2232ca69c379dce910b569560f7c1ada80e9da6", "P01 DETAIL: backup file pg-primary:/var/lib/pgsql/13/data/pg_hba.conf (4.5KB, 1.01%) checksum 65e54ae24bda87b2542351cb16a7fecc7e5aceeb", "P01 DETAIL: match file from prior backup pg-primary:/var/lib/pgsql/13/data/current_logfiles (26B, 1.02%) checksum 78a9f5c10960f0d91fcd313937469824861795a2", "P01 DETAIL: match file from prior backup pg-primary:/var/lib/pgsql/13/data/pg_logical/replorigin_checkpoint (8B, 1.02%) checksum 347fc8f2df71bd4436e38bd1516ccd7ea0d46532", " [filtered 1243 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl stop postgresql-13.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres /usr/pgsql-14/bin/initdb \\", " -D /var/lib/pgsql/14/data -k -A peer" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sh -c 'cd /var/lib/pgsql && \\", " /usr/pgsql-14/bin/pg_upgrade \\", " --old-bindir=/usr/pgsql-13/bin \\", " --new-bindir=/usr/pgsql-14/bin \\", " --old-datadir=/var/lib/pgsql/13/data \\", " --new-datadir=/var/lib/pgsql/14/data \\", " --old-options=\" -c config_file=/var/lib/pgsql/13/data/postgresql.conf\" \\", " --new-options=\" -c config_file=/var/lib/pgsql/14/data/postgresql.conf\"'" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "Upgrade Complete" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ " [filtered 68 lines of output]", "Checking for extension updates ok", "", "Upgrade Complete", "----------------", 
"Optimizer statistics are not transferred by pg_upgrade.", " [filtered 4 lines of output]" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "cat /root/postgresql.common.conf >> /var/lib/pgsql/14/data/postgresql.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : "root" }, "type" : "exe" }, { "key" : { "file" : "/var/lib/pgsql/14/data/postgresql.conf", "host" : "pg-primary", "option" : { "archive_command" : { "value" : "'pgbackrest --stanza=demo archive-push %p'" }, "archive_mode" : { "value" : "on" }, "log_filename" : { "value" : "'postgresql.log'" }, "max_wal_senders" : { "value" : "3" }, "wal_level" : { "value" : "replica" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "archive_command = 'pgbackrest --stanza=demo archive-push %p'", "archive_mode = on", "log_filename = 'postgresql.log'", "max_wal_senders = 3", "wal_level = replica" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-primary", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/pgsql/14/data" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/14/data", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "spool-path=/var/spool/pgbackrest", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "pg-standby", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/pgsql/14/data" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-path=/var/lib/pgsql/14/data", "recovery-option=primary_conninfo=host=172.17.0.6 port=5432 user=replicator", "", "[global]", "archive-async=y", "log-level-file=detail", "repo1-host=repository", "repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "repo1-host-cert-file=/etc/pgbackrest/cert/client.crt", "repo1-host-key-file=/etc/pgbackrest/cert/client.key", "repo1-host-type=tls", "spool-path=/var/spool/pgbackrest", "tls-server-address=*", "tls-server-auth=pgbackrest-client=demo", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key", "", "[global:archive-get]", "process-max=2", "", "[global:archive-push]", "process-max=2" ] } }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "demo" : { "pg1-path" : { "value" : "/var/lib/pgsql/14/data" }, "pg2-path" : { "value" : "/var/lib/pgsql/14/data" } }, "global" : { "backup-standby" : { "value" : "n" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/14/data", "pg2-host=pg-standby", "pg2-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg2-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg2-host-key-file=/etc/pgbackrest/cert/client.key", "pg2-host-type=tls", "pg2-path=/var/lib/pgsql/14/data", "", 
"[demo-alt]", "pg1-host=pg-alt", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "backup-standby=n", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "tls-server-address=*", "tls-server-auth=pgbackrest-client=*", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo cp /var/lib/pgsql/13/data/pg_hba.conf \\", " /var/lib/pgsql/14/data/pg_hba.conf" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --no-online \\", " --log-level-console=info stanza-upgrade" ], "highlight" : { "filter" : true, "filter-context" : 2, "list" : [ "completed successfully" ] }, "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 INFO: stanza-upgrade command begin 2.55.1: --exec-id=6035-d86ea449 --log-level-console=info --log-level-file=detail --no-log-timestamp --no-online --pg1-path=/var/lib/pgsql/14/data --repo1-host=repository --repo1-host-ca-file=/etc/pgbackrest/cert/ca.crt --repo1-host-cert-file=/etc/pgbackrest/cert/client.crt --repo1-host-key-file=/etc/pgbackrest/cert/client.key --repo1-host-type=tls --stanza=demo", "P00 INFO: stanza-upgrade for stanza 'demo' on repo1", "P00 INFO: stanza-upgrade command end: completed successfully" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-14.service" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl status postgresql-14.service" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo check" ], "host" : "pg-primary", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm -rf /var/lib/pgsql/13/data" ], "host" : "pg-primary", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo rm -rf /var/lib/pgsql/13/data" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres mkdir -p -m 700 /usr/pgsql-14/bin" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo check" ], "host" : "repository", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe", "value" : { "output" : [ "P00 WARN: unable to check pg2: [DbConnectError] raised from remote-0 tls protocol on 'pg-standby': unable to connect to 'dbname='postgres' port=5432': could not connect to server: No such file or directory", " \tIs the server running locally and accepting", " \tconnections on Unix domain socket \"/run/postgresql/.s.PGSQL.5432\"?" 
] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u pgbackrest pgbackrest --stanza=demo --type=full backup" ], "host" : "repository", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo --type=standby restore" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/var/lib/pgsql/14/data/postgresql.conf", "host" : "pg-standby", "option" : { "hot_standby" : { "value" : "on" } } }, "type" : "cfg-postgresql", "value" : { "config" : [ "hot_standby = on" ] } }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo systemctl start postgresql-14.service" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres sleep 2" ], "host" : "pg-standby", "load-env" : true, "output" : false, "run-as-user" : null }, "type" : "exe" }, { "key" : { "bash-wrap" : true, "cmd" : [ "sudo -u postgres pgbackrest --stanza=demo check" ], "host" : "pg-standby", "load-env" : true, "output" : true, "run-as-user" : null }, "type" : "exe" }, { "key" : { "file" : "/etc/pgbackrest/pgbackrest.conf", "host" : "repository", "option" : { "global" : { "backup-standby" : { "value" : "y" } } } }, "type" : "cfg-pgbackrest", "value" : { "config" : [ "[demo]", "pg1-host=pg-primary", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/14/data", "pg2-host=pg-standby", "pg2-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg2-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg2-host-key-file=/etc/pgbackrest/cert/client.key", "pg2-host-type=tls", "pg2-path=/var/lib/pgsql/14/data", "", "[demo-alt]", "pg1-host=pg-alt", "pg1-host-ca-file=/etc/pgbackrest/cert/ca.crt", "pg1-host-cert-file=/etc/pgbackrest/cert/client.crt", "pg1-host-key-file=/etc/pgbackrest/cert/client.key", "pg1-host-type=tls", "pg1-path=/var/lib/pgsql/13/data", "", "[global]", "backup-standby=y", "process-max=3", "repo1-path=/var/lib/pgbackrest", "repo1-retention-full=2", "start-fast=y", "tls-server-address=*", "tls-server-auth=pgbackrest-client=*", "tls-server-ca-file=/etc/pgbackrest/cert/ca.crt", "tls-server-cert-file=/etc/pgbackrest/cert/server.crt", "tls-server-key-file=/etc/pgbackrest/cert/server.key" ] } } ] } } } pgbackrest-release-2.55.1/doc/resource/fake-cert/000077500000000000000000000000001500617037600216115ustar00rootroot00000000000000pgbackrest-release-2.55.1/doc/resource/fake-cert/.gitignore000066400000000000000000000000141500617037600235740ustar00rootroot00000000000000*.csr *.srl pgbackrest-release-2.55.1/doc/resource/fake-cert/README.md000066400000000000000000000030571500617037600230750ustar00rootroot00000000000000# pgBackRest Documentation Certificates The certificates in this directory are used for documentation generation only and should not be used for actual services. ## pgBackRest CA Generate a CA that will be used to sign documentation certificates. It can be installed in the documentation containers to make certificates signed by it valid. 
``` cd [pgbackrest-root]/doc/resource/fake-cert openssl ecparam -genkey -name prime256v1 | openssl ec -out ca.key openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 \ -subj "/C=US/ST=All/L=All/O=pgBackRest/CN=pgbackrest.org" ``` ## S3 Certificate Mimic an S3 certificate for the `us-east-1`/`us-east-2` region to generate S3 documentation. ``` cd [pgbackrest-root]/doc/resource/fake-cert openssl ecparam -genkey -name prime256v1 | openssl ec -out s3-server.key openssl req -new -sha256 -nodes -out s3-server.csr -key s3-server.key -config s3.cnf openssl x509 -req -in s3-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial \ -out s3-server.crt -days 99999 -extensions v3_req -extfile s3.cnf ``` ## Azure Certificate Mimic an Azure certificate for the `*.blob.core.windows.net` hosts to generate Azure documentation. ``` cd [pgbackrest-root]/doc/resource/fake-cert openssl ecparam -genkey -name prime256v1 | openssl ec -out azure-server.key openssl req -new -sha256 -nodes -out azure-server.csr -key azure-server.key -config azure.cnf openssl x509 -req -in azure-server.csr -CA ca.crt -CAkey ca.key -CAcreateserial \ -out azure-server.crt -days 99999 -extensions v3_req -extfile azure.cnf ``` pgbackrest-release-2.55.1/doc/resource/fake-cert/azure-server.crt000066400000000000000000000014461500617037600247620ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICJjCCAc2gAwIBAgIUdW+DRN7XbILssJmdxycMz90EEwUwCgYIKoZIzj0EAwIw VzELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA0FsbDEMMAoGA1UEBwwDQWxsMRMwEQYD VQQKDApwZ0JhY2tSZXN0MRcwFQYDVQQDDA5wZ2JhY2tyZXN0Lm9yZzAgFw0yMDA2 MjkxOTM0MjhaGA8yMjk0MDQxMzE5MzQyOFowdzELMAkGA1UEBhMCVVMxDDAKBgNV BAgMA0FsbDEMMAoGA1UEBwwDQWxsMRMwEQYDVQQKDApwZ0JhY2tSZXN0MRwwGgYD VQQLDBNVbml0IFRlc3RpbmcgRG9tYWluMRkwFwYDVQQDDBBjb3JlLndpbmRvd3Mu bmV0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEqQy14z/cTAwvIDUCgU+5ATJh 5hsvMaUrYfuCEFC9tx7+zeqrEbtWOqO1dQVnCfZr38lwrTDzJvZJKqh4rTlWoKNV MFMwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwOQYDVR0RBDIwMIIVYmxvYi5jb3Jl LndpbmRvd3MubmV0ghcqLmJsb2IuY29yZS53aW5kb3dzLm5ldDAKBggqhkjOPQQD AgNHADBEAiB5RbKWvkzISbAHRqkg4egKcitsijqZsPJgpj4X91ercwIgBJmMNKVP ELrECSmLFbJQCIZJAMcbzmLxZNcnsRaMUG8= -----END CERTIFICATE----- pgbackrest-release-2.55.1/doc/resource/fake-cert/azure-server.key000066400000000000000000000003431500617037600247550ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- MHcCAQEEIEGn3zrwzQ8+ZP6i+eye3iqQybiBK4ap+JAQ0uNGEMP1oAoGCCqGSM49 AwEHoUQDQgAEqQy14z/cTAwvIDUCgU+5ATJh5hsvMaUrYfuCEFC9tx7+zeqrEbtW OqO1dQVnCfZr38lwrTDzJvZJKqh4rTlWoA== -----END EC PRIVATE KEY----- pgbackrest-release-2.55.1/doc/resource/fake-cert/azure.cnf000066400000000000000000000006141500617037600234300ustar00rootroot00000000000000[req] default_bits = 4096 prompt = no default_md = sha256 req_extensions = v3_req distinguished_name = dn [ dn ] C=US ST=All L=All O=pgBackRest OU=Unit Testing Domain CN = core.windows.net [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment subjectAltName = @alt_names [ alt_names ] DNS.1 = blob.core.windows.net DNS.2 = *.blob.core.windows.net pgbackrest-release-2.55.1/doc/resource/fake-cert/ca.crt000066400000000000000000000013511500617037600227060ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIB+jCCAaCgAwIBAgIJAJDUUhiBUbmEMAoGCCqGSM49BAMCMFcxCzAJBgNVBAYT AlVTMQwwCgYDVQQIDANBbGwxDDAKBgNVBAcMA0FsbDETMBEGA1UECgwKcGdCYWNr UmVzdDEXMBUGA1UEAwwOcGdiYWNrcmVzdC5vcmcwIBcNMTkwNTI3MDAxOTU5WhgP MjI5MzAzMTAwMDE5NTlaMFcxCzAJBgNVBAYTAlVTMQwwCgYDVQQIDANBbGwxDDAK 
BgNVBAcMA0FsbDETMBEGA1UECgwKcGdCYWNrUmVzdDEXMBUGA1UEAwwOcGdiYWNr cmVzdC5vcmcwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQYHUcSknRDL+fgFJZI IC73Ju75yA0203IxPO35i8mVb9CcWVhEgHmS+cQ6SfY6GC7V61VB7gwzQ+XESi2p ndhJo1MwUTAdBgNVHQ4EFgQUYMbKIlTUE6gklw8KcSC6fnlOitwwHwYDVR0jBBgw FoAUYMbKIlTUE6gklw8KcSC6fnlOitwwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjO PQQDAgNIADBFAiEA1Bzy17/6jQimg3ROZTrVGkRtAuzTtjgDParHFrIhSDoCIH43 OeOUaPVb0rXGPLu9rFpjPOmtFSW3lf4skheJMKyN -----END CERTIFICATE----- pgbackrest-release-2.55.1/doc/resource/fake-cert/ca.key000066400000000000000000000003431500617037600227060ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- MHcCAQEEIB5f3SxfiZ92GMpuqpfTiPO3xaVOnxRh6qVAoRtu7NOZoAoGCCqGSM49 AwEHoUQDQgAEGB1HEpJ0Qy/n4BSWSCAu9ybu+cgNNtNyMTzt+YvJlW/QnFlYRIB5 kvnEOkn2Ohgu1etVQe4MM0PlxEotqZ3YSQ== -----END EC PRIVATE KEY----- pgbackrest-release-2.55.1/doc/resource/fake-cert/s3-server.crt000066400000000000000000000016101500617037600241520ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICbTCCAhOgAwIBAgIJAODTXyGnxWtVMAoGCCqGSM49BAMCMFcxCzAJBgNVBAYT AlVTMQwwCgYDVQQIDANBbGwxDDAKBgNVBAcMA0FsbDETMBEGA1UECgwKcGdCYWNr UmVzdDEXMBUGA1UEAwwOcGdiYWNrcmVzdC5vcmcwIBcNMTkwNTI3MDIwODEwWhgP MjI5MzAzMTAwMjA4MTBaMIGBMQswCQYDVQQGEwJVUzEMMAoGA1UECAwDQWxsMQww CgYDVQQHDANBbGwxEzARBgNVBAoMCnBnQmFja1Jlc3QxHDAaBgNVBAsME1VuaXQg VGVzdGluZyBEb21haW4xIzAhBgNVBAMMGnMzLnVzLWVhc3QtMS5hbWF6b25hd3Mu Y29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEe2dO1v1gE0Qj4H407i0K8tN kASkveckACPFzXs2i/++rZY4bwUub08JcMRv0WWwnRzOoumsN26Ge454vTbjoqOB mjCBlzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DB9BgNVHREEdjB0ghpzMy51cy1l YXN0LTEuYW1hem9uYXdzLmNvbYIcKi5zMy51cy1lYXN0LTEuYW1hem9uYXdzLmNv bYIaczMudXMtZWFzdC0yLmFtYXpvbmF3cy5jb22CHCouczMudXMtZWFzdC0yLmFt YXpvbmF3cy5jb20wCgYIKoZIzj0EAwIDSAAwRQIgLiE7LuK6O/bKo70XPUi6xoDE ew+EHO31klTOeWiS6oMCIQCHMEqSAcDF/gnG/UXnp2viHOFjnY+NZgQo76l+/2mE iQ== -----END CERTIFICATE----- pgbackrest-release-2.55.1/doc/resource/fake-cert/s3-server.key000066400000000000000000000003431500617037600241540ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- MHcCAQEEIBhweMaCuhrRJy6hLV9X7QRCorDdyiUvSWEySHXZJM4DoAoGCCqGSM49 AwEHoUQDQgAEEe2dO1v1gE0Qj4H407i0K8tNkASkveckACPFzXs2i/++rZY4bwUu b08JcMRv0WWwnRzOoumsN26Ge454vTbjog== -----END EC PRIVATE KEY----- pgbackrest-release-2.55.1/doc/resource/fake-cert/s3.cnf000066400000000000000000000007501500617037600226300ustar00rootroot00000000000000[req] default_bits = 4096 prompt = no default_md = sha256 req_extensions = v3_req distinguished_name = dn [ dn ] C=US ST=All L=All O=pgBackRest OU=Unit Testing Domain CN = s3.us-east-1.amazonaws.com [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment subjectAltName = @alt_names [ alt_names ] DNS.1 = s3.us-east-1.amazonaws.com DNS.2 = *.s3.us-east-1.amazonaws.com DNS.3 = s3.us-east-2.amazonaws.com DNS.4 = *.s3.us-east-2.amazonaws.com pgbackrest-release-2.55.1/doc/resource/git-history.cache000066400000000000000000061447301500617037600232360ustar00rootroot00000000000000[ { "commit": "8bbb98243566bfa052f6aacfd1bd3a6acd770e40", "date": "2025-05-01 13:22:15 -0400", "subject": "Revert \"calculate content-md5 on S3 only when required\".", "body": "20bfd14 removed content-md5 where allowed by the specification but failed to notice that either content-md5 or x-amz-content-* is required for PUT when object lock is enabled.\n\nOn top of that it appears Scality S3 (at least?) won't accept alternate content checksums when object lock is enabled. 
Technically this is a violation of the specification but nonetheless the change breaks working installations.\n\nFor now it seems safer to revert this change and pursue a better solution for a future feature release." }, { "commit": "5e01098617a3803067b153be2d41e023cd4d95e4", "date": "2025-05-01 13:06:18 -0400", "subject": "Fix lower bounds checking for option keys.", "body": "Specifying an indexed option with a key of 0, e.g. pg0-path, would lead to a segfault.\n\nAdd lower bounds checking to fix the issue." }, { "commit": "293bc2a75d5cbe364f443bb87a998d76e4618ac2", "date": "2025-05-01 12:53:05 -0400", "subject": "Add details about notifying packagers to release instructions." }, { "commit": "6c0e21292629efaf53e513c5badc0824a10a1e79", "date": "2025-05-01 12:38:37 -0400", "subject": "Begin v2.55.1 development." }, { "commit": "6c90196e74ef0da38568f08fd7cb234d10130e79", "date": "2025-04-21 18:25:00 -0400", "subject": "v2.55.0: Verification Improvements and PostgreSQL 18 Support" }, { "commit": "4bb7cb1ac65a135e3f311ef288f2ad946493266b", "date": "2025-04-21 09:02:56 -0400", "subject": "Fix incorrect not-equal comparison in TEST_RESULT_Z_NE().", "body": "This comparison was not being used for strings in the tests but best to fix it so it does not cause confusion in the future." }, { "commit": "9386920b79971347c6c57589190bf09b591d8b4d", "date": "2025-04-16 12:26:15 -0500", "subject": "Add FAQ about exporting self-contained cluster." }, { "commit": "c2f64bb03bdbb2ce883041118dbcfd79da3f1307", "date": "2025-04-15 16:50:40 -0500", "subject": "Fix highlight error in user guide.", "body": "87776bc9 updated the RHEL documentation to PostgreSQL 13/14 but did not update recovery highlighting to be compatible with RHEL. This was not caught because the RHEL documentation was being built as PDF, which does not do highlighting.\n\nInstead build the RHEL documentation as HTML in the first stage (and PDF in the second) so the error is caught.\n\nFinally, fix the RHEL documentation to generate the highlight by concatenating the log." }, { "commit": "8bdba747981e8805a9bf355050f70cdf19e9babc", "date": "2025-04-14 13:34:35 -0500", "subject": "Clarify incremental backup expiration.", "body": "The documentation was a bit misleading regarding how incremental backups are expired. Update the misleading part (\"Differential and Incremental backups are count-based...\") and move the explanation of how incremental expiration works out of differential expiration into the introductory paragraph.\n\nAlso add a note about how full backups are considered as differential for the purpose of expiration." }, { "commit": "565d2e0c47cd21df890a40b3a94a8c865bed76d9", "date": "2025-04-14 12:29:31 -0500", "subject": "PostgreSQL 18 experimental support.", "body": "Support is experimental since PostgreSQL 18 is still in development and has not released a beta, but it may be useful for testing." }, { "commit": "019e4cf8a779ee2572a5227ef2d80c3036f877f9", "date": "2025-04-14 10:46:55 -0500", "subject": "Another fix for integration tests on alternate architectures.", "body": "37544da5 missed another place where 57ffd2df had accidentally hard-coded the integration test architecture to x86_64.\n\nFix the test code to use the correct image based on architecture."
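The lower-bounds fix described in 5e01098 above reduces to a simple range check: indexed option keys are 1-based, so a key of 0 (as in pg0-path) must be rejected before it is used to index anything. A minimal sketch of the idea follows; the names are illustrative, not pgBackRest's actual parser API:

```
#include <stdbool.h>
#include <stdio.h>

// Illustrative check for an indexed option key such as the 1 in pg1-path.
// keyMax stands in for the option's configured maximum index.
static bool
optionKeyValid(const unsigned int key, const unsigned int keyMax)
{
    // Keys are 1-based, so pg0-path must be rejected along with anything past
    // the maximum -- indexing storage with key - 1 when key == 0 is the kind
    // of mistake that produces the segfault described above.
    return key >= 1 && key <= keyMax;
}

int
main(void)
{
    printf("pg0-path valid = %d\n", optionKeyValid(0, 256)); // rejected
    printf("pg1-path valid = %d\n", optionKeyValid(1, 256)); // accepted
    return 0;
}
```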
}, { "commit": "48a43e76e2657bbdd871268124c5580eb7ce7caa", "date": "2025-04-11 18:39:56 -0500", "subject": "Enable format-overflow=2 compiler warning.", "body": "Warn about calls to formatted input/output functions such as sprintf and vsprintf that might overflow the destination buffer.\n\nThis warning found a few short float buffers. In practice these are unlikely to ever be a problem based on our float usage but best to be safe.\n\nNote that this warning only requires the buffer size to be 317 but it must be 318 to prevent warnings from -Wformat-truncation=2. We are not ready to enable that warning yet but seems better to get the buffer correct now." }, { "commit": "5fe23ff07b0e6e1bea9f36d49b3f03a62a7be861", "date": "2025-04-11 17:02:14 -0500", "subject": "Remove implied -Wformat-nonliteral warning.", "body": "-Wformat-nonliteral is already implied by -format=2." }, { "commit": "9fba07c26dc5ff353f86afc7f7bdf61edf1c8781", "date": "2025-04-11 16:52:45 -0500", "subject": "Remove -Wno-implicit-fallthrough and explicitly mark fallthrough.", "body": "Last time we tried this the markered seems a bit unreliable but __attribute__((fallthrough)) appears to work pretty well." }, { "commit": "a076ee63bd99d1af293c30645f79893579382060", "date": "2025-04-10 15:29:37 -0500", "subject": "Remove unmaintained/unused development environment build files.", "body": "The Vagrantfile has not been maintained in years and the Dockerfile is only used by a single developer. There are instructions for building a development environment in CONTRIBUTING.md so these build files are no longer required." }, { "commit": "dd3a7d99a78d0d604967d90e8a8dbf8915432896", "date": "2025-04-10 12:08:35 -0500", "subject": "Recommend not placing spool-path within pg_xlog/pg_wal.", "body": "This is generally not a good idea and it can cause problems for PostgreSQL tools such as pg_rewind." }, { "commit": "cab4d6fd5c775d6e1a9cd9889d825eee667b5322", "date": "2025-04-10 10:26:50 -0500", "subject": "Warn when a value for a multi-key option is overwritten.", "body": "Some options can contain multiple key/value pairs. However, if if the key is specified again the value will be silently overwritten. In most cases one value per key is appropriate, but it makes sense to warn the user about the overwrite." }, { "commit": "68f22aea6665b9dcbe76dbfaa731f37187e1ce20", "date": "2025-04-09 14:50:16 -0500", "subject": "Allow connections to PostgreSQL on abstract domain sockets.", "body": "Currently the pg-socket-path option is required to be a valid absolute path but this precludes the use of abstract domain sockets.\n\nSet the option type to string so abstract domain sockets are allowed. This removes some validation but libpq will report if the path is invalid and we don't use it for any other purpose." }, { "commit": "20bfd14b73ad6d75b6fb1169565b18dcfa183c9b", "date": "2025-04-09 12:27:27 -0500", "subject": "Calculate content-md5 on S3 only when required.", "body": "The content-md5 header was generated for all requests with content but it is only required for batch delete requests. It is not clear why this header is required when x-amz-content-sha256 is also provided or why it is required only for this request but the documentation is clear on the matter. 
However, the content for these requests is relatively small compared to uploading files so omitting content-md5 where possible will save some CPU cycles.\n\nCurrent AWS S3 and recent Minio don't complain if this header is missing but since it is still required by older versions of Minio and it is specified in the documentation for batch delete it makes sense to keep it." }, { "commit": "c925832e1737f1b5cac347c482a725a87f2237cb", "date": "2025-04-09 12:02:19 -0500", "subject": "Add LSTDEF() macro.", "body": "This macro allows static List objects to be created which simplifies usage when passing lists to functions. Also, since List objects are commonly used this makes the code base a bit more consistent.\n\nFor now skip static lists that are only used locally within a function since the benefit is not as clear." }, { "commit": "53a45d25c3e60e5067010b2676f5d6216638fbe3", "date": "2025-04-08 10:45:06 -0500", "subject": "Fix documentation for specifying multiple stanzas with tls-server-auth.", "body": "The prior documentation said that multiple stanzas should be specified by repeating the tls-server-auth option. This is incorrect -- in fact a comma-separated list of stanzas should be used.\n\nFix the documentation and add a test to confirm this behavior.\n\nIn passing add some const qualifiers that were missing in the relevant code." }, { "commit": "37544da56c711fc0a344d0a292488d4265b9cabc", "date": "2025-04-07 11:57:00 -0500", "subject": "Fix integration tests on alternate architectures.", "body": "57ffd2df accidentally hard-coded the integration test architecture to x86_64 and it was not noticed because in CI all the integration tests run on that architecture.\n\nFix the test code to retrieve the current architecture and use it for integration tests." }, { "commit": "e18ca19895bd3dbb0a6718bb6f01b9e43010fbb7", "date": "2025-04-04 10:26:03 -0500", "subject": "Remove extraneous const from cvtIntToZ() prototype." }, { "commit": "aeea81a3883e320d071f72152b86cff9944dd50f", "date": "2025-04-04 10:07:32 -0500", "subject": "Fix test logging in storageHelperDryRunInit()." }, { "commit": "cc50e1aecd0ba7b35b451c97ac082fbd3b46591b", "date": "2025-04-02 15:25:23 -0500", "subject": "Revert \"full/incremental backup method\".", "body": "This method was introduced in cad595f but on further reflection it does not seem worth the added complexity just to make restore consistency faster without improving the speed of the overall backup. The most common recovery case is PITR and this method produces diminishing returns as the recovery time gets further from the backup end time.\n\nA better solution (not implemented here) is to copy unmodified files from prior backups. This is much faster than recopying and compressing files from the cluster (especially on object stores with a copy command) and can even be done after the backup window to further reduce WAL replay required for consistency. It also reduces load on the host where the backup is made." }, { "commit": "4a42eec552233f34e0ace251e201a4e7e5021f21", "date": "2025-04-02 11:59:38 -0500", "subject": "Set noResetLogLevel in config/parse unit tests where required.", "body": "This prevents the logging from getting reset so later tests can validate warnings." }, { "commit": "33655095f469d37a34aa7ed33855d1b2fa2e529c", "date": "2025-03-31 19:30:41 -0500", "subject": "Use latest Minio image for testing.", "body": "It has been a while since a Minio bug has broken the tests so go back to testing against the latest version until the trust is broken again."
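For context on the LSTDEF() commit above: the useful property of such a macro is that a constant list can be declared or passed inline without runtime construction. The sketch below shows the general compound-literal pattern under that assumption; the IllustrativeList type and macro are hypothetical stand-ins, not pgBackRest's real List implementation:

```
#include <stddef.h>
#include <stdio.h>

// Hypothetical stand-in for a list type -- not pgBackRest's actual List
typedef struct IllustrativeList
{
    size_t size;        // size of one element
    unsigned int total; // number of elements
    const void *buffer; // element storage
} IllustrativeList;

// Build a constant list from an array literal at the point of use, in the
// spirit of a static list constructor macro
#define ILLUSTRATIVE_LSTDEF(type, ...)                                         \
    ((const IllustrativeList){                                                 \
        .size = sizeof(type),                                                  \
        .total = sizeof((const type[]){__VA_ARGS__}) / sizeof(type),           \
        .buffer = (const type[]){__VA_ARGS__},                                 \
    })

// A function can now take a list without the caller building one manually
static void
listPrint(const IllustrativeList list)
{
    for (unsigned int listIdx = 0; listIdx < list.total; listIdx++)
        printf("%d\n", ((const int *)list.buffer)[listIdx]);
}

int
main(void)
{
    listPrint(ILLUSTRATIVE_LSTDEF(int, 1, 2, 3));
    return 0;
}
```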
}, { "commit": "dcfe30e322091dae49b00d77c731c9b31f1ae7dd", "date": "2025-03-31 14:18:29 -0500", "subject": "Fix instances of \"of of\"." }, { "commit": "b28ae98cec7ae7e881e6476c1cba1ba7e1abc358", "date": "2025-03-31 12:24:03 -0500", "subject": "Update unix_socket_directory to unix_socket_directories.", "body": "unix_socket_directory has not been valid since PostgreSQL 9.2 and since 9.5 is now the minimum supported version there is no reason to document both." }, { "commit": "e4def8845eb5c3fe9fb8cc44d9f64766cfb348c2", "date": "2025-03-26 08:44:29 -0600", "subject": "Allow verification of a specified backup.", "body": "Add support for the verify command --set option. This (internal) option was already accepted without errors but was not implemented.\n\nThe default behavior for verify is to check all the backups present. With the --set option only the specified backup will be verified. If the specified backup label is not valid an error is added to the result and verification is skipped. In addition, only WAL required to make the specified backup consistent will be verified." }, { "commit": "945c7d1eb67a97c00c32d8f121bca93d90c3ec80", "date": "2025-03-25 12:43:32 -0600", "subject": "Update verify command summary.", "body": "The prior text implied that verify could only operate on a single repository. Make the summary more general to indicate that the command can work on any repository." }, { "commit": "1cfafa386c860a8f756c658130de869446445cdd", "date": "2025-03-25 11:00:53 -0600", "subject": "Strip version from RHEL label in user guide.", "body": "RHEL 7 is EOL so remove it from the label. Rather than update the version range just remove it from the label since the user guide is generally applicable to RHEL." }, { "commit": "87776bc9cfb4af369ed22d8a116145cb1e5a5eae", "date": "2025-03-25 10:28:46 -0600", "subject": "Update PostgresQL versions in user guide.", "body": "PostgreSQL 12 is EOL and no longer available in the yum.postgresql.org repository.\n\nUpdate the base and update versions of the RHEL and Debian documentation to better cover supported versions." }, { "commit": "84eb9a742dcf75f071e011946263dedaa985099e", "date": "2025-03-24 12:21:31 -0600", "subject": "Clarify behavior of --repo-retention-full-type.", "body": "Make some clarifications and add explicit documentation for repo-retention-full-type=count." }, { "commit": "5d2c67c3c6975ea3ed08c7425f6e410ae378913f", "date": "2025-03-21 10:37:20 -0600", "subject": "Fix block incremental restore issue on non-default repository.", "body": "If the selected backup to restore was not in the default (lowest number) repository and block incremental was used, then restore would erroneously try to load the file super block list from the default repository. Specifying --repo would fix this since it changed the default repository.\n\nFix by updating the super block read to the specified repository." }, { "commit": "723f900eaa41d58edc5cc58444029ab7c272acb1", "date": "2025-03-21 09:01:02 -0600", "subject": "Fix missing return in FUNCTION_LOG_RETURN_VOID().", "body": "This macro is only ever called last in functions so this is not an active issue, but it makes sense to fix since it would pose a risk for future development." }, { "commit": "d10ad3780a46ae321aa1db4f146655eba1cfda8b", "date": "2025-03-19 11:17:59 -0600", "subject": "Fix test logging." }, { "commit": "f00c0d4e853a2384fe9e333daa640659e6cd6d31", "date": "2025-03-19 09:34:32 -0600", "subject": "Fix comments in unit tests." 
}, { "commit": "750a051b7aeccc3dd7ae9c07af2c73cb25057de9", "date": "2025-02-28 16:53:03 -0600", "subject": "Add numeric output to version command.", "body": "Add a new output option to the version command that defaults to text (current behavior) but can be set to num to output the version in a numeric format, e.g. 2055000.\n\nThis makes it easier to compare and identify versions of pgBackRest." }, { "commit": "57ffd2dfcbc47300d61d901789c6d09ffc8e25b2", "date": "2025-02-28 15:38:05 -0600", "subject": "Fix multi-architecture unit testing.", "body": "The Github action we were using for multi-architecture testing stopped working. The project does not seem to be getting regular maintenance so it seems better to roll multi-architecture testing into our existing container builds.\n\nIntroduce multi-architecture builds and testing into our test framework. For now this only works for unit tests -- integration tests will still only run on x86_64. That could be updated in the future but since emulation is so slow it is not clear if it would be useful.\n\nAlso fix an invalid 32-bit checksum. The d11 test had not been running as 32-bit since d8ff89a so the checksum was not updated when it should have been in 48f511d." }, { "commit": "814bf487c3955e877d226faaa22324d3c79863b2", "date": "2025-02-25 16:37:22 -0600", "subject": "Add support for GCS requester pays.", "body": "GCS user projects allow the bucket requester to be billed rather than the bucket owner. This is useful for hosted services that create backups and want to provide direct (probably read-only) access to the backups." }, { "commit": "4db0263fd63274a8168eb4de17b320d2f86251a2", "date": "2025-02-25 16:12:28 -0600", "subject": "Add support for S3 requester pays.", "body": "S3 requester pays allows the bucket requester to be billed rather than the bucket owner. This is useful for hosted services that create backups and want to provide direct (probably read-only) access to the backups." }, { "commit": "7941d6506e26cbfa1979b636554b1b5377f05c20", "date": "2025-02-25 15:36:55 -0600", "subject": "Remove unneeded verbiage from option descriptions.", "body": "\"Setting this option\" didn't really add anything and was inconsistent with other option descriptions." }, { "commit": "f2bbfad29cee8468239d2340d5cf12204e352d10", "date": "2025-02-21 08:24:57 -0600", "subject": "Fix case in option summary." }, { "commit": "2155a12a3d861645de5618518350f90962fbc26d", "date": "2025-02-17 11:51:51 -0600", "subject": "Add detail logging for expired archive path.", "body": "When archive expiration has a large number of files to remove it may look like the process has hung, at least at higher log levels.\n\nAdd a log message at detail level to show that progress is being made." }, { "commit": "4dcfa9ef59c5e75c6c0ee83390c64b9a3d4af1fc", "date": "2025-02-17 10:56:57 -0600", "subject": "Add detail logging to expire test.", "body": "This is required to test logging to be added in a subsequent commit." }, { "commit": "82299eb4cd997524eeddef170ad99669ed6651cf", "date": "2025-02-15 12:00:00 -0600", "subject": "Clarify requirement for local/remote pgBackRest versions to match." }, { "commit": "47f050f8fd816e57326d881f4253d8a13c0f8da6", "date": "2025-02-15 11:17:04 -0600", "subject": "Fix expire archive range logging.", "body": "The prior logging only output the last path to be removed since start was overwritten as each path was deleted. 
This had no effect on expire functionality but was confusing since many more files might be expired than the logs indicated.\n\nFix logging so the correct start path is logged." }, { "commit": "ef0a71d629bfd0729f3f8a55f38588ea25c64fb9", "date": "2025-02-15 11:02:23 -0600", "subject": "Update Cirrus-CI FreeBSD runner versions." }, { "commit": "aa7210a1830825e0a3e6b6740937357341127a6f", "date": "2025-02-04 11:15:51 -0500", "subject": "Change --process-max recommendation for object stores to --repo-bundle.", "body": "While process-max is as useful for object stores as any other storage type, for file creation time in particular file bundling is far more effective since fewer files are created.\n\nUpdate the recommendation to reflect this." }, { "commit": "922e9f0775c39b64e009251a6bec8ef9b83c190f", "date": "2025-02-04 10:06:17 -0500", "subject": "Verify recovery target timeline.", "body": "If the user picks an invalid timeline (or the default is invalid) they will not discover it until after the restore is complete and recovery starts. In that case they'll receive a message like this:\n\nFATAL: requested timeline 2 is not a child of this server's history\nDETAIL: Latest checkpoint is at 0/7000028 on timeline 1, but in the history of the requested timeline, the server forked off from that timeline at 0/600AA20.\n\nThis message generally causes confusion unless one is familiar with it. In this case 1) a standby was promoted creating a new timeline 2) a new backup was made from the primary 3) the new backup was restored but could not follow the new timeline because the backup was made after the new timeline forked off. Since PostgreSQL 12 following the latest timeline has been the default, so this error has become common in split brain situations.\n\nImprove pgBackRest to read the history files and provide better error messages. Now this error is thrown before the restore starts:\n\nERROR: [058]: target timeline 2 forked from backup timeline 1 at 0/600aa20 which is before backup lsn of 0/7000028\n HINT: was the target timeline created by accidentally promoting a standby?\n HINT: was the target timeline created by testing a restore without --archive-mode=off?\n HINT: was the backup made after the target timeline was created?\n\nThis saves time since it happens before the restore and gives more information about what has gone wrong.\n\nIf the backup timeline is not an ancestor of the target timeline the error message is:\n\nERROR: [058]: backup timeline 6, lsn 0/4ffffff is not in the history of target timeline B\n HINT: was the target timeline created by promoting from a timeline < latest?\n\nThis situation should be rare but can happen during complex recovery scenarios where the user is explicitly setting the target timeline." }, { "commit": "322e764f295f65b2a1189e207f6115471f943b1b", "date": "2025-01-30 21:43:48 -0500", "subject": "Add Coverity build to release instructions." }, { "commit": "6e437defa93b8bc43e93aefcc4e7320633ac9651", "date": "2025-01-30 14:28:28 -0500", "subject": "Refactor backupBlockIncrMapSize() range handling to satisfy Coverity.", "body": "Coverity complained about a possible overflow of result in the prior implementation.\n\nIt appears that Coverity was not able to follow the logic through the try block, but refactor and add an assertion to silence the complaint."
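The Coverity refactor above follows a common pattern: state the precondition as an assertion so the analyzer can prove the arithmetic cannot wrap. A minimal sketch of that pattern, not the actual backupBlockIncrMapSize() code:

```
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// State the precondition so the analyzer (and the reader) can see that the
// subtraction below cannot wrap -- illustrative, not pgBackRest's real code
static uint64_t
rangeSize(const uint64_t limit, const uint64_t offset)
{
    assert(limit >= offset);

    return limit - offset;
}

int
main(void)
{
    printf("%llu\n", (unsigned long long)rangeSize(10, 4)); // 6
    return 0;
}
```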
}, { "commit": "89615eee654388ea046812bfaa30d514b269c5d1", "date": "2025-01-30 13:59:42 -0500", "subject": "Refactor loop in restoreManifestMap() to satisfy Coverity.", "body": "Coverity complained that decrementing targetIdx would result in it equaling UINT_MAX. While this is true it had no impact overall (at it least in the current code) since targetIdx was immediately incremented in the loop.\n\nHowever, Coverity's suggestion is better and safer for future code updates so it makes sense to change it." }, { "commit": "5421ef3e92fb01efdddd235bd7a7c18d7dac6c37", "date": "2025-01-30 13:48:59 -0500", "subject": "Add cast to suppress Coverity complaint about volatile used in assert().", "body": "Coverity had this complaint:\n\nassert_side_effect: Argument openData of ASSERT() has a side effect because the variable is volatile. The containing function might work differently in a non-debug build.\n\nIt appears this can also be fixed by assigning the volatile variable to an automatic but the cast seems to work just as well." }, { "commit": "d5cefb7290c34c64a2b9aa89ff04a718df4ec18b", "date": "2025-01-29 13:48:26 -0500", "subject": "Fix error reporting for queries with no results.", "body": "If a query that expected no results returned an error then it would incorrectly report that no results were expected because the error was interpreted as a result.\n\nSwitch the order of the checks so that an error is reported instead and add a test to prevent regression." }, { "commit": "d50b01b4852d82020b840496603d620b00ea98f9", "date": "2025-01-28 18:48:11 -0500", "subject": "Add assertions to satisfy Coverity about possible underflows.", "body": "Coverity complained about possible underflows so add assertions to demonstrate that the values in question are greater than zero." }, { "commit": "e46374dc7dc90275ff2836f152fb2b976dbe0e2a", "date": "2025-01-28 15:30:23 -0500", "subject": "Lower log level of backupDbPing()/dbPing() to trace.", "body": "These functions get called very frequently even though they generally result in a noop at the protocol level.\n\nLower the log level to reduce noise in the log at debug level." }, { "commit": "e625ed8be2228cb99c3801f9d1776909a2201c5a", "date": "2025-01-28 09:14:30 -0500", "subject": "Caveat --tablespace-map-all regarding tablespace creation.", "body": "If a tablespace is created after the backup starts then it cannot be mapped using --tablespace-map-all since there is no record of it in the manifest.\n\nThis would be extremely complex to fix but it can be documented." }, { "commit": "dde1b0477209512aabe1e9ca58836922a3c1661f", "date": "2025-01-27 17:14:34 -0500", "subject": "Add StringId linter.", "body": "Verify that all StringIds in the project have been generated correctly.\n\nThis also makes it easy to generate new StringIds by copying an existing StringId and modifying the string. The error message will provide the required value." }, { "commit": "d582739d822bbdb705a6a946db6c87675d5c2ccc", "date": "2025-01-27 15:51:57 -0500", "subject": "Convert 5-bit test StringId to 6-bit.", "body": "The original string was valid as either 5-bit or 6-bit but since we're trying to test 6-bit update the string to something only valid for 6-bit." }, { "commit": "6df96f505fd0f70d84b50ad36498c3a4c05a61bc", "date": "2025-01-23 17:12:05 -0500", "subject": "Separate version into component parts.", "body": "This guarantees a consistent version representation and allows the version to be easily represented in other ways." 
}, { "commit": "6776940c3bbe8b61e851921099354c4e63d80d7b", "date": "2025-01-23 14:55:44 -0500", "subject": "Use three part version in development builds.", "body": "This makes the versioning more consistent and is required by a subsequent commit that will separate the version components." }, { "commit": "e59385718c5bc207e96325a2f9ed06b9b4395218", "date": "2025-01-23 08:10:37 -0500", "subject": "Update CI containers to include newest PostgreSQL patch releases." }, { "commit": "6fbb28fa2d615576be78376c7f539921f6b8bc6a", "date": "2025-01-23 07:58:41 -0500", "subject": "Do not set recovery_target_timeline=current for PostgreSQL < 12.", "body": "PostgreSQL < 12 defaults recovery_target_timeline to current but if current is explicitly set it behaves as if latest was set. Since current is not handled in the PostgreSQL code it looks as if there should be an error during the integer conversion but that doesn't happen due to incorrect strtoul() usage (not checking endptr).\n\nHandle this by omitting recovery_target_timeline from recovery.conf when it is explicitly set by the user to current." }, { "commit": "e58d468e27cb13346d50fe32af44ef12a057455b", "date": "2025-01-21 18:39:51 -0500", "subject": "Fix typo." }, { "commit": "931435c0177ded2189903385d520d5c8819c4f5e", "date": "2025-01-21 11:45:50 -0500", "subject": "Allow backup command to operate on remote repositories.", "body": "The backup command has always been limited to working only when the repository is local. This was due to some limitations in storage (addressed in 01b81f9) and the protocol helper (addressed in 4a94b6be).\n\nNow that there a no limitations preventing this feature it makes sense to enable it. This allows for more flexibility in where backups are run." }, { "commit": "844f91fe3f3b04464a6a7b1fc7c743800db01933", "date": "2025-01-20 15:12:27 -0500", "subject": "Specify length of encoding strings.", "body": "This saves a byte per string but more importantly makes them match the declaration of encodeHexLookup." }, { "commit": "4bc9376d6fe276a3bf40a5c5e7891676a0c88392", "date": "2025-01-20 14:19:25 -0500", "subject": "Remove \"Additional Notes\" header from release notes.", "body": "This was intended to separate the code changes from documentation and test suite changes but it arguably does not add any clarity.\n\nSince documentation and test suite changes are explicitly marked as such that should be clear enough." }, { "commit": "23bd392bdcf76ecadc0eb9e76b60c34580590fae", "date": "2025-01-20 14:09:54 -0500", "subject": "Improve hex encode performance with bytewise lookup.", "body": "Previously, hex encode looked up each nibble of the input separately. Instead use a larger lookup table containing the two-byte encoding of every possible input byte, resulting in a 1/3 reduction in encoding time.\n\nInspired by and mostly cribbed from PostgreSQL commit e24d7708." }, { "commit": "713f6657d36746221a47989ee2e6ea369fd008dd", "date": "2025-01-20 10:57:27 -0500", "subject": "Merge v2.54.2 release." }, { "commit": "7a33d6168b2650515ded488c2e1b7eabd1d942a8", "date": "2025-01-14 13:10:32 -0500", "subject": "Replace constant version with macro in backup test module." }, { "commit": "6244f02bb3f44f985df5562a90400b3e7c9b9163", "date": "2025-01-14 10:50:48 -0500", "subject": "Update runner versions on Github actions.", "body": "Ubuntu 20.04 will be EOL soon so update all actions that are using it. Update other actions as far as possible without making too many changes." 
}, { "commit": "fd23257c6a52acd39b3d2deab8bcf905605dbc12", "date": "2025-01-06 13:50:14 -0500", "subject": "Remove extraneous const qualifier." }, { "commit": "b5bb1aa72c3a2fce6b5a024a815db4a09aa421e1", "date": "2025-01-05 13:32:09 -0500", "subject": "Remove makefile formatting from editor config.", "body": "This is no longer required since the makefile has been removed." }, { "commit": "5fac1b405869e126330a99586364517c5917172e", "date": "2025-01-02 09:11:19 -0500", "subject": "Update LICENSE.txt and PostgreSQL copyright for 2025." }, { "commit": "4a94b6bef9108ab775c0070571a1fb7dd8883eeb", "date": "2024-12-27 13:51:50 -0500", "subject": "Refactor protocol helper.", "body": "Simplify and improve data structures that track protocol client connections. The prior code could not store pg or repo clients but not both. We don't have a need for that yet, but tracking clients only by hostIdx was not flexible for some upcoming improvements. It is important to be able to identify and free clients very precisely.\n\nIn general this code should be easier to understand and removes duplicated code for local/remote clients." }, { "commit": "13f23f21683eef2d309f76d76cafc1f9b3852693", "date": "2024-12-26 12:01:59 -0500", "subject": "Fix issue after disabling bundling with block incremental enabled.", "body": "When bundling and block incremental are both enabled the bundleRaw flag is set to indicate that headers are omitted (whenever possible) for encryption and compression. This is intended to save space, especially when there are very large numbers of small files.\n\nIf bundling is disabled this flag needs to be preserved so that existing bundles from prior backups are read correctly. However, the prior code was only saving the flag when bundling was enabled, which caused prior backups to be unreadable if bundling was disabled.\n\nFix so that the flag is preserved and backups are not broken." }, { "commit": "9ee3b2c59309fd86e5504374ad9f195bb363736b", "date": "2024-12-26 10:45:11 -0500", "subject": "Fix compression type in integration tests.", "body": "Due to this bug the compression type in integration tests was always set to none. There are sufficient other tests for compression that this was not masking any bugs, but it was obviously not ideal." }, { "commit": "8b9e03d618d748e36ac006a0b0bb633930961512", "date": "2024-12-23 10:30:41 -0500", "subject": "Move linkCreate interface function to alphabetical order." }, { "commit": "48ecbe422da95a87c24e125f0f52bd5de2c02f6e", "date": "2024-12-19 13:52:59 -0500", "subject": "Clarify behavior of multiple configuration files." }, { "commit": "3210c9283fb7b14f26013dbea05df96e16b77e51", "date": "2024-12-16 14:55:44 -0500", "subject": "Clarify that unhandled errors may occur in edge cases." }, { "commit": "690c9803c3fa420d62bb62eccd4ae0f29266ff11", "date": "2024-12-16 12:56:03 -0500", "subject": "Add missing const qualifier." }, { "commit": "005c7e974f7966d49fbdc8cbdffcf819f532b96b", "date": "2024-12-16 12:04:21 -0500", "subject": "Merge v2.54.1 release." }, { "commit": "4d4d23131c75bf29655f6cd7625c25063ed09408", "date": "2024-12-15 11:11:04 -0500", "subject": "Rephrase invitation to star on Github." }, { "commit": "fbb31eefca5b7f79f55ee96517c05159a9cf7390", "date": "2024-12-11 10:03:52 -0500", "subject": "Change \"find\" to \"visit\" in introduction." 
}, { "commit": "5c8296df066564be35d3936610ebff1c332f38da", "date": "2024-12-10 11:22:45 -0500", "subject": "Remove reference to disabling network compression in the documentation.", "body": "Previously setting compress-level-network=0 would disable compression. This worked because gzip disables compression at this level but still transmits the data in gz format.\n\nlz4 does not provide similar functionality so we would need to disable the compression filter entirely. This does not seem worth it however since lz4 compression is very efficient and 0 is the default fast mode." }, { "commit": "d96966065b937b595b6c45d41511f6f28e01a8e3", "date": "2024-12-09 13:19:55 -0500", "subject": "Add missing const qualifier." }, { "commit": "0e143ba7e7bf5c2d8c94fd44db57f768d0cdc72d", "date": "2024-11-27 17:05:31 -0500", "subject": "Remove --min-gen option from test.pl.", "body": "This option was useful for the Perl code generation and autoconf generation, which were both slow. These are both gone now and the C code generation is fast enough that there is no need to exclude it.\n\n--dry-run will still prevent certain code generation from running. This may not be necessary any more but removing it should be the subject of a separate commit." }, { "commit": "cad595f9f8e6aaa9b64c9dcad8aa878c0c72a4d6", "date": "2024-11-26 11:23:43 -0500", "subject": "Full/incremental backup method.", "body": "This backup method does a preliminary copy of all files that were last modified prior to a defined interval before calling pg_backup_start(). Then the backup is started as usual and the remainder of the files are copied. The advantage is that generally a smaller set of WAL will be required to make the backup consistent, provided there are some files that have not been recently modified.\r\n\r\nThe length of the prior full backup is used to determine the interval used for the preliminary copy since any files modified within this interval will likely be modified again during the backup. If no prior full backup exists then the interval is set to one day.\r\n\r\nThis feature is being committed as internal-only for the time being." }, { "commit": "0577b0301689bc19d49c4081cc652971de5da8cf", "date": "2024-11-26 11:03:27 -0500", "subject": "Use lz4 for protocol compression.", "body": "lz4 provides much better compression speed and gives similar compression ratios to gz when used at low levels (the gz default was 3)." }, { "commit": "4af42d93b2a0201182d16c5163e9813635d12e3c", "date": "2024-11-25 10:38:37 -0500", "subject": "Update release notes for PostgreSQL 17 support.", "body": "Accurately reflect when different versions of PostgreSQL were supported since an update was required for beta3." }, { "commit": "c351263a1d09829c91fc3f9016fefe666b5b6c62", "date": "2024-11-22 15:25:43 -0500", "subject": "Fix typos.", "body": "Found using `codespell -S *.eps,*.cache,*.xml -L inout,te,fo,bload,fase,collet,hilight,debians,keep-alives` and `typos --hidden --format brief`." }, { "commit": "7f2dfc021c6689a5bddfbd7ed85d64ae93c01324", "date": "2024-11-18 13:33:03 -0500", "subject": "Update Fedora test image to Fedora 41." }, { "commit": "33d7681347ccdbaf2c026fd482ed3949d75d447a", "date": "2024-11-18 10:58:00 -0500", "subject": "Enable missing-variable-declarations compiler warning.", "body": "Warn if a global variable is defined without a previous declaration. Use this option to detect global variables that do not have a matching extern declaration in a header file." 
}, { "commit": "4ae160aa34747128d2a45aa1f31893ebd7b86f3b", "date": "2024-11-15 09:44:15 -0500", "subject": "Add wait for async archive log exists check in integration test.", "body": "They may be a small delay before the log exists, especially on slower platforms. Add a wait so the test does not fail in this case." }, { "commit": "12fe1393159bf63724279f03d43bc0a69ad2b636", "date": "2024-11-13 17:48:14 -0500", "subject": "Allow negative values for integer options.", "body": "This mostly worked but there was a rendering issue that prevented compilation." }, { "commit": "d7c2d2ba1b97bb05ef7d97c4622c92e97282cafd", "date": "2024-11-13 17:28:21 -0500", "subject": "Move compression driver param list management to a common module.", "body": "This code was duplicated in each driver so this means less duplication.\n\nIn addition, some drivers were not creating a parameter list for decompression which meant they could not be used remotely. This is not a currently a bug since none of them were being used remotely, but it was a blocker for using lz4 for protocol compression." }, { "commit": "274bb24a5ab122b599d31dd6cee197995a564c46", "date": "2024-11-13 09:56:42 -0500", "subject": "Stabilize async archiving in integration tests.", "body": "The integration tests could fail if:\r\n\r\n1. After restoring the PostgreSQL instance the recovery process starts, which calls asynchronous archive-get.\r\n2. After archive-get checks the existence of the queue directory, but before writing the WAL file, there are restores when the next test is begun, which leads to the deletion of the queue directory.\r\n3. Since the directory no longer exists, writing the WAL file will fail, and archive-get will write the error file to the queue.\r\n4. A new PostgreSQL instance will start and the recovery process will begin, which requests the WAL file.\r\n5. The new archive-get looks into the queue directory, finds the error file, and throws out the error, after which the PostgreSQL recovery fails because the previous archive-get background process has not finished yet.\r\n\r\nThis patch fixes the problem by using a separate spool directory for each test." }, { "commit": "db912c049c7e4b98ec1cdfa5b4e45b3bbb7d1a6b", "date": "2024-11-08 10:21:25 -0500", "subject": "Exclude function void return logging macros from coverage reporting.", "body": "An in 355e27d6, it makes sense to exclude FUNCTION_(LOG|TEST)_RETURN_VOID() macros when then they are on the last line of a function because in this case they are a noop (but are still used for debugging)." }, { "commit": "c9c73ede9d8182374372c46face4433997e8c0ea", "date": "2024-11-07 10:30:16 -0500", "subject": "Fix issue with version/help commands attempting to load pgbackrest.conf.", "body": "8d6bceb5 refactored version/help to operate more like regular commands in part to simplify the implementation of --version and --help. Unfortunately this had the side effect of these commands also loading pgbackrest.conf which would lead to an error if the file could not be read or parsed.\r\n\r\nAdd a filter to prevent version or help from loading pgbackrest.conf. Also prevent reads from the env to stop any warnings or errors from that source." 
}, { "commit": "c6785431653d81d793b9c683091bfa20156c0c2f", "date": "2024-11-07 09:27:02 -0500", "subject": "Remove conditional compilation for lz4.", "body": "0c32757f made lz4 required in the meson build but conditional compilation was left in to make reverting easy for packagers just in case.\n\nSince a few releases have gone by without any complaints, remove conditional compilation for lz4." }, { "commit": "355e27d69ce80c87fb9458f8240699ec5ce5dc5e", "date": "2024-11-02 11:50:30 -0400", "subject": "Exclude function logging macros from coverage reporting.", "body": "It makes sense to exclude these from reporting since they are always covered when the function is covered and they are purely for debugging." }, { "commit": "fc5fdb8d358ae09d398922c1a8df44f47c8b40d6", "date": "2024-11-02 11:36:56 -0400", "subject": "Fix incorrect log macros.", "body": "These led to incorrect log output in some cases. Probably not very noticeable, but definitely wrong.\n\nAlso fix types in the log macro comments." }, { "commit": "ed390780685adf1a463c74ee1cd3dad1b3dd9895", "date": "2024-10-29 14:15:12 +0200", "subject": "Enable float-equal and init-self compiler warnings.", "body": "No code changes were required to enable these warnings." }, { "commit": "98670aee8927e4f7125318faeb1f18c4facf51e3", "date": "2024-10-28 19:13:38 +0200", "subject": "Enable redundant-decls compiler warning.", "body": "Warn if anything is declared more than once in the same scope, even when the extra declaration is valid and changes nothing. This is primarily useful for catching missing header ifdef barriers.\n\nMove the environ variable into config/parse.h since it must be declared by us and we use it multiple times." }, { "commit": "9ec9b9d120c8b1113dfad734a006e0f91427cf3f", "date": "2024-10-28 17:49:55 +0200", "subject": "Enable missing-prototypes compiler warning.", "body": "Warn if a global function is defined without a previous prototype declaration. This helps detect when a function that should be static is accidentally declared extern.\n\nMost of the changes are to add missing header files so functions can see their declarations.\n\nIn a some cases functions that should have been static were marked as such. There were only five of these in the core but every little bit counts.\n\nLastly, it was necessary to suppress the warning in the postgres test modules where the function declarations are not available. This is fixable by aligning the module with the auto-generated code in core, but is not a priority." }, { "commit": "21f7d2d3a840852d7964c5684dd04f29ce9cc68a", "date": "2024-10-28 14:21:38 +0200", "subject": "Enable cast-qual compiler warning.", "body": "Warn whenever a pointer is cast so as to remove a type qualifier from the target type. For example, warn if a const char * is cast to an ordinary char *.\n\nMost of the changes for this are fairly rote: just add a const qualifier where needed. In some cases functions needed to be reworked to return non-const where before they had returned const and then cast it back to non-const. None of these patterns appeared to be bugs, but they were certainly misleading.\n\nSome cases (especially excvp() and calls to bz2) could not be fixed because of how functions out of our control are defined. In those cases the warnings have been suppressed and a comment added to detail the exception. This was also done a few places in the tests." 
}, { "commit": "bb7e1724a95a4bf087bae6782ae255e1583bc9b7", "date": "2024-10-28 13:21:35 +0200", "subject": "Update String, Buffer, and Variant constants to work with cast-qual.", "body": "These three objects can be created as constants at compile time using specialized macros. Unfortunately since the values assigned are also const, cast-qual complained about the cost qualifier being lost.\n\nFix this by creating new structures to be used just for creating these constants. This is not ideal due to the need to keep the duplicated structures in sync, but in practice these structures are almost never modified. Testing should catch any out of sync structures and this feature is valuable enough to keep even though in theory there could be memory safety issues. In practice the APIs prevent const objects from being used in an unsafe way and testing provides a fair assurance of safety. Writing to these consts would be a fatal error even if it did not cause a segfault.\n\nIdeally, we would be able to use warning suppression in these macros to avoid the extra struct, but due to the way they are used it is not possible to add the required pragmas (even using _Pragma).\n\nFinally this construction makes it obvious that something special is being done, rather than it being under the covers." }, { "commit": "a981ef08d70fdd5faf8052a75d2e066ebcca7057", "date": "2024-10-28 13:20:19 +0200", "subject": "Fix misplaced volatile qualifiers in GCS authorization JWT generation.", "body": "The volatile qualifiers should be on the pointers that are stored on the stack, not on the structures that are allocated by OpenSSL functions.\n\nAlso remove all the casts that were required when volatile was in wrong place." }, { "commit": "118f93baab39339a2c22deeb360f28370e139dd6", "date": "2024-10-28 13:07:35 +0200", "subject": "Update Cirrus MacOS runner to OpenSSL 3.", "body": "OpenSSL 1.1 is no longer available in HomeBrew." }, { "commit": "48f511dad339ef9357e00f7bd8e2bf4b7c800dd4", "date": "2024-10-21 13:56:04 +0300", "subject": "Remove support for PostgreSQL 9.4.", "body": "Per our policy to support five EOL versions of PostgreSQL, 9.4 is no longer supported by pgBackRest. Remove all logic associated with 9.4 and update the tests.\r\n\r\nThis includes a small fix in infoPg.c to allow backup.info files with old versions to be saved. This allows expire to function when old versions are present. Even though those older versions cannot be used, they can be expired.\r\n\r\nTests for 9.4 are left in the expire/info tests to demonstrate that these commands work with old versions present." }, { "commit": "89c9baba72fc99616083a7adc4be2c3e2bf9e4dd", "date": "2024-10-21 13:32:35 +0300", "subject": "Remove autoconf/make build.", "body": "2.54.0 is the last release to support the autoconf/make build. Remove the autoconf/make build to reduce maintenance going forward." }, { "commit": "4125f726a857629c4500f82a3989dc1b69977500", "date": "2024-10-21 09:11:49 +0300", "subject": "Begin v2.55.0 development." 
}, { "commit": "1e5f46b7d9626eadfcf99d08cb984a74085ee0c8", "date": "2024-10-21 08:57:43 +0300", "subject": "v2.54.0: Target Time for Versioned Storage" }, { "commit": "7effca0d0995844b3b3b89d568c3c8cee7b9ef1e", "date": "2024-10-20 20:28:54 +0300", "subject": "Update CI tests for ppc64le/s390x architectures.", "body": "The action suddenly broken and this appears to be the best work around according to https://github.com/uraimo/run-on-arch-action/issues/155.\n\nUnfortunately the tests take almost twice and long to run, probably because the container needs to be built from scratch." }, { "commit": "9ba2db017348d304f96ce89592fc01f29197e136", "date": "2024-10-16 12:41:26 +0300", "subject": "Clarify source for data_directory." }, { "commit": "f76da03eb674af8c05d90abf5c2a7f246f8c9c01", "date": "2024-10-16 12:14:12 +0300", "subject": "Update CI containers to include newest PostgreSQL patch releases." }, { "commit": "bd0480ca7d6e5ee56a53a2887d893c41f1a8d132", "date": "2024-10-16 11:07:21 +0300", "subject": "Move SFTP test off of PostgreSQL version tested by 32-bit.", "body": "There have been occasional SFTP authentication failures on 32-bit. We are planning to drop 32-bit support so it does not seem worth chasing these errors down and they are likely timing issues anyway." }, { "commit": "65dfe2407d0c358209a01f06cd4326d0a80d1acb", "date": "2024-10-12 10:06:40 +0300", "subject": "Retry socket bind.", "body": "In the case of a rapid restart it is possible that the socket may not be immediately available, so retry until it becomes available.\n\nThis is particularly useful for testing where sockets are bound and released very rapidly." }, { "commit": "ed72c6f9a1b156b64e05a8fc5e236e6568657d0a", "date": "2024-10-11 12:05:23 +0300", "subject": "Render command help summaries the same as option help summaries.", "body": "Option help summaries do not have initial capitals (except in special cases) and final periods so it makes sense to render the command summaries the same way.\n\nUse the same function for both so they are consistent." }, { "commit": "eb2f279a2998d1ed893c5d8984590ab4997a5f32", "date": "2024-10-11 11:53:34 +0300", "subject": "Better logic for deciding when a summary should be lower-cased.", "body": "The old logic would lower-case PostgreSQL which looked odd. This should be more robust for anything that looks like a proper name of acronym." }, { "commit": "70bda2cfb21130eb589a87cb419fb5f22a71c78a", "date": "2024-10-10 11:17:35 +0300", "subject": "Improve SFTP error messages.", "body": "The numbers by themselves weren't very informative and required looking into the libssh2_sftp.h header file for the definition." }, { "commit": "c8ccaaa755993e3fd5e87148bd0a991acb3050c3", "date": "2024-10-10 09:48:43 +0300", "subject": "Fix PostgreSQL query performance for large datasets.", "body": "The asynchronous logic used to implement the query timeout was misusing PQisBusy(), which caused the wait handler to throttle the consumption of command results. It could introduce a large delay on a query up to `db-timeout` because of the back-off sequence.\r\n\r\nFollowing the recommendation of libpq, fix by polling the client socket for data availability and then continue consuming results and checking for command busyness." }, { "commit": "33fa3965616b1432e5d1d59c489302fa4891a39e", "date": "2024-10-09 17:57:52 +0300", "subject": "Refactor Wait object to expose remaining wait as waitRemains().", "body": "This is useful for code that has its own wait mechanism, e.g. 
poll(), but still needs a way to track overall time elapsed.\n\nTo keep it simple waitRemains() is called by waitMore()." }, { "commit": "77ae753ef59bd40b9867be21e8519093786c3b85", "date": "2024-10-09 17:48:00 +0300", "subject": "Allow fdReady() shim to be run only once with a specified return value.", "body": "This allows more targeted testing than the prior (still supported) approach." }, { "commit": "6ac4e8dd78f16e93300f7747c4bc8fa8e1a0558b", "date": "2024-10-09 10:46:45 +0300", "subject": "Move fd module shim to io module test.", "body": "The shim is needed in an earlier test and this is a more logical place for it anyway." }, { "commit": "1020bc117ab2e116abb41eebd40216726a3d2561", "date": "2024-10-08 18:15:34 +0300", "subject": "Shuffle PostgreSQL versions between test containers.", "body": "Ubuntu 20.04 has been having consistent errors starting PostgreSQL 10 so move 9.5 to this container instead. An older version makes sense with an older distro.\n\nAlso move PostgreSQL 12 from RHEL 8 since this version will be EOL soon." }, { "commit": "b50ad48fd44ba958a6a71ac0c5fe1b113968e577", "date": "2024-10-08 11:01:55 +0300", "subject": "Use systemctl replacement to generate documentation.", "body": "Containers are notoriously unfriendly to systemctl (really systemd) but we prefer to use systemctl to make our documentation as accurate as possible. This replacement performs all the functions of systemctl without requiring systemd, which great simplifies container configuration and allows the documentation build to work in more environments." }, { "commit": "1ebdcd611d1724feedb23f9de854819ec5499c9f", "date": "2024-10-08 10:28:01 +0300", "subject": "Use output path for temp config files during documentation build.", "body": "Putting the files in the user's home directory was too Linux-centric." }, { "commit": "b3ca2e34823b28862d999f87c4d7346707952a5b", "date": "2024-10-05 09:41:50 +0300", "subject": "Correctly display current values for indexed options in help.", "body": "The current value for an indexed option was always for the first index, e.g. pg1-path. This is likely legacy from before indexing was added (and faithfully copied over from Perl, apparently).\r\n\r\nFix this by enumerating the current values in the option help and displaying in the option list when more than one value exists." }, { "commit": "047e3d0ed95d882b00e2da1640e78e3ba06e1d51", "date": "2024-10-02 19:49:15 +0300", "subject": "Finalize catalog number for PostgreSQL 17 release." }, { "commit": "9f97269678091cc43a15920cf8eab34b231ae431", "date": "2024-10-02 18:02:15 +0300", "subject": "Use `uname -m` to get architecture for RHEL package in user guide." }, { "commit": "d4ed1884ea5ce54e05f84ab1aacdfc0385538c06", "date": "2024-09-24 12:33:19 +0300", "subject": "Reduce logging for help functions.", "body": "Full debug/trace logging in production is unlikely to be useful but does use space in the binary.\n\nReduce logging to be useful for testing but not be deployed in production." }, { "commit": "cd7711f5e6306778427ced8b796da09398e08015", "date": "2024-09-20 15:58:18 +0300", "subject": "Update Cirrus CI MacOS running to Sonoma." }, { "commit": "278319eba3444070123d3f0f277be9234604d937", "date": "2024-09-20 15:49:26 +0300", "subject": "Add performance tuning section to user guide.", "body": "We frequently tell users to enable to these options but they are spread through the documentation and not at all obvious. Hopefully putting them in the quick start will make them more visible and also provide an easy place to link." 
}, { "commit": "f73c6382752ca8cad43a412a03b6b5f34cf94984", "date": "2024-09-20 15:21:38 +0300", "subject": "Skip command-line options in configuration reference.", "body": "Options that are only valid on the command-line should not appear in the configuration reference because it implies that they can be added to pgbackrest.conf, which is not the case.\n\nMost command-line options were already excluded because they lacked a section, but a few were slipping through." }, { "commit": "b19134dccd5d0dab87b5d45b443f481df00370b1", "date": "2024-09-17 10:51:21 +0300", "subject": "Allow repositories on versioned storage to be read at a target time.", "body": "This feature allows the archive-get, info, repo-get, repo-ls, restore, and verify commands to operate at a point-in-time on versioned buckets in Azure, GCS, and S3. This allows recovery even if a repository has been accidentally or maliciously deleted or corrupted." }, { "commit": "46c1297be344909e900be596a865064233da9f87", "date": "2024-09-09 15:47:23 +0300", "subject": "Remove limitation on reading files in parallel during restore.", "body": "This restriction prevented multiple files being read from a remote simultaneously, which was not supported by the protocol. Although the limitation only applied to remotes, it was applied in all cases for testing purposes and because we planned to fix it.\r\n\r\nProtocol command multiplexing added in df8cbc91 allows files to be read simultaneously from a remote so this restriction is no longer required.\r\n\r\nNote that there is a test for this condition since the prior code had coverage. It might be tricky to ensure that test doesn't go away, but in general we should have enough tests in place to ensure simultaneous reads function as expected." }, { "commit": "a42629f87ab70ffaee8d1241eb50c5ea7154a87d", "date": "2024-09-09 12:56:52 +0300", "subject": "Allow requested standby backup to proceed with no standby.", "body": "Add a \"prefer\" value to the backup-standby option to allow the backup to proceed when no standby is found. Note that this will not help if the standby is responding but fails to sync with the primary after the backup has started.\r\n\r\nIntroduce a new option modifier, bool-like, that allows a boolean option to be converted to a string or string-id option while still allowing the option to act like a boolean on the command-line, e.g. --no-backup-standby." }, { "commit": "b454b9e403526b1f352df964d9206c27d2397df3", "date": "2024-09-05 11:38:42 +0300", "subject": "Save backup.info only when contents have changed.", "body": "This prevents backup.info from being saved again when expire does not make any changes.\n\nMore importantly, as we look to support versioning on object stores, it will be much easier to determine a good point-in-time to use for restore if there are no extraneous saves of backup.info." }, { "commit": "83802ded8c5560a097757c420d0753c50d0edbb3", "date": "2024-09-01 12:51:27 +0700", "subject": "Remove invalid const keywords from info/infoBackup module.", "body": "In these functions infoBackup was marked as const even though it was modified in the function. This was allowed by the compiler because the infoBackup struct was not being directly modified but it still goes against our coding conventions." 
}, { "commit": "95a4b9e10a3f7a26ebaae22fc6abd79cee14189a", "date": "2024-09-01 11:21:56 +0700", "subject": "Refresh web-id token for each S3 authentication.", "body": "The token file pointed to by the AWS_WEB_IDENTITY_TOKEN_FILE environment variable was read once at startup, but for long operations the token might expire before completion.\r\n\r\nInstead read the token on each S3 authentication so the current token is always used." }, { "commit": "d204bac75d3020251c52c9b7892b47c5b3e7f018", "date": "2024-08-30 16:23:55 +0700", "subject": "Refactor storage/remote module to move more repository storage tests.", "body": "Move tests missed in 3b3886f3." }, { "commit": "0b4371f3ecead992e6140251c658e9fed56a8f37", "date": "2024-08-30 16:02:04 +0700", "subject": "Update comment in storage/list module.", "body": "This aligns better with other comments in the area." }, { "commit": "3b3886f3b81776adfe3fd4970e977f56d0c2b02f", "date": "2024-08-30 16:00:07 +0700", "subject": "Refactor storage/remote module to move repository storage tests.", "body": "Move tests that use functions that are not valid for non-Posix repositories to pg storage.\n\nThis allows for a test storage driver that only implements functions required for repositories." }, { "commit": "b3fed2cfcf4562dde52be28c35e20d7d38171564", "date": "2024-08-30 15:44:43 +0700", "subject": "Simplify command/repo tests.", "body": "Reduce the number of tests that use links and special files. This simplifies future versioning tests." }, { "commit": "94e21893e8b43ef9080246feba5fa89d39e67f0c", "date": "2024-08-30 15:39:03 +0700", "subject": "Add hrnSleepRemainder() to time test harness.", "body": "This function replaces some ad hoc implementations in the tests." }, { "commit": "c40dd5ae41ae800a031715a60c91dd839b39905d", "date": "2024-08-28 11:07:52 +0700", "subject": "Improve reporting of partial reads in server test harness.", "body": "Previously partial reads would be reported as an EOF which was not very helpful.\n\nAlso update error formatting to make requests easier to compare." }, { "commit": "2ce01e57d3f2f294efe9a9ac6c7d357342796a13", "date": "2024-08-20 20:57:06 +0700", "subject": "Refactor interface/driver getters for IoRead/Write and StorageRead/Write.", "body": "Add getters where needed and remove the unused driver member from the StorageRead/Write objects. The new getters are only required for testing but they don't compromise the core code." }, { "commit": "a71d88481c60178de1c10dac2a0d306959675999", "date": "2024-08-20 20:34:40 +0700", "subject": "Improve TEST_STORAGE_GET() to allow testing of missing files.", "body": "Previously a missing file would always throw an error. Now there is the option to return NULL for missing files." }, { "commit": "4eab64fcd926c2b5741d4b44f8b9d9144bf1e3a5", "date": "2024-08-20 20:14:32 +0700", "subject": "Improve file removal performed after each unit test.", "body": "The rm would miss files beginning with a dot which are being used going forward for the versioning test driver.\n\nInstead use find since it will delete all files." }, { "commit": "f6aaa3672b851fa8e70942f332a633f3b42babb7", "date": "2024-08-20 11:03:24 +0700", "subject": "Merge v2.53.1 release." }, { "commit": "2b29fe9337e9b2cbde26915c8bfb999b4010560c", "date": "2024-08-17 12:58:55 +0700", "subject": "Move getEpoch() to cvtZToTime() and refactor.", "body": "This function will be needed elsewhere going forward and in any case it makes sense to move this out of the restore module.\n\nRefactor to avoid dependency on a regular expression." 
}, { "commit": "60f96429b8f58827e3f2d6cd96dc1f6e1725c90c", "date": "2024-08-16 10:56:07 +0700", "subject": "Fix segfault on delayed connection errors.", "body": "Connection errors could cause a segfault if the error was delayed enough to pass the initial call to sckClientOpenWait() and the error was instead thrown by a subsequent call to sckClientOpenWait(), which was not correctly initializing a variable required for error handling.\r\n\r\nWhile this can be produced fairly easily in a test environment, I was unable to craft a unit test to hit this exact condition, probably due to timing. The new code still has full coverage and I added several comments to help prevent regressions." }, { "commit": "04ef43d9ed3e1900a85f0df9475ce6dc04dc9c5f", "date": "2024-08-13 13:45:24 +0800", "subject": "Skip local repository duplicate check for SFTP.", "body": "This check does not make sense for SFTP since the repository will never be local." }, { "commit": "80c9b3001c8f4689837d46783d348a2c839645c0", "date": "2024-08-13 11:53:12 +0800", "subject": "PostgreSQL 17 support.", "body": "This release changed the control and WAL format, which is very unusual for a beta. Update control and WAL versions/structs to match." }, { "commit": "ed9b0c260a6e1f05a17d5d2b86c274afdd2d1b05", "date": "2024-08-13 11:43:05 +0800", "subject": "Fix permissions when restore run as root user.", "body": "When restore was run as the root user the pg_control file would end up with root permissions. This bug was introduced in e634fd8. Fix this by directly overwriting the pg_control temp file rather than doing an atomic write that updates permissions. Also update other parameters to more closely match similar calls.\r\n\r\nThere was also an adjacent error where restore as the root user would fail if the base path did not exist. Fix this by ignoring the missing path since it will be created later and this logic is just trying to find an alternate user for permissions if the user in the manifest does not exist." }, { "commit": "57663536493bfa917e4b1c37d221697a0ddb7a73", "date": "2024-08-02 15:47:30 +0700", "subject": "Use Posix driver to complete dummy storage driver.", "body": "Instead of stub functions, use the Posix driver to make the dummy storage driver interface more functional." }, { "commit": "b306f83493cc859ad15c8f175fc6db2321b79340", "date": "2024-07-31 18:53:02 +0700", "subject": "Summarize backup reference list for info command text output.", "body": "The backup reference list can be very long so it seems better to summarize the list by default for text output and keep the full list when --set is specified." }, { "commit": "c42d484c9d76dfb2baa7b9b462870441e8ad37a0", "date": "2024-07-30 12:48:32 +0700", "subject": "Fix typo in user guide." }, { "commit": "8d6bceb5416a702d010fd5db4be9d6dc294cb1d9", "date": "2024-07-23 16:39:02 +0700", "subject": "Refactor config parse to remove none command, add version/help options.", "body": "The none command was a bit confusing since it was only valid when parsing failed but still needed to be added to various switches and logic. Replace with cfgInited() which should make it clearer what state configuration is in.\n\nMake the default command help and convert --version and --help to real options.\n\nCombine version and help output into a single function to simplify processing in main.\n\nAdditional reformatting and a bit of refactoring." 
}, { "commit": "6c757366c2c7221a9fd571495fa2ec82b4d4da2f", "date": "2024-07-23 11:50:12 +0700", "subject": "Simplify main() functions in core, doc, and test.", "body": "This refactor simplifies the main() functions and puts the more commonly run commands first.\n\nFor core main() also remove code duplication in local/remote role handling." }, { "commit": "faee89206776287a2ba000c43f28ac6479d33689", "date": "2024-07-23 11:10:54 +0700", "subject": "Do not allow help for command roles other than main.", "body": "Previously requesting help for roles other than main would result in a segfault since help.auto.c.inc does not include such help.\n\nPrevent this by erroring when a user requests help for roles other than main.\n\nThis is a bug but it is unlikely to have ever been seen in the field and in any case it has a very low severity since it only impacts a corner-case for the help command." }, { "commit": "ee70c2e26e63ee6299de5a9ca20a3d1ddea036b1", "date": "2024-07-22 14:17:47 +0700", "subject": "Remove internal repo-create command.", "body": "This command was used by the Perl integration tests to create buckets for storage types that required it. Now that the integration tests are written in C they can simply use the same code to create buckets.\n\nThe command was also used in the documentation but there it seems more appropriate to use the corresponding vendor CLI." }, { "commit": "55ca41e13784ef5fdafdb02444969ceb1a699744", "date": "2024-07-22 12:43:54 +0700", "subject": "Update protocol and storage/remote modules to recent coding standards.", "body": "Add const as appropriate." }, { "commit": "df8cbc91c348bc2f746767c87ac5626460fb6f39", "date": "2024-07-22 11:48:32 +0700", "subject": "Protocol command multiplexing.", "body": "Previously it was not possible to read or write two files at the same time on the same remote because the protocol was entirely taken over by the read or write command. Multiple reads are required to make restores efficient when a list of bundled files is being read but blocks need to be retrieved from a separate file or a different part of the same file.\r\n\r\nImprove that situation with sessions that allow related commands to be run with shared state. Also break read/write into separate requests (rather than pushing all data at once) so they can be multiplexed.\r\n\r\nThe disadvantage for read/write is that they now require more back and forth to transfer a file. This is mitigated by sending asynchronous read/write requests to keep both server and client as busy as possible. Reads that can fit into a single buffer are optimized to transfer in a single command. Reads that transfer the entire file can also skip the close command since it is implicit on end-of-file.\r\n\r\nThese changes allow the protocol to be simplified to provide one response per request, which makes the data end message obsolete. Any data sent for the request is now added to the parameters so no data needs to be sent separately to the server outside the request parameters.\r\n\r\nAlso update the Db protocol to use the new sessions. Previously this code had tracked its own sessions." }, { "commit": "e7f4e8d8004f609ab0edda5d7183ccb3e6c55b6a", "date": "2024-07-22 09:42:33 +0700", "subject": "Begin v2.54 development." 
}, { "commit": "6e8a45f650e303fe74a1e34ee8e76a193dbc6c4a", "date": "2024-07-22 09:33:31 +0700", "subject": "v2.53: Concurrent Backups" }, { "commit": "dea48be06dfcd50f48b205f65e45e4dbefa7c37a", "date": "2024-07-21 17:01:23 +0700", "subject": "Fix incorrect examples in doc/test help.", "body": "The help for help was copied from core but the examples were not updated.\n\nUpdate the examples to be appropriate for doc/help." }, { "commit": "32a6dd6c3d1becdbad9ae14df83daf231a84f860", "date": "2024-07-21 16:57:35 +0700", "subject": "Improve config file handling in doc/test config load.", "body": "Since 1141dc20 it has been possible to request that cfgParse() skip loading the config file. Use this logic to replace the code used to ignore config files in doc/test config load." }, { "commit": "ec3e387bb73826c30b32bf0221acb2d56f23f1c5", "date": "2024-07-18 11:19:25 +0700", "subject": "Clarify archive-push multi-repo behavior.", "body": "Clarify that archive-push will not be able to push ahead in other repositories if one is failing unless archive-async in enabled." }, { "commit": "c85191e02486d4e9f5ea34ce3bfe7243ddf74831", "date": "2024-07-16 09:45:22 +0700", "subject": "Update Minio test/documentation container version." }, { "commit": "d8ff89ae758314c8a0f006b7a92097c35534d0ff", "date": "2024-07-15 11:53:57 +0700", "subject": "Update EOL Debian 10 to Debian 11/Ubuntu 20.04 in CI.", "body": "Typically we use the oldest Debian/Ubuntu to run 32-bit unit and integration tests. However, 32-bit is no longer fully supported by Ubuntu (multiple packages we need are missing) and apt.postgresql.org no longer packages for any 32-bit version.\n\nTo address these changes, do 64-bit integration testing on the oldest Debian/Ubuntu (currently Ubuntu 20.04) and 32-bit unit/integration testing on the oldest Debian (currently 11) using the included version for integration testing." }, { "commit": "ce2493c3f6718cfa9209665019d56dde36b81b75", "date": "2024-07-15 11:22:13 +0700", "subject": "Remove obsolete vm constants.", "body": "The VM[2-4] constants do not serve a purpose anymore (removed in 794c5771) and just make the mappings more complicated, so remove them." }, { "commit": "c84ab4914f33dbf0cb762cfb5a552ee39b73eb1e", "date": "2024-07-15 11:12:53 +0700", "subject": "Remove references to mock integration tests.", "body": "Mock integration tests were removed in d41b21c8 but CI was still trying to run them, so remove from CI.\n\nAlso rename mock to integration in test unit tests to avoid confusion in the future." }, { "commit": "dfb620b0b8cf884ca486b413f4cd162e258b2fc5", "date": "2024-07-11 10:54:20 +0700", "subject": "Support IP-based SANs for TLS certificate validation.", "body": "The prior SAN code only recognized DNS-based SANs, which meant that it would not properly validate if using an IP-based SAN. \r\n\r\nAdd support for IPv4 and IPv6 SANs with exact matching only.\r\n\r\nThis simplifies testing when certificate generation tools have trouble generating a DNS:1.2.3.4-style SAN, preferring to include the SAN as IP:1.2.3.4." }, { "commit": "d295156dd38c88c9d71e566509426cf91322e326", "date": "2024-07-09 16:07:40 +0700", "subject": "Improve command/role parsing.", "body": "Reduce redundancy by improving cfgParseCommandId() to work when a command role is present. This way the function does not need to be called twice.\n\nAlso, remove the use of StringList in cfgParse() since checking for a colon is faster and saves memory allocations. 
Modify cfgParseCommandRoleEnum() to accept char * since a String is no longer produced for the role name." }, { "commit": "ccae60940241b30e8bc3e405936b6ba68857930c", "date": "2024-07-09 11:34:13 +0700", "subject": "Add hint to check SFTP authorization log.", "body": "In the case of authorization failures there many be valuable information in the log." }, { "commit": "cf8625d24bd953ad1ed448c32b937584009e5b6b", "date": "2024-07-09 11:17:25 +0700", "subject": "Require OpenSSL >= 1.1.1.", "body": "Versions below 1.1.1 are quite old and no longer need to be supported.\n\nAlso add a missing const in tlsAsn1ToStr()." }, { "commit": "b71d4b53d68800daceb4b6dcb717862cbe5757dc", "date": "2024-07-09 10:48:27 +0700", "subject": "Auto-generate code before building test binary.", "body": "Errors in code generation can cause the test binary build to fail and then it is not possible to see the generated code.\n\nInstead, generate code first so any errors can be seen and analyzed." }, { "commit": "66d3cd42b5a07edaf75b0f365088dc7e202d0573", "date": "2024-07-09 10:45:47 +0700", "subject": "Improve error messages for allow range and allow list options.", "body": "Create mappings between integer, size, time, and stringid option values and their string equivalents. This allows for better error messages and means that the mappings do not need to be stored with defaults, allow lists, etc." }, { "commit": "aadfb54046e52eeedc38750eb91f3e719e22d758", "date": "2024-07-09 10:09:55 +0700", "subject": "Add units to all option sizes.", "body": "Some sizes were missing units. Add units to make it clear that they are sizes in messages." }, { "commit": "6f5066b0b63c5f89e39a8930d29ab725c381221c", "date": "2024-07-07 17:06:44 +0700", "subject": "Fix overly long lines." }, { "commit": "d6f0bf88af8b39492aa8e20c09bdf30260e2eb2f", "date": "2024-07-04 16:53:07 +0700", "subject": "Fix SFTP renaming failure when file already exists.", "body": "Update error handling for libssh2_sftp_rename_ex() in storageWriteSftpClose() when a file already exists. \r\n\r\nThe SFTP servers used during development and testing never returned LIBSSH2_FX_FILE_ALREADY_EXISTS, rather they returned LIBSSH2_FX_FAILURE when a file already existed. However, it is clear that some SFTP servers use LIBSSH2_FX_FILE_ALREADY_EXISTS so add support." }, { "commit": "edd61636a98d193b7c0634b5a4444527132959e0", "date": "2024-07-04 16:22:17 +0700", "subject": "Allow backups to run concurrently on different repositories.", "body": "The prior locking only allowed one backup per stanza, which was required by PostgreSQL <= 9.5 and didn't present a problem when only one stanza could be created.\r\n\r\nNow that multiple stanzas are allowed relax this restriction so that backups can run concurrently for PostgreSQL > 9.5. To do this, update the locking to be per stanza and repo rather than per stanza. Remotes are not aware of the repos that require locking so send an explicit list of files to be locked to the remote. Also remove the advisory lock for PostgreSQL > 9.5.\r\n\r\nFor info output the running backups are combined for progress output in order to avoid changing the JSON format. It definitely makes sense to have per repo progress as well but that will be left for a future commit." 
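The concurrent backup commit above (edd61636) moves locking from per stanza to per stanza and repo. A sketch of what such lock naming might look like (the format is illustrative, not the actual file name):

#include <stdio.h>

/* One lock per stanza/repo pair lets backups to different repositories run
   concurrently, e.g. "demo-1-backup.lock" vs "demo-2-backup.lock" */
static void
backupLockName(char *const buffer, const size_t size, const char *const stanza, const unsigned int repoIdx)
{
    snprintf(buffer, size, "%s-%u-backup.lock", stanza, repoIdx);
}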
}, { "commit": "3a2266f327c340ff1133a6a2398429c844b5755d", "date": "2024-07-04 15:42:09 +0700", "subject": "Display time option defaults and allowed values with appropriate units.", "body": "Similar to size options in 038abaa7, time option defaults and allowed values were displayed in seconds, which could be confusing when the values were large.\n\nThe time options were not updated in 038abaa7 because it required removing the ability to do fractional seconds, e.g. 0.5 seconds. In theory this could cause breakage for users but it seems really unlikely. Fractional seconds are used in tests, however, so the tests have been changed to use milliseconds where required, e.g. 500ms." }, { "commit": "df469471e3afa91212967fc3bca7877a1a8830f3", "date": "2024-07-04 09:28:57 +0700", "subject": "Fix invalid default inherited in job-retry-interval.", "body": "This default was being mistakenly inherited from job-retry and would set job-retry-interval to 1 second for archive get and push. In practice this did not matter since archive get and push default to one retry and the first retry interval is always 0, but it still makes sense to fix it to correct the behavior when the value of job-retry is changed." }, { "commit": "8b82641d87de7fd749414737f62289fb2648e608", "date": "2024-07-01 11:15:14 +0800", "subject": "Shorten enums used in parse.auto.c.inc.", "body": "Rather than using the full enum just use the part of the enum that is unique. This makes the output a bit more readable by removing the repetitive elements. The prefix for each enum is built into its macro." }, { "commit": "cfd4fb2c7da7b0fc113b6518a054eab29d0ef34f", "date": "2024-06-30 16:11:11 +0800", "subject": "Remove PARSE_RULE_OPTION_GROUP_MEMBER() macro from parse.auto.c.inc.", "body": "It is simpler to just set group = true in the PARSE_RULE_OPTION_GROUP_ID(), so do that." }, { "commit": "7d540730941994d3cb630c695e9f672e047d8e7f", "date": "2024-06-25 16:45:47 +0800", "subject": "Default log-level-stderr option to off.", "body": "Writing warnings and errors to stderr by default leads to error messages being lost when the user does not correctly redirect stderr while generating logs for analysis. This happens so often that it seems worth changing the default to increase the quality of the logs we receive.\r\n\r\nIf the user has explicitly set log-level-stderr then there is no change in behavior." }, { "commit": "1094aecab563e8c1601ae80caefb2e4247db87d4", "date": "2024-06-25 16:16:55 +0800", "subject": "Require compiler support for __builtin_clzl() and __builtin_bswap64().", "body": "These functions will be useful for optimizing varint-128 functions.\n\nRequire them in the meson build before adding new code in case there are problems with packaging." }, { "commit": "0c32757fd9269a18b827fa33230d2997c98571a0", "date": "2024-06-25 16:04:02 +0800", "subject": "Require the lz4 library in the meson build.", "body": "We would like to use lz4 for protocol compression instead of gz but first we need to make sure this is not going to cause a problem for packaging.\n\nTo do this make lz4 required in meson but make no changes to the code so this is an easy revert for packagers if there is an issue." }, { "commit": "acb9b6ccbeb78391e19b146488093c83d0ac3ab3", "date": "2024-06-25 15:54:57 +0800", "subject": "Increase width of unsigned integration in performance/type module.", "body": "The prior int was too small to scale up as much as we need for performance testing." 
}, { "commit": "6495a46ca3e08f867efd677aecd29806e9e9f753", "date": "2024-06-25 15:46:07 +0800", "subject": "Fix --run and --scale options for unit tests.", "body": "These were broken while code was being migrated to C and went unnoticed because the options are generally only used when doing performance testing.\n\nThe C code can only take one --run param so add a check for that in test.pl." }, { "commit": "cfb8aa202ee972579697db2cc8d5d8ef111f0022", "date": "2024-06-21 13:29:17 +0800", "subject": "Add remote locks for stanza commands missed in 31c7824a.", "body": "31c7824a should have added remote locks when the commands were modified to run remotely. This is unlikely to have caused issues since these commands are generally not run concurrently with backup/expire but having the locks is safer." }, { "commit": "b9a9ef2d5fa22c4e257f4d1befbc067029570211", "date": "2024-06-21 11:17:42 +0800", "subject": "Clarify when code generation is run when modifying config.yaml.", "body": "The contributing guide indicated that this happened at compile time but in fact it happens when test.pl is run." }, { "commit": "bcda7f8c7ee9823880bd1a9b5e65075df1102652", "date": "2024-06-20 13:06:04 +0800", "subject": "Optimize assignment of command/option rules.", "body": "Assign the rule to a local variable to make subsequent assignments more efficient.\n\nAlso fix a missing const." }, { "commit": "a0a5f2300cf771095a0a7fc309d40c1f27c899a6", "date": "2024-06-19 10:33:46 +0800", "subject": "Replace strftime() with cvtTimeToZP() and strNew/CatTimeP().", "body": "These functions produce cleaner code and hide implementation details." }, { "commit": "cec486b6dd02b6a755142ad4bee322fb02ff0d23", "date": "2024-06-18 11:42:59 +0800", "subject": "Refactor backup to use flag for backup standby state rather than option.", "body": "Using the option adds a bit of complexity -- it is simpler just to use the state of the standby object to determine if backup from standby is enabled." }, { "commit": "270dce41b6fff0d0ad0769b0ea5df0011140f0b9", "date": "2024-06-18 10:43:54 +0800", "subject": "Refactor lock module.", "body": "Refactor the lock module to split command-specific logic from the basic file locking functionality. Command specific logic is now in command/lock.c. This will make it easier to implement new features such as repository locking and updating lock file contents on remotes.\r\n\r\nThis implementation is essentially a drop-in replacement but there are a few differences. First, the lock names no longer require a path (the path is added in the lock module). Second, the timeout functionality has been removed since it was not being used." }, { "commit": "ad7377c75b2769d2907482de139fb21f779ae674", "date": "2024-06-16 11:55:04 +0800", "subject": "Fix issue with files larger on the replica than on the primary.", "body": "If a file on the primary was larger than on the replica then the next diff/incr backup would store the primary size instead of the replica size when block incremental was enabled. On the next diff/incr backup this would lead to a repo size must be > 0 for file error when validating the manifest.\r\n\r\nFix this by limiting copy based on sizeOriginal rather than size so size can be set to the value expected to be stored in the manifest. As a bonus sizePrior is no longer needed since size can be used for the same purpose." 
}, { "commit": "2ec99ca4d9e40dcae628c2a435c8242bdcefabc2", "date": "2024-06-12 16:08:17 +1000", "subject": "Specify test images that get valgrind installed.", "body": "This means valgrind is no longer built from source, which caused image builds to run for a very long time.\n\nValgrind is only required in a few images for testing." }, { "commit": "886bb281f69bb22228eb41760ef8ad60cdd820db", "date": "2024-06-11 15:20:04 +1000", "subject": "Migrate CentOS 7 integration tests to Rocky 8.", "body": "CentOS 7 will be EOL on June 30 and since there is no CentOS 8 migrate instead to Rocky 8." }, { "commit": "4ac3b82c99ff3bd48d2cc74651a47ce072ac0022", "date": "2024-06-11 12:08:52 +1000", "subject": "Allow alternative WAL segment sizes for PostgreSQL <= 10.", "body": "Alternative WAL segment sizes can be configured in PostgreSQL <= 10 with compile-time options. We have not allowed these before since it was not a well-tested feature of PostgreSQL.\n\nHowever, forks such as Greenplum allow alternative WAL segment sizes at initdb time (which are presumably well-tested) so it makes sense to allow it.\n\nSince the PostgreSQL versions in question are all EOL it is not important to have this restriction in place anymore." }, { "commit": "e8b965756c380953d4312c26c81489cc9787d1cf", "date": "2024-06-09 11:39:58 +1000", "subject": "Add unit tests for backup from standby with block incremental.", "body": "These tests are important for an upcoming bug fix related to differing sizes of a file on a primary vs standby.\n\nThe test that demonstrates the bug cannot be included here since it causes a test failure, but this commit introduces the infrastructure and one test to guard against a regression in the bug fix." }, { "commit": "cf478bc75352486b5aed555404f912c6fdea92a0", "date": "2024-06-08 13:00:53 +1000", "subject": "Improve efficiency of incremental manifest build.", "body": "Move the file.copy check before the manifest lookup to save a lookup and (probably) find for zero-length files when bundling.\n\nThis also removes a layer of indentation which helps with readability." }, { "commit": "48823b6cd320373b1b8de52d6ff931094fbe43f7", "date": "2024-06-07 14:24:08 +1000", "subject": "Simplify lock file scanning in stop command.", "body": "This simpler implementation only requires that a lock file begin with the stanza (followed by a dash) and end in .lock.\n\nThis will make the implementation more resilient to planned changes in lock file naming." }, { "commit": "ea1596152ceba8ad8f8e94d55c992e0982e5c879", "date": "2024-06-07 14:09:17 +1000", "subject": "Avoid possible race condition while reading lock files in info command.", "body": "Since lockRead() was being called twice the state of the lock file could change between leading to invalid data in the info output.\n\nInstead call lockRead() once and use the result for both the validity test and output." }, { "commit": "01838995ce08f93789d5003598cf5b43ed405465", "date": "2024-06-07 09:21:13 +1000", "subject": "Replace tabs with spaces in meson.build files.", "body": "These were probably copied over from Makefile.in." }, { "commit": "6f562fba60cd035508726b037f72fdcc75e94e88", "date": "2024-05-31 14:52:07 +1000", "subject": "Migrate coverage testing to C and remove dependency on lcov.", "body": "lcov does not seem to be very well maintained and is often not compatible with the version of gcc it ships with until a few months after a new distro is released. 
In any case, lcov is that not useful for us because it generates reports on all coverage while we are mainly interested in missing coverage during development.\n\nInstead use the JSON output generated by gcov to generate our minimal coverage report and metrics for the documentation.\n\nThere are some slight differences in the metrics. The difference in the common module was due to a bug in the old code -- build/common was being added into common as well as being reported separately. The source of the two additional branches in the backup module is unknown but almost certainly down to how exclusions are processed with regular expressions. Since there is additional coverage rather than coverage missing this seems fine.\n\nSince this was pretty much a rewrite it was also a good time to migrate to C." }, { "commit": "49e252f49260c6d8f3925dc7dace0785322ec288", "date": "2024-05-27 11:49:21 +1000", "subject": "Begin v2.53 development." }, { "commit": "dfc14b193454b3003c10eb2d021699cbd6294004", "date": "2024-05-27 11:13:16 +1000", "subject": "v2.52: PostgreSQL 17beta1 Support" }, { "commit": "899b8927888f3930f07f5607902d0493bd3b8f05", "date": "2024-05-24 12:24:11 +1000", "subject": "New CI container build for PostgreSQL 17 beta1.", "body": "Update the catalog version for beta 1 so pgbackrest will not work with any prior development versions.\n\nAlso improve the integration/all test so the catalog version does not need to be updated again during the beta period." }, { "commit": "c401ae8fb852d8cd7d10b7b7f3dcf1c42de8d3ee", "date": "2024-05-23 11:03:59 +1000", "subject": "Explicitly run test servers on ipv4 localhost.", "body": "If a host defaults to ipv6 then it can confuse the tests and lead to connection failures and inconsistent error messages.\n\nFor now just hard-code the servers to run on ipv4 but this is an area for later improvement." }, { "commit": "04b043797634f36d3deb26dcf0357be1ff9971db", "date": "2024-05-23 10:58:52 +1000", "subject": "Exclude documentation code coverage from metrics report.", "body": "Coverage of the documentation code is not important enough to report to users. If it were reported it should be in a separate section (along with test code coverage)." }, { "commit": "91156bf7e1dec5dfeec18bf13a112d16e3badfda", "date": "2024-05-23 08:19:50 +1000", "subject": "Fix comment typos and formatting." }, { "commit": "b6ac11beec920fe978994c0d018c380477199ca8", "date": "2024-05-23 08:17:02 +1000", "subject": "Update start/stop documentation to reflect actual functionality.", "body": "The exact functionality of start/stop has evolved over time and has become a bit confusing. It may be appropriate to make the behavior more consistent but for now at least document the behavior correctly. The documentation for start/stop was fairly inaccurate." }, { "commit": "9910a4eddb5a7772a289032dc30d6aeab68536ff", "date": "2024-05-20 08:44:37 +1000", "subject": "Add hrnTzSet() to set timezone in tests.", "body": "3c8819e1 replaced gmtime/localtime with gmtime_r/localtime_r but did not take into account a subtle difference in how they operate. While gmtime/localtime operate as if tzset() has been called, i.e. they operate on the TZ env variable directly, gmtime_r/localtime_r require tzset() to be called after changing TZ for consistent results.\n\nRather than call tzset() every time TZ is changed, add hrnTzSet() to encapsulate both operations." }, { "commit": "28ad1badd939b4b02c23140bed82b1dc1034bcda", "date": "2024-05-18 18:55:53 +1000", "subject": "Update Fedora test image to Fedora 40." 
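The hrnTzSet() commit above (9910a4ed) hinges on a POSIX subtlety that is easy to demonstrate; this standalone example relies only on standard calls:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main(void)
{
    setenv("TZ", "America/New_York", 1);
    tzset();                        /* required before localtime_r() sees TZ */

    const time_t now = time(NULL);
    struct tm local;
    localtime_r(&now, &local);

    char buffer[64];
    strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M %Z", &local);
    puts(buffer);

    return 0;
}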
}, { "commit": "ffe9a17fcb9b3fe1d7caed64ca142228882d819f", "date": "2024-05-18 14:33:40 +1000", "subject": "Replace erroneous THROW_SYS_ERROR_FMT() in storageInfo().", "body": "This was copied from storagePosixInfo() in a474ba54 but there is no guarantee that errno will be valid at this point. In most cases errno was zero so no system error message was displayed, but when using the Posix driver it could output \"[2] No such file or directory\". For other drivers errno was generally not set but could output a random error message in that case that errno was set by some unrelated action.\n\nUse THROW_FMT() instead since errno will not always be set correctly and in any case \"[2] No such file or directory\" is not very useful information since the main error message already says that.\n\nWhile this is technically a bug it is so harmless that it doesn't merit mention in the release notes.\n\nThis was discovered while testing on Fedora 40 which threw \"[38] Function not implemented\" -- clearly unrelated to missing paths/files." }, { "commit": "9e477c432154f5929e0ad3afc4aeae01a739f872", "date": "2024-05-18 09:59:32 +1000", "subject": "Update GitHub test checkout and codeql action versions.", "body": "checkout@v3, codeql-action/init@v2, and codeql-action/analyze@v3 are now deprecated." }, { "commit": "db4eefae1a9e999cc6636740e061413b7a09bc37", "date": "2024-05-10 10:01:16 +0930", "subject": "Remove ASSERT() in sckClientOpen() to silence Coverity complaint.", "body": "Coverity complains that this comparison might have a side effect because the variable is volatile. It's hard to see what that might be but since the assertion is not all that important, just remove it. During testing this sort of error will generally be caught by valgrind." }, { "commit": "d32eb5bb542f80657cfcc86df7663339297bb431", "date": "2024-05-10 09:45:01 +0930", "subject": "Silence invalid Coverity complaint in jsonReadPush().", "body": "Coverity complains that the output from THROW_FMT will be unpredictable since the order of operations in the call is not deterministic, but it fails to understand that subsequent calls to jsonReadTypeNextIgnoreComma() are noops until the value has been processed.\n\nSilence Coverity by assigning the actual type to a local variable so jsonReadTypeNextIgnoreComma() is only called once.\n\nAlso fix an adjacent comment typo." }, { "commit": "bb8988b551757a8932fd99027e436e53609ad7c9", "date": "2024-04-27 19:14:01 +1000", "subject": "Update storage (except remote) module to recent coding standards.", "body": "Add const as appropriate and avoid initializing variables if the variable will definitely be set later on.\n\nThe storage/remote module will be updated with the protocol module once a major waiting refactor has been committed." }, { "commit": "76bcb740b66f64804dbbf5a2a6be6407f9cec376", "date": "2024-04-27 15:42:10 +1000", "subject": "Add GCS batch delete support.", "body": "The GCS driver sent a single file delete request for each file while deleting a path. Depending on latency this could lead to rather long delete times, especially noticeable during expiration.\r\n\r\nImprove GCS delete to use batches, which require multipart HTTP, so also add multipart HTTP infrastructure." }, { "commit": "e00e33b52856a63e82552ccc12df235d1a7f6f17", "date": "2024-04-25 19:32:07 +1000", "subject": "Dynamically find python in meson build.", "body": "This is better than requiring a python3 binary to be on the path because some installations might have, e.g. 
python3.9.\r\n\r\nAlso add the python3-distutils package to Debian builds to make this work." }, { "commit": "55e996912aa57d39ea21f5dc905b7acabe93b0f8", "date": "2024-04-25 18:35:48 +1000", "subject": "Update meson minimum version to 0.47.", "body": "This allows enabling the check option for run_command, which automatically fails when the command fails." }, { "commit": "4ea4e3f3809af24d8a1ff5be15ca7c713f8f40e1", "date": "2024-04-23 22:34:32 +1000", "subject": "Update Fedora test image to Fedora 38.", "body": "This should have been done in 434938e3 but somehow it didn't happen.\r\n\r\nFedora 38 requires 2048 bit keys so update the VM builds to use them. Update the documentation to use 2048 bit keys. This is not technically required by this commit but it makes sense to do it now.\r\n\r\nAlso update the key location for the yum.p.o repository.\r\n\r\nLastly, shuffle test PostgreSQL versions since PostgreSQL 11 is not longer available in the yum.p.o repository." }, { "commit": "b40c2616c2a8870b592d8c106a0980a94055aba0", "date": "2024-04-21 13:16:53 +1000", "subject": "Update info module to recent coding standards.", "body": "Add const as appropriate and avoid initializing variables if the variable will definitely be set later on." }, { "commit": "c6fcc81db66c4b8f820c4ecf60f7067028dc6e79", "date": "2024-04-21 13:01:40 +1000", "subject": "Update db/postgres modules to recent coding standards.", "body": "Add const as appropriate and avoid initializing variables if the variable will definitely be set later on or is immediately returned." }, { "commit": "19411f39d2737542aa28436b3ab661912a5c3965", "date": "2024-04-21 12:06:39 +1000", "subject": "Update config module to recent coding standards.", "body": "Add const as appropriate and avoid initializing variables if the variable will definitely be set later on or is immediately returned." }, { "commit": "0e36e1a2fc79d8b425de30f54cc804efb30e3ab1", "date": "2024-04-21 12:01:01 +1000", "subject": "Update common module to recent coding standards.", "body": "Add const as appropriate and avoid initializing variables if the variable will definitely be set later on or is immediately returned." }, { "commit": "987e6e31f479874fbb4ff93b6fd45b8178b5e4fa", "date": "2024-04-21 09:42:11 +1000", "subject": "Move packager note about meson to new version.", "body": "Since there were some issues found with the meson install (7877983a, 7b95fd3b) it makes sense for any packagers who have not made the migration to hold off until the next release.\n\nMove the note to the next release where hopefully all issues have been addressed." }, { "commit": "9546b9c5d0e4da7bd952b01cc3374c26e9f90ea0", "date": "2024-04-20 14:19:27 +1000", "subject": "Update command module to recent coding standards.", "body": "Add const as appropriate and avoid initializing variables if the variable will definitely be set later on or is immediately returned." }, { "commit": "fb22f04555fecfa143354372ec3543ab186c131d", "date": "2024-04-18 10:56:24 +1000", "subject": "PostgreSQL 17beta1 support.", "body": "Add catalog version and WAL magic for PostgreSQL 17." }, { "commit": "c8cf8e1f2bfbed2932cfbef4a7a58e3a13e16a46", "date": "2024-04-17 19:02:28 +1000", "subject": "Update contributor name." 
}, { "commit": "57731b6cd261c732041aacc094429ab02ca13418", "date": "2024-04-17 11:58:13 +1000", "subject": "S3 SSE-C encryption support.", "body": "This feature (enabled with --repo-s3-sse-customer-key) provides an encryption key to encrypt the data after it has been transmitted to the server.\r\n\r\nWhile not as secure as encrypting data before transmission (--repo-cipher-type), this may be useful in certain configurations." }, { "commit": "06d3cb767c171be872262093b4547a62f1b16ee7", "date": "2024-04-17 11:43:41 +1000", "subject": "Improve unit testing of valid page sizes in pg_control.", "body": "Spread the tests over more versions of PostgreSQL and improve/fix comments." }, { "commit": "7b95fd3bd29346e7325aa161bfee45efd71e22eb", "date": "2024-04-10 09:23:17 +1000", "subject": "Allow explicit disabling of optional dependencies in meson builds.", "body": "On some platforms, e.g. FreeBSD, there is a requirement to allow the user to disable support for features even when the required library is present.\r\n\r\nIntroduce tri-state options for the optional features: auto mimics the current behavior and is the default, enable requires libraries for the feature to be present, and disable disables the feature without checking the libraries." }, { "commit": "dab52739cd36d2486b902b6b32d84532a834671b", "date": "2024-04-04 18:21:26 +1100", "subject": "Fix comment typo." }, { "commit": "fec1b215e37d644c3ae7a54bf197beb853e3f1db", "date": "2024-03-30 10:43:05 +1100", "subject": "Remove test data files made obsolete by d41b21c8.", "body": "These should have been removed when the mock integration tests were removed.\n\nIdeally we would also remove filecopy.table.bin but it serves to provide realistic page data for performance testing." }, { "commit": "9f5a97139fa222cf0b448bf76f0821e399849cd9", "date": "2024-03-29 12:21:06 +1100", "subject": "Allow strIdToLog() to output \"null\" values.", "body": "A valid StringId can never be zero so it more or less serves as a NULL value. In most cases zero will not be valid, but it is better to catch this condition with an assert rather than an error in logging." }, { "commit": "014e24889ce082c7cdaf3d128c796e1e00dcd3ff", "date": "2024-03-27 09:53:49 +1100", "subject": "Remove extra space before colons in meson.build files.", "body": "The spacing was not consistent so use the style that best matches our general coding standards." }, { "commit": "7877983acb3b1a27e900f7e7aea89cb7b29a817d", "date": "2024-03-27 09:29:37 +1100", "subject": "Tag pgbackrest build target in meson as installable.", "body": "By default meson does not install anything. Targets can be installed by tagging them as installable in the build definition." }, { "commit": "924aa5e8b127272bd3eff360a7e5737ef825a51f", "date": "2024-03-25 11:22:53 +1300", "subject": "Begin v2.52 development." }, { "commit": "dc07fb1e5bd38026d4fefb67a0525a89ac98b520", "date": "2024-03-25 09:53:22 +1300", "subject": "v2.51: Meson Build System" }, { "commit": "76956e71cfa3044867213bd65a6f22e96120c108", "date": "2024-03-20 09:05:39 +1300", "subject": "Fix progress logging when file size changes during backup.", "body": "If the file size changed during backup then the progress percentage in the log would not be accurate.\r\n\r\nFix this by using the original size to increment the progress since progress total was calculated from original file sizes." 
}, { "commit": "de55902fb3f92758c4784e6fc06a69466e656ec3", "date": "2024-03-13 12:06:23 +1300", "subject": "Redact secure options in check --report.", "body": "For this to be practically useful secure options must be redacted. Otherwise, no user is likely to share the report.\n\nSince this feature is still internal, there is no real world impact." }, { "commit": "ff47450402fde5ebc64a08d31791551bee52200e", "date": "2024-03-12 12:41:58 +1300", "subject": "Update resume functionality for block incremental.", "body": "Resume was not updated for block incremental so block incremental files were always removed during a resume. Resume worked but was very inefficient with block incremental enabled.\r\n\r\nUpdate resume to preserve block incremental files and add tests." }, { "commit": "e634fd85cedc1ae3aef7c4fa2556d7d73d05cb8a", "date": "2024-03-10 17:08:42 +1300", "subject": "Prevent invalid recovery when backup_label removed.", "body": "If backup_label is removed from a restored backup then PostgreSQL will instead use checkpoint information from pg_control to attempt (what is thinks is) crash recovery. This will nearly always result in a corrupt cluster because the checkpoint will not be from the beginning of the backup, and even if it is, the end point will not be specified, which could lead to recovery stopping too early.\r\n\r\nTo prevent this, invalidate the checkpoint LSN in pg_control on restore. If backup_label is removed then recovery will still fail because PostgreSQL will not be able to find the invalid checkpoint. The LSN of the checkpoint is not logged but it will be visible in pg_controldata output as 0/DEAD. This value is invalid because PostgreSQL always skips the first WAL segment when initializing a cluster." }, { "commit": "960b43589d422a3757535b585e31da03a595ae97", "date": "2024-03-10 16:17:50 +1300", "subject": "Add validation for WAL segment size in pg_control.", "body": "This serves as an additional sanity check to be sure the pg_control format is as expected. The field is useful for being near the end and containing a limited number of discrete values." }, { "commit": "63541b2273360c8c3f181fe0242bd20afb845715", "date": "2024-03-10 15:50:10 +1300", "subject": "Add validation for page checksum version in pg_control.", "body": "This serves as an additional sanity check to be sure the pg_control format is as expected. The field is useful for being all the way at the end and being four bytes that can only have one of two values. Something more distinctive than 0 and 1 would be better, but this is what we have to work with.\n\nConvert PgControl.pageChecksum to unsigned int and rename to PgControl.pageChecksumVersion and make all downstream changes required for the new datatype." }, { "commit": "7448fde157c6d22f57b90c0cef11957b0aadf4b3", "date": "2024-03-10 11:36:39 +1300", "subject": "Improved support for dual stack connections.", "body": "Connections are established using the \"happy eyeballs\" approach from RFC 8305, i.e. new addresses (if available) are tried if the prior address has already had a reasonable time to connect. This prevents waiting too long on a failed connection but does not try all the addresses at once. Prior connections that are still waiting are rechecked periodically if no subsequent connection is successful.\r\n\r\nThis improves substantially on 39bb8a0, which failed to take into account connection attempts that do not fail (but never connect) and use up all the available time." 
}, { "commit": "f287178b705cc90aa887d66c932e2a223029649b", "date": "2024-03-10 11:09:13 +1300", "subject": "Set function/variable visibility to hidden.", "body": "This saves about 16KiB in the binary and reduces exported symbols by about 75%. All variables are still exported and any functions that are referenced by their pointers or extern'd but never used outside the module where they are defined.\r\n\r\nIn addition to modest space savings, this should also increase performance a bit since the compiler can simplify calls to these functions and load the binary should also be a little faster.\r\n\r\nThe GCC documentation does not make it clear that visibility can be used with variables, but it certainly makes a difference in the binary size, so something is happening. Other sources on the internet suggest that visibility can be used with variables. Clearly exports are not affected, but there may be some other optimization happening." }, { "commit": "e3d9df3ae9669ee5749b970d59f8afeb20fd3d9d", "date": "2024-03-10 10:53:31 +1300", "subject": "Make meson the primary build system.", "body": "Meson has a lot of advantages over autoconf/make, primarily in ease-of-use and performance. Make meson the only build system used for testing and building the Debian documentation, but leave the RHEL documentation using autoconf/make for now so it gets some testing." }, { "commit": "c64cd8e01958c3316049d73f403241c136bcf392", "date": "2024-03-10 10:43:24 +1300", "subject": "Disable arm64 test in CirrusCI.", "body": "There seems to be a shortage of arm64 hosts because queue times have been steadily increasing over the last few weeks. It can now take several hours to get an arm64 test queued, which makes it difficult to get development done.\n\nDisable for the time being and hope the resource issue gets resolved in the future." }, { "commit": "eda7706f53e925a701f85894ab3d8e861e5fcf06", "date": "2024-03-10 09:40:13 +1300", "subject": "Make valgrind libssh2 leak check less specific.", "body": "The leak kind is usually definite but sometimes flaps to possible. For stability purposes accept any leak kind.\n\nNote that this is a leak in a specific version of libssh2 and not a bug in pgBackRest." }, { "commit": "dddcbcd8e9a1cae578b0cc5bb2d1aa768330f3c5", "date": "2024-03-10 09:32:55 +1300", "subject": "Limit resume functionality to full backups.", "body": "Resume does not work correctly with delta diff/incr backups because the presence of a reference causes it to remove the file with the idea that it can just be referenced again. This is true for timestamp-based backups but for deltas all existing files need to be rechecked (which requires a reference).\r\n\r\nThis is fixable but not without significant effort and new tests and it calls into question the usefulness of non-full resumes. For diff/incr, if the file was changed since the prior backup there is a good chance it will be modified again before the resume occurs.\r\n\r\nIn order to keep this feature as useful as possible for the most valuable case, limit resumes to full backups." }, { "commit": "9d91d1b2f817b543fc8d970af56d1442c325ed7c", "date": "2024-03-08 15:07:43 +1300", "subject": "Detect files that have not changed during non-delta incremental backup.", "body": "02eea55 added code to load a buffer of data from a file being backup up to detect files that have been truncated to zero after manifest generation. 
This mechanism can also be used to detect files that have not changed since the prior backup.\r\n\r\nIf the result of the file copy fits into a single buffer, then the size and checksum can be compared to the prior file before anything gets stored. If the file matches then it is referenced to the file in the prior backup.\r\n\r\nThe size that can be compared for normal copies is limited by the buffer size but for block incremental it works with any size file since there is no output from block incremental when the file is identical." }, { "commit": "cf17515e409440eecfb04bf7186790d8baa8ab12", "date": "2024-03-08 12:34:11 +1300", "subject": "Improve archive-push WAL segment queue handling.", "body": "Infer the size of all WAL segments from the size of the first segment rather than getting info for all segments (up to queue size). If the segments are not the same size then there are larger issues than the WAL queue." }, { "commit": "4387250f2e159dc4f762ed4a7f4180eb81db3717", "date": "2024-03-08 10:07:03 +1300", "subject": "Improve sort comparators.", "body": "Improve sort comparators to use branchless comparisons when possible and avoid using subtraction. Only one comparator was using subtraction and it appears there was no overflow risk since the values were pretty small.\r\n\r\nInspired by https://www.postgresql.org/message-id/CA%2B14426g2Wa9QuUpmakwPxXFWG_1FaY0AsApkvcTBy-YfS6uaw%40mail.gmail.com." }, { "commit": "e00bfe2d2c3f124b2883e3c2d169a2ce506a1e98", "date": "2024-03-08 09:50:20 +1300", "subject": "Fix performance regression in storage list.", "body": "storageListP() returns a list of entries in a path and should not need to stat/head, etc. in order to get more detailed info. This was broken by 75623d4 which failed to set the level correctly.\r\n\r\nSet the correct level and update tests.\r\n\r\nThere's no easy way to directly test for a regression here but the SFTP tests will fail if more detailed info is requested since it would require script changes." }, { "commit": "794c5771305bfce6298ee529df682d3a873f6f7d", "date": "2024-03-06 11:00:09 +1300", "subject": "Migrate integration tests to C.", "body": "The Perl integration tests were migrated as faithfully as possible, but there was some cruft and a few unit tests that it did not make sense to migrate.\n\nAlso remove all Perl code made obsolete by this migration.\n\nAll unit, performance, and integration tests are now written in C but significant parts of the test harness remain to be migrated." }, { "commit": "7f1bb3a051077d8af2bcadfb261c873b1bdcea9f", "date": "2024-03-05 17:45:25 +1300", "subject": "Remove obsolete valgrind suppression." }, { "commit": "cb58bdc9752e720687e594fa953e1b4a93b8defe", "date": "2024-03-04 18:57:19 +1300", "subject": "Move assert in cmdServerSigChild().", "body": "It is possible for the server to receive notifications for children that were not spawned by it, e.g. when running as the root process of a container.\n\nOnly assert when the child process is found in our process list. Otherwise, the code may be something unexpected.\n\nSince this is an assert there should not be any real world consequences either way, but the issue has caused problems in testing." }, { "commit": "07051347fb8723ef1c6f951b17bcddb8fa0d8fcf", "date": "2024-03-04 18:14:41 +1300", "subject": "Update Cirrus CI BSD image to 13.3."
}, { "commit": "f7a7ab16c9652179945717ebf9b68af3aa8a0b0e", "date": "2024-03-02 12:29:10 +1300", "subject": "Skip zero-length files for block incremental delta restore.", "body": "a42614e introduced the capability to preserve smaller than expected files for block incremental restore delta, but failed to take into account that zero-length files are both useless and cause the block checksum filter to error.\r\n\r\nFix this by skipping zero-length files during block incremental restore delta." }, { "commit": "6c45b57fa8eef0f570abb1ac216ac43e0acf314a", "date": "2024-02-24 11:22:48 +1300", "subject": "Add execOne() to simplify exec for build, documentation, and testing.", "body": "The core Exec object is efficient but geared toward the specific needs of core and not ease-of-use as required for build, documentation, and testing.\n\nexecOne() works similarly to system() except that it automatically redirects stderr to stdout and captures the output." }, { "commit": "6356a2b76c8f22dc0b3e1a0349b7f1f0cf1b65ae", "date": "2024-02-17 19:16:39 +1300", "subject": "Disable log expect testing for non-unit tests.", "body": "This has never been a problem for performance tests since they do not call functions that log at info level or above, but the upcoming integration tests may do so. In any case it is better to disable this functionality outside of unit tests." }, { "commit": "d41b21c8f70aa6ce7a39aeaddbbacafda51c609d", "date": "2024-02-17 10:52:37 +1300", "subject": "Remove mock integration tests.", "body": "These tests have not been maintained for several years, i.e. no tests for new features have been added. They are highly duplicative of the unit tests but do have the advantage of mixing in different storage drivers. They were allowed to remain because they were not doing any harm even if they were probably not doing any good.\n\nHowever, the real integration tests (that run directly against PostgreSQL) also test storage drivers and have been updated with new features over time. The real integration tests are now being migrated to C and as part of that effort the mock integration tests need to be removed or migrated, and they do not provide enough value to migrate.\n\nRemove all mock integration tests and a leftover Perl performance test." }, { "commit": "68351785f6816a23526900db595557cf6e3a0056", "date": "2024-02-16 11:37:31 +1300", "subject": "Update references to recovery.conf.", "body": "Update references to recovery.conf to include postgresql.auto.conf used in newer versions.\r\n\r\nAlso update a broken recovery URL and point it to the current version (with a hint to select the proper version of PostgreSQL)." }, { "commit": "273d5050f8bd74869408d9570a35761ba5de75f1", "date": "2024-02-07 11:01:55 -0300", "subject": "Add asserts to default switch cases in common/type/variant module.", "body": "This matches how switch is used in other parts of the code and prevents invalid values from being processed normally." }, { "commit": "ad8e92a9ac23a0123a080dafe6b83f0b71a22441", "date": "2024-02-05 10:10:59 -0300", "subject": "Remove unneeded backupTimeStart parameter from testBackupValidate().", "body": "5ce8b9df added this parameter, but it is better to get the value from the manifest, which is already present." }, { "commit": "45f070aa9a221c6f862ff6ccb4ff011bfa6c3888", "date": "2024-01-25 10:41:20 -0300", "subject": "Add release instructions to update PostgreSQL ecosystem wiki." 
}, { "commit": "5ce8b9dfae984a9a40c44282a59936da5c3dfbd9", "date": "2024-01-24 14:54:28 -0300", "subject": "Simplify output in command/backup unit tests.", "body": "The output combined a representation of the files/paths/links in the manifest along with output of what was validated on disk. This was redundant and made maintenance of the tests difficult, especially with all the quoting in the manifest output (which also made it hard to search for output).\n\nInstead use primarily the output created during validation and add fields from the manifest that were missing. Exclude paths from the output when there are files in the path since the path is then implied.\n\nOne major change here is that checksums are no longer output. This makes it easier to write tests that work on multiple architectures and all checksums are already verified during validation." }, { "commit": "1a0cc96c5a58cea2a21b8754909c6a2b824c3f9e", "date": "2024-01-23 09:56:26 -0300", "subject": "Migrate man page generation to C." }, { "commit": "676700d8ca6f2e0e0ab8123aa9606c8ec7aa81ae", "date": "2024-01-22 14:17:09 -0300", "subject": "Add detailed backtrace to autoconf/make build.", "body": "c8264291 added libbacktrace to the meson build (not used in production yet), but held off on adding it to autoconf/make before more performance testing was done.\r\n\r\nPerformance tests show there is no noticeable cost to adding libbacktrace, so add it to get more detail error stack traces." }, { "commit": "0aaa0772f56d59178518377b33b3e3d02f0f5483", "date": "2024-01-22 12:33:40 -0300", "subject": "Fix help not displaying help for the help command.", "body": "'pgbackrest help help' just displayed the help overview, rather than display help for the help command.\n\nFix this by making sure the command is set and routing correctly in the help command." }, { "commit": "68db3075d7ac4a48377967ad54e45b95b9ee6c61", "date": "2024-01-22 12:00:13 -0300", "subject": "Allow --version and --help for version and help.", "body": "It is a bit confusing that --help and --version do not work like most command-line programs. For example, git allows either --help or help.\r\n\r\nMake these work by making them shortcuts (not actual options) to the applicable commands.\r\n\r\nThe user will still need to use help (not --help) to get help on specific commands/options, but at least they can get to the main help (which will tell them this) via --help." }, { "commit": "db5bcff3b443833dce6ba0f10ce4293d65f0f436", "date": "2024-01-22 10:11:48 -0300", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "c618fd3b09fab8cde44ad8e5fe0cb984c1bc76d4", "date": "2024-01-22 10:09:38 -0300", "subject": "Begin v2.51 development." }, { "commit": "1add35624dbf4061bcb19521022b318418b0bba5", "date": "2024-01-22 09:54:59 -0300", "subject": "v2.50: Performance Improvements and Bug Fixes" }, { "commit": "fd8974914d8702e5d889f03e08eeda676435faa2", "date": "2024-01-12 13:55:08 -0300", "subject": "Cleanup tablespace tests in command/backup module.", "body": "Tablespaces were enabled for tests that did not have tablespaces, resulting in tablespace_map being present in backups even when it was not needed.\n\nInstead of specifying if tablespaces are present, automatically detect tablespaces in hrnBackupPqScript()." }, { "commit": "3926dd346ef7b6e239e939e9a046a4be8597c584", "date": "2024-01-04 14:55:44 -0300", "subject": "Update LICENSE.txt and PostgreSQL copyright for 2024." 
}, { "commit": "a484862763e85a7cc11b6a45366b3b78b06cd373", "date": "2024-01-04 14:53:44 -0300", "subject": "Remove installation of old yum.p.o repository key." }, { "commit": "55f22489630ae409f03a2ad9ec37c3e38a3f7ab2", "date": "2024-01-03 18:15:50 -0300", "subject": "Break up scripts in db unit test module.", "body": "Long scripts followed by a number of tests are really challenging to debug and update.\n\nInstead, break up the scripts to be inline with the tests that they drive. This should make maintenance of the tests much simpler." }, { "commit": "7c17eec3db455d3e8e660f4f8247b6975517648f", "date": "2024-01-03 13:01:50 -0300", "subject": "Improve SFTP storage error messages.", "body": "Use storageSftpEvalLibSsh2Error() in more locations to provide better error information. Also add storageSftpLibSsh2SessionLastError() for the same reason." }, { "commit": "802ae7914897f3d53788e526ce22562e434c92ea", "date": "2024-01-03 12:43:50 -0300", "subject": "Remove FreeBSD 12 and add FreeBSD 14 to Cirrus CI.", "body": "FreeBSD 12 is now EOL.\n\nAlso update the image version for FreeBSD 13." }, { "commit": "8a8cfba62bfe2293d6355f68d44bab5323a91118", "date": "2023-12-27 12:53:53 -0300", "subject": "Remove resolved comment." }, { "commit": "f4a4af299bee93ab7dde599ccdb35becc1c2afad", "date": "2023-12-27 12:49:47 -0300", "subject": "Fix short read in block incremental restore.", "body": "During restore it is possible to read all the blocks out of a compressed super block without reading all the input. This is because the compression format may have some trailing bytes that are not required for decompression but are required to indicate that data has ended. If a buffer aligned with the compressed data in a certain way, these last bytes might not be read.\r\n\r\nExplicitly read out any final bytes at the end of each super block to handle this case. This should always result in no additional data out and we check for that, but it does move the read position to the beginning of the next compressed super block so decompression can begin without error." }, { "commit": "c47b39acf67b375cbee9773904ad1afc6a1bb9ec", "date": "2023-12-27 12:39:45 -0300", "subject": "Fix incorrect test comment.", "body": "Left over from an older implementation." }, { "commit": "4324b568a922105b272e2031559dae5f374a04da", "date": "2023-12-26 21:07:56 -0300", "subject": "Move block testBlockDelta() to harness module.", "body": "This makes the function available to other test modules.\n\nAlso rename to hrnBlockDeltaRender()." }, { "commit": "9049fec2c02c4110e16ffdff7b56f83d8ff32b4a", "date": "2023-12-22 13:16:45 -0300", "subject": "Refactor skip files truncated during backup when bundling.", "body": "Refactor 02eea555 to always close the file immediately on EOF and use backupCopyResultCopy to continue processing. Closing the file immediately saves a later EOF check and is friendlier to added logic in this area. Using backupCopyResultCopy to continue is clearer also makes it easier to add new logic.\n\nAlso store zero checksum so the bulk of results collection can be moved within the copy block." 
}, { "commit": "c8795094d418802a0e24d4d19da66973c71b00e5", "date": "2023-12-22 12:48:01 -0300", "subject": "Allow const checksum buffers to be returned from backupFile().", "body": "This allows less duplication of buffers.\n\nFor delta check return file->pgFileSize/file->pgFileChecksum instead of pgTestSize/pgTestChecksum since this saves one buffer duplication and we know these values are equal since we just checked them.\n\nAlso add an assert to ensure copyChecksum is valid relative to size." }, { "commit": "4f760df417e17dc03ce07c0116dfaac506e04c84", "date": "2023-12-22 00:59:12 -0300", "subject": "Do not preserve block incremental if file is less than prior block size.", "body": "If a file stored with block incremental shrinks below the prior block size then the map is useless and the entire file needs to be stored again.\r\n\r\nIn this case use the new block incremental values (even if none) rather than preserving the old ones." }, { "commit": "3cd8249dbaa085bd075464d420f68a2a8b34a165", "date": "2023-12-21 15:28:37 -0300", "subject": "Update warning for backup resume invalid repo file.", "body": "This warning has had a note since the C migration that it should be moved below the backup file log message, so do that.\n\nAlso update the warning message a bit to correct for tense. This message was likely in a different place originally." }, { "commit": "701865eca1c8896a913e05f41a748c225b84af11", "date": "2023-12-21 15:20:03 -0300", "subject": "Refactor backupFile() to remove backupCopyResultReCopy.", "body": "Having two enum values for file copy makes things a bit more complicated than they need to be (especially in an upcoming commit).\n\nInstead add a flag to indicate that the repository file was invalid since the only purpose is to trigger a warning message." }, { "commit": "a42614e8f3a9e6871c16331f01c2cd765ab181aa", "date": "2023-12-21 15:08:07 -0300", "subject": "Preserve partial files during block incremental delta restore.", "body": "Previously files that were smaller than the expected size were not preserved for block incremental, even though it is possible that block incremental could make use of a partial file.\r\n\r\nOne example is when a restore encounters an error. On retry the partial file can be used as a starting point rather than copying again from the beginning. Another example is restoring a backup where a file is larger than what already exists in the data directory.\r\n\r\nPreserve any size file when block incremental will be used for the delta in order to reuse partial files when possible. If the file is smaller than expected then disable the whole-file checksum to reduce overhead." }, { "commit": "ad8febec0851473240f6f20bd4eb299b525a6d44", "date": "2023-12-21 13:32:10 -0300", "subject": "Refactor backup incremental manifest generation.", "body": "This refactor should provide more clarity on what factors affect an incremental, rather that just having one big expression do it all. Overall this may be slightly more efficient since some values are reused that before were recalculated.\r\n\r\nNo behavioral changes are introduced." }, { "commit": "f3584e2143de518bda52151a7102ed638dcb0e95", "date": "2023-12-21 11:20:10 -0300", "subject": "Add tests to command/backup and info/manifest modules.", "body": "These tests exercise various interesting cases and provide coverage for proposed improvements." 
}, { "commit": "25f14898babf4ad78baef3e8343933a863633975", "date": "2023-12-21 10:16:13 -0300", "subject": "Fix overflow suppressing backup progress in info output.", "body": "Writing the sz and szCplt parameters in the lock file used jsonWriteUInt64() but reading these parameters used jsonReadUInt(). This caused a silent exception for any backups larger than MAX_UINT and prevented the info command from reporting progress.\r\n\r\nCorrect this so the reads are symmetric and verified before/after with a test." }, { "commit": "8af3c1c9acc7117a0cf8e3ce862c5639c82bf78d", "date": "2023-12-17 13:15:03 -0300", "subject": "Use original file size to log size changes during backup.", "body": "c9703b35 added logging for file size changes during backup. Later 5ed6f8df added the sizeOriginal member to ManifestFile, which arguably is better to use for logging rather than size before backup since it will always contain the original size. Size could in theory be modified for deduplication purposes.\n\nUpdate logging to use sizeOriginal." }, { "commit": "bb6e5164ee6efd40fde73acd8f7b30c4afa1548d", "date": "2023-12-16 11:42:27 -0300", "subject": "Add block incremental test where timestamp changes but file is the same.", "body": "If delta is not enabled, then the timestamp is used to determine if a file has changed. If the timestamp changes but the file is the same then the prior map will be stored unchanged in the new backup. This is not quite as bad as storing the entire file but it is obviously not ideal.\n\nThis will be fixed in a future commit, but add the test now to show the current behavior." }, { "commit": "02eea555c7169e99892fdbc8ce7cc1ee59f50509", "date": "2023-12-14 14:21:06 -0300", "subject": "Skip files truncated during backup when bundling.", "body": "In bundle mode pgBackRest skips files of zero size, that is, it does not queue them for copying.\r\n\r\nAfter splitting the files into bundles, pgBackRest launches one or more processes that directly perform the backup, namely, read the files and, if necessary, write them to the bundles.\r\n\r\nIf during the time between the distribution of all files among bundles and the direct copying of a file to a bundle, this file of non-zero size was truncated to zero size (for example, when the table was truncated), then pgBackRest still unconditionally places such a zero-size file in the bundle, taking up space in it equal to the size of the headings, and additionally writes the original file size to the manifest.\r\n\r\nIn debug build an assertion was added, that does not allow zero-size files to be written to bundles, which leads to an error.\r\n\r\nTo solve the problem, this patch, when reading the next file, loads one buffer from the file to detect if it is zero-size. If so it marks the file as truncated and continues on to the next file.\r\n\r\nThe advantages of the solution are that, firstly, the assert will not fire on debug builds, and secondly, we will not place zero-size files in bundles, which exactly corresponds to the specification.\r\n\r\nThe patch adds the backupCopyResultTruncate value to the BackupCopyResult enumeration to use it to indicate the result when a non-zero size file is truncated to zero size during the backup process." }, { "commit": "89d5278b74ab0d880345575d7ac4c239cd1fce54", "date": "2023-12-14 13:28:52 -0300", "subject": "Add support for alternate compile-time page sizes.", "body": "Alternate pages sizes can be selected at compile-time, .e.g. 4096. 
While compile-time settings are generally not well tested by core, some established forks such as Greenplum use them." }, { "commit": "d205a61949ae8491f5ce9b2019cda79d2fe7b216", "date": "2023-12-01 11:54:30 -0300", "subject": "Fix flapping test on older ninja versions in test unit.", "body": "Older versions of ninja may fail to rebuild correctly when changes are made to the configuration. In this case there is an automatic retry but the unexpected log output would cause the test to fail.\n\nFor tests that are expected to succeed, check that the log is empty but also accept a retry message as long as the test does eventually succeed.\n\nAdd a new harness function, harnessLogResultEmptyOrContains(), to make this work and also clean up some adjacent code." }, { "commit": "7ce0f5a94c229abe415029ce80241a64665c93a2", "date": "2023-11-30 16:43:09 -0300", "subject": "Use unique port for each server unit test.", "body": "If the same port is reused too quickly bind may fail with this error:\n\nFileOpenError: unable to bind socket: [98] Address already in use\n\nWe specify SO_REUSEADDR when creating the socket but apparently this is not always enough if the port is reused very rapidly.\n\nFix this (hopefully) by using a unique port for each test that needs one. This does in theory limit the number of tests that can use ports, but we allow 768 per test, whereas the test that uses the most ports is common/io-tls with 4." }, { "commit": "a14732789baacb4d35d5da6d0adf337ceea7f595", "date": "2023-11-29 09:31:57 -0300", "subject": "Output coverage report on test failure in CI.", "body": "This allows analysis of coverage failures that only happen in CI. It is not ideal since the report needs to be copied from the log output into an HTML file where it can be viewed, but better than nothing." }, { "commit": "cb6bceb9f1074436b7d0b67a94d782113b5f2b35", "date": "2023-11-28 16:38:42 -0300", "subject": "Improve comments in socket test harness." }, { "commit": "70e15dacc77bcc2b7e4c2a222d683872d2180f5b", "date": "2023-11-28 16:35:37 -0300", "subject": "Allow custom type/message for errRetryAdd().", "body": "It may be useful to customize the message or add a message that was never thrown. The latter case will be used in an upcoming commit." }, { "commit": "85bc9f27d8b70a879051f604861078034263b095", "date": "2023-11-27 09:06:53 -0300", "subject": "Begin v2.50 development." }, { "commit": "3cb891e3ca49ed1166fb0b513b126412448f5b09", "date": "2023-11-27 08:55:56 -0300", "subject": "v2.49: Remove PostgreSQL 9.3 Support" }, { "commit": "7d51228bf5227494bf50327488032221bd3d30fa", "date": "2023-11-24 17:07:49 -0300", "subject": "Migrate backupFile() tests in command/backup module.", "body": "The backupFile() tests were written before the bulk of the backup command had been migrated to C. Some of them have been migrated to the complete backup tests, but others were left because there was no way to make changes to files during a backup.\n\nNow that we have the backup script harness introduced in 337da35a it is possible to migrate all the tests. The new tests are better because they not only test backupFile() but all the functions upstream and downstream of it." }, { "commit": "337da35ab278e058ec4cdffeb4319f1a1e2cb0df", "date": "2023-11-24 12:25:40 -0300", "subject": "Add test to show behavior of bundled files truncated during backup.", "body": "This behavior violates an assertion but is completely possible with the current implementation.
This behavior will be fixed in a future commit, but for now at least test how it currently works and remove the assertion so the test runs without error.\n\nAlso add a new harness that allows changes during the backup to be scripted." }, { "commit": "ac78b965837440ee34f7b5bf0a039c6e9d78eeec", "date": "2023-11-18 11:12:41 -0300", "subject": "Remove unused fields from backupJobResult() test in command/backup unit.", "body": "These fields were not used because of the noop so it was hard to keep them up to date. Rather than attempt to do so, just remove them and add a comment to explain why they are missing." }, { "commit": "c4dc4665f88e9d67a3b66f44cd59d6dd4bb5107f", "date": "2023-11-18 10:47:58 -0300", "subject": "Fix recursive path remove in SFTP storage driver.", "body": "storageSftpPathRemove() used LIBSSH2_FX_FAILURE to determine when it was attempting to unlink a directory, but it appears that LIBSSH2_FX_PERMISSION_DENIED is also valid for this case.\r\n\r\nUpdate storageSftpPathRemove() to accept either error and adjust tests." }, { "commit": "e2b734eff97e3fac32895b799cc32126013aae27", "date": "2023-11-16 12:50:09 -0300", "subject": "Allow config/load unit test to run without libssh2 installed.", "body": "Add additional #ifdef HAVE_LIBSSH2 wrapping around tests requiring libssh2 in loadTest.c." }, { "commit": "05207bb8e4f462cd2fd5c685d43ad8911b219077", "date": "2023-11-15 09:41:40 -0300", "subject": "Fix storageReadRemote() to return actual read bytes.", "body": "All storage interface read methods should return actual read bytes. This patch refactors storageReadRemote() to eliminate duplicated code and return actual read bytes. The return value is calculated as the number of bytes written to the passed buffer.\r\n\r\nThis is technically a bug but does not express as an issue currently because this return value is not being used. It will be used in the future, though, so it needs to be fixed." }, { "commit": "ea317df5d9af66935175f7ec87845f30be4f5d36", "date": "2023-11-11 10:33:09 -0300", "subject": "Remove old version conditionals from user guide.", "body": "The user guide does not need to build for EOL versions of PostgreSQL, so remove some conditionals used to support versions older than 12." }, { "commit": "eb69e2ee6339047f0145b751437ac2bec82bb5ab", "date": "2023-11-10 17:00:57 -0300", "subject": "Update point-in-time recovery documentation for PostgreSQL >= 13.", "body": "PITR changed in PostgreSQL 13 to error when the recovery target is not reached. Update the documentation to work with PostgreSQL >= 13 as well as < 13.\n\nAlso update the versions built for RHEL and Debian since PostgreSQL 11 is now EOL." }, { "commit": "dcf078198761a2cfe9be6b4c7f4b4fb07ef53cbf", "date": "2023-11-09 12:59:12 -0300", "subject": "Remove support for PostgreSQL 9.3.", "body": "Per our policy to support five EOL versions of PostgreSQL, 9.3 is no longer supported by pgBackRest.\r\n\r\nRemove all logic associated with 9.3 and update the tests." }, { "commit": "fa5b2d44ad9f7591517eecaaaaeb07e9500dd51c", "date": "2023-11-09 12:04:25 -0300", "subject": "Fix regression in retries.", "body": "5314dbf aimed to make nested Wait objects more accurate with regard to wait time but it also got rid of the \"bonus\" retry that was implicit in the prior implementation. This meant that if an operation used up the entire allotted timeout, it would not be retried. Object stores especially are noisy places and some amount of retry should always be attempted.
So even though removing the \"bonus\" retry was intended, it turned out not to be a good idea.\r\n\r\nInstead of an implicit retry, formalize two retries in the Wait object even if the wait time has expired. Any number of retries are allowed during the wait period. Also remove waitRemaining() since it is no longer needed.\r\n\r\nAdjust tests as needed to account for the extra timeouts.\r\n\r\nNote that there may still be an underlying issue here that is simply being masked by retries. That is, the issue being expressed was that waiting for a socket to be writable was timing out, and without a retry that caused a hard error. This patch does nothing to address the source of the write timeout and perhaps there is nothing we can do about it. It does seem similar to the write issue we had with our blocking TLS implementation, but it was never clear if that was a problem with TLS, the kernel, or a bug in pgBackRest itself. It cropped up after a kernel update and we switched to non-blocking TLS to address the issue (c88684e)." }, { "commit": "3c116e1829a0df62aaf50ce241c63d829eb438e0", "date": "2023-11-04 14:43:11 -0300", "subject": "Remove unused header." }, { "commit": "e0f5880b09624db2c9274a8a615a5309a0bddab1", "date": "2023-10-22 13:55:56 -0400", "subject": "Refactor of pq shim to allow more flexible scripting.", "body": "The pq scripts were pretty static which had already led to a lot of code duplication in the backup test harness.\n\nInstead allow the scripts to be built dynamically, which allows for much more flexibility and reduces duplication. For now just make these changes in the backup harness, but they may be useful elsewhere.\n\nWhile we are making big changes, also update the macro/function names to hew closer to our current harness naming conventions." }, { "commit": "306fdff93a072c72d6a40aab728ceeb44cc7b23f", "date": "2023-10-19 10:11:27 -0400", "subject": "Remove unused parameter in backupProcess().", "body": "Also fix a comment in the same function." }, { "commit": "04d92cca7eb42e0037e65701109da284b214c84f", "date": "2023-10-19 09:54:34 -0400", "subject": "Tidy and align site introduction and description.", "body": "Make the description more concise and use it for both the site description and introduction." }, { "commit": "459d59615a18c147140b1ae94a804be66541c0ab", "date": "2023-10-18 18:14:32 -0400", "subject": "More efficient/compact calculations for encoded sizes.", "body": "encodeToStrSizeBase64() is definitely more efficient (pulled from the PostgreSQL implementation).\n\nencodeToStrSizeBase64Url() is probably about as efficient as the prior implementation but is certainly more compact.\n\nAlso add tests for zero byte encoding sizes." }, { "commit": "21c8c8a66cb06eba714bfa41bed7239af3d14f08", "date": "2023-10-14 16:22:09 -0400", "subject": "Document maintainer options.", "body": "Document maintainer options in a separate section with appropriate explanation and caveats.\r\n\r\nAlso make the pg-version-force option user visible now that maintainer caveats have been documented." }, { "commit": "81536cd486d716ebd10eb0637ef6b1b9a61d9dd0", "date": "2023-10-10 16:49:05 -0400", "subject": "Simplify description rendering in command and configuration reference.", "body": "The reference documentation was still using a very old version of rendering from before the user guide was introduced. This was preserved in the initial C migration to reduce the diff between Perl and C for testing purposes.
The old version used hard linefeeds to simulate paragraphs and reduce the amount of markup that needed to be used. In retrospect this was not a great idea.\n\nInstead use more natural rendering that does not depend on using hard linefeeds between paragraphs." }, { "commit": "45abea471ecec10a351bfd9e5fbed8737c32da42", "date": "2023-10-10 13:02:50 -0400", "subject": "Simplify section titles in configuration reference.", "body": "For some reason the internal section id was included in the title. This was probably copied from another section title where it made more sense, e.g. including the option name after the title.\n\nAlso add release note missed in 1eb01622." }, { "commit": "1eb0162208b37f577d35ba775f007202b285900a", "date": "2023-10-09 14:03:43 -0400", "subject": "Build command and configuration reference in C.", "body": "Migrate generation of these files from help.xml to the intermediate documentation format. This allows us to share a lot of code that is already in C and remove duplicated code in Perl. More duplicate code can be removed in Perl once man generation is migrated.\n\nAlso update the unit test harness to allow testing of modules in the doc directory." }, { "commit": "983cc1a9e3f2865bf5128b5065722fd995382ba1", "date": "2023-10-01 16:32:27 -0400", "subject": "Adjust timeouts in the common/io/tls test to fix flapping coverage.", "body": "This test was failing coverage pretty regularly because the retry in tlsClientOpen() was not always being reached. Make the TLS timeouts longer to ensure reliable coverage." }, { "commit": "af4621894a20ca0333726d459e2e6409bd4ee1e0", "date": "2023-09-30 12:47:15 -0400", "subject": "Fix common/lock test failing with -Werror=unused-result.", "body": "Wrap ftruncate() in TEST_RESULT_INT() to prevent this error." }, { "commit": "eccd9eed196ff9adc074061f26fec523eae2ba00", "date": "2023-09-30 12:45:24 -0400", "subject": "Update Minio test/documentation container version." }, { "commit": "33ba4db9cbd334c9a10f15880e13e3fb589c3b10", "date": "2023-09-30 09:40:44 -0400", "subject": "Use CSS to number sections in documentation.", "body": "This reduces churn in the HTML when sections are added or removed from the documentation." }, { "commit": "1d5563288cf1839acb237a188b865ecb01fe97da", "date": "2023-09-29 17:28:00 -0400", "subject": "Parse defaults and text sections in help.xml.", "body": "These will be required to build documentation in C." }, { "commit": "088026e6ffcd1b084099e11f8461fdd3895b2507", "date": "2023-09-29 17:06:37 -0400", "subject": "Allow documentation source file to be specified in manifest.", "body": "The help source file had previously been hardcoded and now that is no longer needed.\n\nA future commit will introduce more sources outside of the xml path." }, { "commit": "55fda01733f2a7e71ee175345e26344a99989927", "date": "2023-09-29 16:57:01 -0400", "subject": "Remove unused references to DocConfig and DocConfigData Perl modules." }, { "commit": "217584a2c4134667e82b2de8032b5334214e08f1", "date": "2023-09-29 16:52:08 -0400", "subject": "Add new XML functions required for building documentation." }, { "commit": "8f319b6fd34f3430b5b527825bd502113f8a957d", "date": "2023-09-25 09:43:30 -0400", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "084c8e1691f59aa412b859f9004e188383826c58", "date": "2023-09-25 09:40:45 -0400", "subject": "Begin v2.49 development." 
}, { "commit": "a7ab686d0e3d48b3200986d91ea3489488800382", "date": "2023-09-25 09:32:15 -0400", "subject": "v2.48: Repository Storage Tags" }, { "commit": "cb3ff6ed4336af9e4c0643d8224d5bc3d790a655", "date": "2023-09-23 13:41:03 -0400", "subject": "Fix command reference example for the filter option.", "body": "This example was broken by 24f7252. Revert to (almost) the prior code to fix this example until something better can be committed. The something better is in progress but it adds new build requirements so it is too late to include it for the release.\n\nTechnically this breaks some other examples, but they are all internal and not visible in the user-facing documentation." }, { "commit": "6f0f2b371e6604552001b56e8931c6d15ceab41f", "date": "2023-09-19 19:01:16 -0400", "subject": "Update help title for the --annotation option.", "body": "Fix capitalization and remove pluralization that is implied." }, { "commit": "c3c0834e170196a1241a368779162fcca65f9ad5", "date": "2023-09-19 16:26:13 -0400", "subject": "Allow storage/sftp unit test to run without libssh2 installed.", "body": "Add missing #ifdefs and update tests to prevent test compilation failure and test run issues when libssh2 is not present." }, { "commit": "bb752cd111b1257088e57137bdbe6715e96eb292", "date": "2023-09-19 12:08:09 -0400", "subject": "Remove duplicate tests from storage/sftp unit test module.", "body": "These tests are already run as part of storage/posix and do not need to be duplicated in storage/sftp." }, { "commit": "31de127cf42438b1fac04c161c47264aa1fb1060", "date": "2023-09-19 11:30:29 -0400", "subject": "Fix issue restoring block incremental without a block list.", "body": "It is currently possible for a block map to be written without any accompanying blocks. This happens when a file timestamp is updated but the file has not changed. On restore, this caused problems when encryption was enabled, the block map was bundled after a file that had been stored without block incremental, and both files were included in a single bundle read. In this case the block map would not be decrypted and the encrypted data was passed to blockMapNewRead() with unpredictable results. In many cases built-in retries would rectify this problem as long as delta was enabled since block maps would move to the beginning of the bundle read and be decrypted properly. If enough files were affected, however, it could overwhelm the retries and throw an error. Subsequent delta restores would eventually be able to produce a valid result.\r\n\r\nFix this by moving block map decryption so it works correctly no matter where the block map is located in the read. This has the additional benefit of limiting how far the block map can read so it will error earlier if corrupt. Though in this case there was no repository corruption involved, it appeared that way to blockMapNewRead() since it was reading encrypted data.\r\n\r\nArguably block maps without blocks should not be written at all, but it would be better to consider that as a separate change. This pattern clearly exists in the wild and needs to be handled, plus the new implementation has other benefits." }, { "commit": "88edea4571d954e54a561247de587f747eaaadca", "date": "2023-09-18 11:30:42 -0400", "subject": "Add block incremental info to restore detail logging.", "body": "Log that block incremental was used and the delta size if less than the entire file was updated." 
}, { "commit": "d27533b40f039ff8da3a284d77fccff101083492", "date": "2023-09-17 17:06:42 -0400", "subject": "Fix manifest command filter for files in a tablespace.", "body": "pg_data/ was appended at the beginning of the filter, which meant that files in tablespaces could never be queried directly.\n\nUpdate the filter to require the full path, including pg_data/ or pg_tblspc/." }, { "commit": "ed88f0483e8c87fde6dc098102b1c49e281d96e1", "date": "2023-09-16 09:35:27 -0400", "subject": "Add missing comma in protocolParallelJobToLog()." }, { "commit": "ce9ba0fadee652d409488bee701ea6a51cafec75", "date": "2023-09-15 20:22:38 -0400", "subject": "Add known hosts checking for SFTP storage driver.", "body": "By default require a known hosts match as part of the SFTP storage driver's authentication process, i.e. repo-sftp-host-key-check-type=strict. The match is expected to be found in the default list or in a list of known hosts files provided by the user. An exception is made if a fingerprint has been manually configured with repo-sftp-host-fingerprint or repo-sftp-host-key-check-type=accept-new can be used to automatically add new hosts.\r\n\r\nAlso allow host key verification to be skipped, as before, but require the user to explicitly set this (repo-sftp-host-key-check-type=none) rather than it being the default." }, { "commit": "f5c730fd03ff27088bd91c148ed285c63b5e7502", "date": "2023-09-15 18:15:02 -0400", "subject": "Add prefix to meson build target to avoid conflicts.", "body": "Older versions of meson fail when a build target in a subproject has the same name as another subproject.\n\nThis has been fixed in newer versions, but we still need to support older versions and in any case this seems cleaner and the help build target is already prefixed in this fashion." }, { "commit": "9039d20b5bd73d3732ed6dac0a2bba4558176d8f", "date": "2023-09-15 09:30:40 -0400", "subject": "Add report option to check command.", "body": "This option is intended to eventually create a comprehensive report about the state of the pgBackRest configuration based on the results of the check command.\r\n\r\nImplement a detailed report of the configuration options in the environment and configuration files. This should be useful information when debugging configuration errors, since invalid options and configurations are automatically noted. While custom config locations will not be found automatically, it will at least be clear that the config is not in a standard location.\r\n\r\nFor now keep this option internal since there is a lot of work to be done, but commit it so that it can be used when needed and tested in various environments.\r\n\r\nNote that for now when --report is specified, the check command is not being run at all. Only the config report is generated. This behavior will be improved in the future." }, { "commit": "657c1a3e069910106bc3642153c27b0542c61dc3", "date": "2023-09-14 18:41:36 -0400", "subject": "Finalize catalog number for PostgreSQL 16 release." }, { "commit": "1b4e0cce5f3ff5ea3ee0b889e6d4ce4de1430b04", "date": "2023-09-14 08:22:21 -0400", "subject": "Add --repo-storage-tag option to create object tags.", "body": "This new option allows tags to be added to objects in S3, GCS, and Azure repositories.\r\n\r\nThis was fairly straightforward for S3 and Azure, but GCS does not allow tags for a simple upload using the JSON interface. If tags are required then the resumable interface must be used even if the file falls below the limit that usually triggers a resumable upload (i.e. 
size < repo-storage-upload-chunk-size).\r\n\r\nThis option is structured so that tags must be specified per-repo rather than globally for all repos. This seems logical since the tag keys and values may vary by service, e.g. S3 vs GCS.\r\n\r\nThese storage tags are independent of backup annotations since they are likely to be used for different purposes, e.g. billing, while the backup annotations are primarily intended for monitoring." }, { "commit": "3b9c31f6e3a0c23674f97b02c42133192fb51724", "date": "2023-09-13 16:59:52 -0400", "subject": "Use PROJECT_NAME constant instead of string literal." }, { "commit": "39bb8a0d3a96eef63219dff262a9306a4f59dae9", "date": "2023-09-12 18:09:58 -0400", "subject": "Support for dual stack connections.", "body": "The prior code would only connect to the first address provided by getaddrinfo().\r\n\r\nInstead try each address in the list. If all connections fail then wait and try them all again until timeout.\r\n\r\nCurrently a round robin approach is used where each connection attempt must fail before the next connection is attempted. This works fine, for example, when an ipv6 address has no route to the host, but will work less well when a host answers but doesn't respond in a timely fashion.\r\n\r\nWe may consider a Happy Eyeballs approach in the future, but since pgBackRest is primarily a background process, it is not clear that slightly improved response time (in the case of failed connections) is worth the extra complexity." }, { "commit": "9d3a605900644059ff0e030d71a818b3965eb031", "date": "2023-09-10 12:45:58 -0400", "subject": "Optimize WAL segment check after successful backup.", "body": "The prior code did one list command against the storage for each WAL segment. This led to a lot of lists and was especially inefficient when the WAL (or the majority of it) was already present.\r\n\r\nOptimize to keep the contents of a WAL directory and use them on a subsequent search. Leave the optimizations for a single WAL segment since other places still use that mode." }, { "commit": "edbd520c81f5d437963aaf42d1c79b2c51d01665", "date": "2023-09-10 10:36:58 -0400", "subject": "Refactor address list lookup to include all returned addresses.", "body": "sckHostLookup() only returned the first address record returned from getaddrinfo(). The new AddressInfo object provides a full list of values returned from getaddrinfo(). Freeing the list is also handled by the object so there is no longer a need for FINALLY blocks to ensure the list is freed.\r\n\r\nAdd the selected address to the client/server names for debugging purposes.\r\n\r\nThis code does not attempt to connect to multiple addresses. It just lays the groundwork for a future commit to do so." }, { "commit": "f42d927d2dcd58cf5a80d68eaf6888bcf2c67a4b", "date": "2023-09-10 09:47:49 -0400", "subject": "Retry reads of pg_control until checksum is valid.", "body": "On certain file systems (e.g. ext4) pg_control may appear torn if there is a concurrent write while reading the file. To prevent an invalid read, retry until the checksum matches the control data.\r\n\r\nSpecial handling is required for the pg-version-force feature since the offset of the checksum is not known. In this case, scan from the default position to the end of the data looking for a checksum match. 
This is a bit imprecise, but better than nothing, and the chance of a random collision in the control data seems very remote considering the ratio of data size (< 512 bytes) to checksum size (4 bytes).\r\n\r\nThis was discovered and a possible solution proposed for PostgreSQL in [1]. The proposed solution may work for backup, but pgBackRest needs to be able to read pg_control reliably outside of backup. So no matter what fix is adopted for PostgreSQL, pgBackRest needs retries. Further adjustment may be required as the PostgreSQL fix evolves.\r\n\r\n[1] https://www.postgresql.org/message-id/20221123014224.xisi44byq3cf5psi%40awork3.anarazel.de" }, { "commit": "c1805134b370f0c5c54c40a2df8978599328784f", "date": "2023-09-09 12:54:55 -0400", "subject": "Aggregate error retries in ErrorRetry output.", "body": "If there are a lot of retries then the output might be very large and even be truncated by the error module. Either way, it is not good information for the user.\n\nWhen a message is repeated, aggregate so that total retries and time range are output for the message. This provides helpful information about what happened without overwhelming the user with data." }, { "commit": "5314dbffc761bc7507db02693f2421c1578efe10", "date": "2023-09-09 11:22:33 -0400", "subject": "Adjust Wait object to be more accurate when nested.", "body": "The prior code gave a \"free\" extra iteration at the end of the wait, functionality that was copied directly from the equivalent code in Perl. This works and is mostly negligible except when wait loops are nested, in which case outer loops will always run twice even if an inner loop times out, which has a multiplying effect. For example, three nested wait loops with a timeout of three seconds will result in the inner loop being run four times (for a total of twelve seconds) even if it times out each time.\n\nInstead make waitMore() stop exactly when time is up. This makes more sense because a complete failure and timeout of an inner loop means retrying an outer loop is probably a waste of time since that inner loop will likely continue to fail.\n\nAlso make waitRemaining() recalculate the remaining time rather than depending on the prior result.\n\nSome tests needed to be adjusted to take into account there being one less loop. In general this led to a simplification of the tests.\n\nReinit a begin value in the wait unit tests. This is not related to the current change but it does make the time measurements more accurate and less likely to fail on an edge case, which has been observed from time to time.\n\nThis change appears to have a benefit for test runtime, which seems plausible especially for nested waits, but a larger sample of CI runs are needed to be sure." }, { "commit": "d57900839addeac56b89a445352a8efc96a197f0", "date": "2023-09-04 18:51:44 -0400", "subject": "Improve backup test harness to handle very large quantities of WAL.", "body": "The current tests only generate small quantities of WAL per backup but sometimes it is useful to generate large quantities for testing.\n\nFix the issues with generating large quantities of WAL and also improve memory management."
}, { "commit": "43524a47a4c6a847f76514812f94b87cb7af4c05", "date": "2023-09-04 13:39:11 -0400", "subject": "Fix configuration reference example for the tls-server-address option.", "body": "The prior example (::*) was not valid and would result in the following error:\r\n\r\nERROR: [049]: unable to get address for '::*': [-2] Name or service not known\r\n\r\nCorrect values are either * for IPv4 or :: for IPv6. The IPv4 value is used as the example since only one example is allowed." }, { "commit": "c0935b79924686ebf4efef99c7dc69aea9efb28d", "date": "2023-09-04 12:00:06 -0400", "subject": "Split release notes into individual files.", "body": "The release.xml file was getting pretty unwieldy so break release notes into separate files. Also break contributors into a separate file.\n\nIn theory most of release.xml could now be generated automatically but adding a new release does not represent a serious maintenance burden, so for the time being it does not seem worth it." }, { "commit": "6cb9c40fb8ea18e6572bf3f6e44bbb1d8f922682", "date": "2023-09-04 10:39:19 -0400", "subject": "Improve GCS multi-part performance.", "body": "The prior code avoided uploading a chunk if it was not clear whether the write was complete or not. This was primarily due to the GCS documentation being very vague on what to do in the case of a zero-size chunk.\r\n\r\nNow chunks are uploaded as they are available. This should improve performance and also reduces the diff against a future commit that absolutely requires zero-size chunks." }, { "commit": "fd9c6b0e9ddd38de57475d1325bc53b79833a983", "date": "2023-09-01 12:03:39 -0400", "subject": "Add missing const qualifier to errorTypeList." }, { "commit": "4f52015b14a1f4e5df9373707179a5447cb5a4a6", "date": "2023-08-31 20:22:01 -0400", "subject": "New CI container build for PostgreSQL 16 rc1." }, { "commit": "e3fcd8cd9e0be42a32fc4cbd2ae3db2c072eabcd", "date": "2023-08-31 16:36:52 -0400", "subject": "Add backup size completed/total to info command JSON output.", "body": "This allows for backup progress to be monitored by external processes, whereas before backup progress was only available via text output." }, { "commit": "fbd992adc7b18db5e70774d675c160c8c8e2f031", "date": "2023-08-29 13:49:49 -0400", "subject": "Allow archive-get command to run when stanza is stopped.", "body": "The restore command can run while the stanza is stopped so it makes sense for the archive-get command to follow the same rule.\n\nThe important thing is to ensure that all commands that write to the repository are stopped when the stanza is stopped." }, { "commit": "8424737697edfee7cbea195b3bccb4077e1c2ee5", "date": "2023-08-29 12:35:29 -0400", "subject": "Encode key in HTTP query.", "body": "The key also needs to be encoded (not just the value).\n\nThis is not currently an issue because none of the keys in use require encoding." }, { "commit": "d24180e4da49a8e2ef2a10a02e76cd204fe1ae8f", "date": "2023-08-29 12:28:02 -0400", "subject": "Fix conflicting group in test containers.", "body": "The tests expect the group name/id to match between the host system and the container. If there is a conflict rename the group with the required id to the expected name.\n\nThis could have unintended consequences but it seems reasonably safe since we control everything that runs in the container and there should never be any system processes running." 
}, { "commit": "1e6efd73e1fcf7bbe3d3f04d925fefde44847109", "date": "2023-08-23 12:56:49 -0400", "subject": "Refactor IoRead handling in backupFile().", "body": "Store the value of storageReadIo() rather than calling it each time. This is slightly more efficient, but more importantly it will be needed for an upcoming commit." }, { "commit": "82c12fed123df057fce109757aa5c72dce605440", "date": "2023-08-21 13:10:48 -0400", "subject": "Adjust range check per Coverity complaint.", "body": "Coverity complained that: Overrunning array \"optionGroupIndexKeep[optionGroupIdx]\" of 512 bytes at byte offset 4294967550 using index \"keyIdx\" (which evaluates to 4294967294).\n\nThis does not seem possible but adjust the code to make Coverity happy, as usual." }, { "commit": "eb32d6de5e1575a37efb166abbf7df400d93352e", "date": "2023-08-08 08:41:34 +0100", "subject": "Accept leading tilde in paths for SFTP public/private keys.", "body": "The documentation indicates that leading tilde file paths for public/private keys are valid but the functionality was omitted from the original implementation." }, { "commit": "1141dc20708cf0c68296f83e74c872a7db9c2bd9", "date": "2023-08-07 17:03:09 +0100", "subject": "Multi-stanza check command.", "body": "Check command now checks multiple stanzas when the stanza option is omitted.\r\n\r\nThe stanza list is extracted from the current configuration rather than scanning the repository like the info command. Scanning the repository is a problem because configuration for each stanza may not be present in the current configuration. Since this functionality is new for check there is no regression.\r\n\r\nAdd a new section to the user guide to cover multi-stanza configuration and provide additional coverage for this feature.\r\n\r\nAlso fix a small issue in the parser when an indexed option has a dependency on a non-indexed option. There were no examples of this case in the previous configuration." }, { "commit": "995a8e96691d23eaf3914a425669f89deb3ce002", "date": "2023-08-07 13:30:50 +0100", "subject": "Reload GCS credentials before renewing authentication token.", "body": "This allows the service key to be updated while a command is running. The new key should be written atomically and ideally the old key should remain valid for some period of time to avoid a race condition if the old token happens to expire at the same time that the new key is being generated." }, { "commit": "cbafcfabf2eb593de9ee95483da93f278c9ff1ee", "date": "2023-08-07 12:38:33 +0100", "subject": "Remove UNCONSTIFY() from gz compression module.", "body": "The ZLIB_CONST macro may be used since 1.2.5.2 (17 Dec 2011) to specify that the input buffers are const. This is sufficiently old to cover all non-EOL distributions, i.e. everything we test on.\n\nCompiling on older distributions may generate warnings but will continue to work." }, { "commit": "6c75c99d0b3024311ea7b254953f1c6a901bcb70", "date": "2023-07-31 11:09:17 +0200", "subject": "Update xxHash to v0.8.2.", "body": "This release fixes compilation errors on some platforms and contains numerous performance improvements." }, { "commit": "f55c8f1b813fadbc7b19a1cc4eb8f90e36b2b18c", "date": "2023-07-26 10:45:12 +0200", "subject": "Rename HRN_PG_CONTROL_OVERRIDE_PUT().", "body": "Rename to HRN_PG_CONTROL_OVERRIDE_VERSION_PUT() since other types of overrides are possible (and coming)." }, { "commit": "65fb3b3b2d89e2fc80698e52a8378395b18e6d2a", "date": "2023-07-24 09:17:46 +0200", "subject": "Update config.guess and config.sub to latest versions." 
}, { "commit": "c585ddaf48b806b5d038f0c90e7848875f7a533e", "date": "2023-07-24 09:15:38 +0200", "subject": "Begin v2.48 development." }, { "commit": "b5b033cc1ce152d387f20681f023b5e1f7c3ed3b", "date": "2023-07-24 09:12:30 +0200", "subject": "v2.47: Performance Improvements and Bug Fixes" }, { "commit": "5ed6f8df1487174985c42d6e13c367d70c2d8b8a", "date": "2023-07-18 07:35:12 +0200", "subject": "Fix spurious automatic delta backup on backup from standby.", "body": "When performing backup from standby the file sizes on the standby may not be equal to file sizes on the primary. This is because replication continues during the backup and by the time the file is copied from the standby it may have changed. Since we cap the size of all files copied from the standby this practically applies to truncation and in particular truncation of free space maps (at least, every case we have seen so far is an fsm). Free space maps are especially vulnerable since they are only partially replicated, which amplifies the difference between the primary and standby.\r\n\r\nOn an incremental backup it may look like the size has changed on the primary (because of the final size recorded by the standby in the prior backup) but the timestamp may not have changed on the primary and this will trigger a checksum delta for safety. While this has no impact on backup integrity, checksum delta incrementals can run much longer than regular incrementals and backup schedules may be impacted.\r\n\r\nThe solution is to preserve the original size in the manifest and use it to do the time/size check. In the case of backup from standby the original size will always be the size on the primary, which makes comparisons against subsequent file sizes on the primary consistent. Original size is only stored in the manifest when it differs from final size, so there should not be any noticeable manifest bloat." }, { "commit": "4c27d74bbd3a44d8ab1af140277d3cd8302060c2", "date": "2023-07-17 22:37:02 +0200", "subject": "Preserve block incremental info in manifest during delta backup.", "body": "It was possible for block incremental info to be lost if a file had been modified in such a way that block incremental would be disabled if the file were new, e.g. if the file shrank below the block incremental limit or the file timestamp regressed far enough into the past. In those cases the block incremental info would not be copied in manifestBuildIncr().\r\n\r\nInstead always copy the block incremental info in case the file ends up being referenced to a prior backup.\r\n\r\nThe validation tests were not robust enough to catch this issue so they were improved in 1d42aed.\r\n\r\nIn the particular case that exposed this bug, a file had a timestamp that was almost four weeks in the past at full backup time. A few days later a fail over occurred and the next incremental ran on the new primary (old standby) in delta mode. The same file had a timestamp just a few hours older than in the full backup, but now four weeks older than the current backup. Block incremental was disabled for the file on initial manifest build because of its age, which meant the block incremental info was not copied into the new manifest. The delta then determined the file had not changed and referenced it to the full backup. On restore, the file appeared to be a normal file stored in a bundle but could not be decompressed because it was in fact a block incremental." 
}, { "commit": "cdb7e26350822b55c00771d6d575242b1202e741", "date": "2023-07-14 17:53:58 +0300", "subject": "Fix block incremental file names in verify command.", "body": "The verify command was not appending the .pgbi extension instead of the compression extension when verifying block incremental files stored outside a bundle.\r\n\r\nOriginally the idea was that verify would not need any changes (since it just examines repo-size and checksum) but at some point the new extension was added and broke that assumption.\r\n\r\nUse backupFileRepoPathP() to generate the correct filename (Just like backup, restore, etc)." }, { "commit": "1d42aed152563fff6ec70c517d86eb39158b3f20", "date": "2023-07-14 11:18:25 +0300", "subject": "Improve validation of referenced files in command/backup unit test.", "body": "Referenced files were not being checked for validity unless they were hard linked to the current backup (which a lot of the tests did). Newer tests with bundling do not have hard links and so missed these checks.\n\nImprove the validation code to check referenced files in the manifest even when they are not hard linked into the current backup.\n\nAdd a delta test for bundling/block incremental that includes a file old enough to get a block size of zero. These are good tests by themselves but they also reduce the churn in an upcoming bug fix." }, { "commit": "c9703b35304c66a0bd0ebaaaebb389150ef207d6", "date": "2023-07-13 13:47:27 +0300", "subject": "Log when file size changes during backup.", "body": "It is possible for the size of a file to change during the backup. In most cases we won't notice since files sizes are usually capped but it is possible for some files to grow or shrink between when they are recorded in the manifest and when they are actually copied. The new size is recorded in the manifest but the old size may be useful for debugging.\n\nThe new code has coverage but no test changes because it is covered by the parallel backup testing, which does not have deterministic log output. It doesn't seem worth creating a new test to check the log format as it is not very critical (though the output was manually verified)." }, { "commit": "b6b13bd6344a1f3ed7279f5a8da771643c0e948a", "date": "2023-07-12 13:23:16 +0300", "subject": "Mention block-level backups in feature list." }, { "commit": "c8afbed5308d31302c5853850e4ed56d48b6bb4b", "date": "2023-07-12 13:09:34 +0300", "subject": "Increase protocolServerError() logging to debug level.", "body": "Errors should be rare enough that it makes sense to log them at debug level. Right now if there is an error if won't be logged at debug level, which makes it harder to tell why the main process may have terminated the local/remote process." }, { "commit": "06536b5814f2fed3e710d1d7b578aa6be53195cb", "date": "2023-07-12 10:20:09 +0300", "subject": "Rename macros in command/verify unit test.", "body": "These macros were redefined, which worked since they were identical to their prior definitions, but this will not always be true." 
}, { "commit": "aa229a1deef3afeebb66db1443b19e24559adf96", "date": "2023-07-09 21:56:05 +0300", "subject": "Modify time_t casts based on complaints from Coverity.", "body": "Coverity complained about time_t being cast directly to unsigned int, so instead cast the result of the operation.\n\nWe are confident in both cases that the time_t values will not be out of unsigned int range but Coverity has no way to know that.\n\nOne of these is new (introduced by 9efd5cd0) but the other one (from a9867cb0) remained unnoticed for a while, though it has not caused any production impact." }, { "commit": "28b6b2d4659d78ce67c1bd3104d96e5705d955c9", "date": "2023-07-07 10:36:15 +0200", "subject": "Improve performance of SFTP storage driver.", "body": "The initial implementation used simple waits when having to loop due to getting a LIBSSH2_ERROR_EAGAIN, but we don't want to just wait some amount of time, we want to wait until we're able to read or write on the fd that we would have blocked on.\r\n\r\nThis change removes all of the wait code from the SFTP driver and changes the loops to call the newly introduced storageSftpWaitFd(), which in turn checks with libssh2 to determine the appropriate direction to wait on (read, write, or both) and then calls fdReady() to perform the wait using the provided timeout.\r\n\r\nThis also removes the need to pass ioSession or timeout down into the SFTP read/write code." }, { "commit": "125676ae0e98d3730d2dc0d47e37ca1df145d044", "date": "2023-07-07 09:56:26 +0200", "subject": "Cleanup of init/handshake in storageSftpNew().", "body": "Rename handshakeStatus to rc to be consistent with the rest of the module.\n\nAdd comments and do some reformatting." }, { "commit": "9efd5cd0bb40c5dc2fcac2e21c9b62e554b94db6", "date": "2023-07-06 18:46:31 +0200", "subject": "Add timezone offset to info command date/time output.", "body": "This makes it easier to use timestamps from the info command directly in PostgreSQL recovery settings, especially the --target option." }, { "commit": "762498f4cd5f1425db4c176fc707926d62cc7c04", "date": "2023-07-06 11:27:00 +0200", "subject": "Fix time-based archive expiration when no backups are expired.", "body": "In the case that no backups were expired but time-based retention was met no archive expiration would occur and the following would be logged:\r\n\r\nINFO: time-based archive retention not met - archive logs will not be expired\r\n\r\nIn most cases this was harmless, but when retention was first met or if retention was increased, it would require one additional backup to expire earlier WAL. After that expiration worked as normal.\r\n\r\nEven once expiration was working normally the message would continue to be output, which was pretty misleading since retention had been met, even though there was nothing to do.\r\n\r\nBring this code in line with count-based retention, i.e. always log what should be expired at detail level (even if nothing will be expired) and then log info about what was expired (even if nothing is expired). For example:\r\n\r\nDETAIL: repo1: 11-1 archive retention on backup 20181119-152138F, start = 000000010000000000000002\r\nINFO: repo1: 11-1 no archive to remove" }, { "commit": "e280ed9098a6f5da9bdabb541e9c69d4f24ffcf2", "date": "2023-07-06 08:58:16 +0200", "subject": "Make result code handling in storage/sftp more consistent.", "body": "Initializers are useless since rc is always set later. Make rc checks consistent with the rest of the module." 
}, { "commit": "1fd8845c7ffd76ee335ec7960ccd531eef43bd29", "date": "2023-06-29 11:08:58 +0200", "subject": "Remove unresolved todo from 87087fac.", "body": "Seems easiest just to make the additional config required since it tests that custom ports are being used correctly. The test for synthetic was a noop since SFTP is not used in synthetic tests." }, { "commit": "0051d7ca872c84231dd55057586f957aa2bc1fa3", "date": "2023-06-29 09:42:12 +0200", "subject": "Update comments for removal of chunking and block numbers.", "body": "dd4e526 should have updated this comment but failed to do so." }, { "commit": "0ac09344dc17fb54e250705ce49399b86329bb3d", "date": "2023-06-28 18:19:20 +0200", "subject": "Fix unique label generation for diff/incr backup.", "body": "If there were at least two full backups and the last one was expired, it was impossible to make either a differential or incremental backup without first making a new full backup. The backupLabelCreate() function identified this situation as clock skew because the new backup label was compared with label of the expired full backup.\r\n\r\nIf the new backup is differential or incremental, then its label is now compared with the labels of differential or incremental backups related to the same full backup.\r\n\r\nAlso convert a hard-coded date length to a macro." }, { "commit": "5cbef3ade2ab08d8d2431f40d09683efe55c9586", "date": "2023-06-25 17:36:57 +0200", "subject": "Fix missed memory auditing in FUNCTION_LOG_RETURN_VOID().", "body": "9ca492c missed adding auditing to this macro and as a result a few memory leaks have slipped through. Add auditing to the macro to close this hole.\n\nOf the leaks found the only possibly serious one is in blockIncrProcess(), which would leak a PackRead of about eight bytes with every superblock. Since superblocks max out at a few thousand per file this was probably not too bad.\n\nAlso change the ordering of auditing in FUNCTION_TEST_RETURN_VOID(). Even though the order does not matter, having it different from the other macros is confusing and looks like an error." }, { "commit": "ecae001653489ac734c3424ea10393989c218ab5", "date": "2023-06-24 10:18:29 +0200", "subject": "Add test for configuration hash type override behavior.", "body": "This behavior is different than regular options where a repeated value will result in an error. It appears to be a legacy of the original Perl implementation, which used a hash as the underlying data type in the built-in command-line parser, and the C command-line parser was written to match." }, { "commit": "434938e32b2f71301196f933c3b9a62ed1183c31", "date": "2023-06-22 18:23:06 +0200", "subject": "Update Fedora test image to Fedora 38.", "body": "This adds testing for the latest tool versions, e.g. gcc." }, { "commit": "5531e2662d2abcd52c24f25b4a5836c9e07a8ac9", "date": "2023-06-18 12:55:29 +0300", "subject": "Add --tz param missed in C unit test migration.", "body": "This was missed in the C unit test migration and since then a new test was added that was not setting its timezone correctly.\n\nThis feature exists to make sure the tests will run on systems with different timezones and has no impact on the core code." }, { "commit": "a36f480c85c7add6fdf0e5517748d740e3a7b424", "date": "2023-06-14 14:39:40 +0200", "subject": "Clarify preference to install from packages in the user guide." }, { "commit": "9b1bc7d11bdea9cdb1bbf1210d66d53c15d72a46", "date": "2023-06-14 12:28:00 +0200", "subject": "Use new LibXML error handler.", "body": "The old error handler has been deprecated." 
}, { "commit": "4adf6eed09da3f0819abef813c5a44deb9c91487", "date": "2023-06-12 23:10:14 +0200", "subject": "Clarify comments for the handling of pg_control during restore." }, { "commit": "396e237d53a94b04878bfea23b4de2400256de82", "date": "2023-06-12 15:41:39 +0200", "subject": "Update NEWS.md for new version and features." }, { "commit": "6875358d11828cbbaffb51f2297a866af32ee8a4", "date": "2023-06-12 11:26:03 +0200", "subject": "Fix typo in \"Upgrading PostgreSQL\" section of user guide." }, { "commit": "818ef4b71d3ec77c0003ea8ca83981977b4447ac", "date": "2023-06-08 16:36:25 +0300", "subject": "Improve comments in protocol/command module." }, { "commit": "0bf1b4a3b8cc6caf12a4174cd52a1782cda4dc31", "date": "2023-05-26 19:50:08 +0300", "subject": "Move delete stanza section in user guide.", "body": "Deleting a stanza after all the storage driver stanzas were created was causing problems because the SFTP driver is slow and the GCS driver has no server (so it threw errors). This delayed the shutdown of PostgreSQL, which for some reason caused systemctl to hang when the documentation was being built on a RHEL host.\n\nMove the section up and add a comment about why the location is required. Also add a comment to the GCS section about its location.\n\nThis does not address the issue of systemctl hanging on RHEL container hosts but it will hopefully make it less common." }, { "commit": "5d671c63d898e9bf1c08c8ed9e830070361c861e", "date": "2023-05-25 20:08:12 +0300", "subject": "New CI container build for PostgreSQL 16 beta1." }, { "commit": "9cceed6ac4a32c14a797eba89a4ad1dc06d89d80", "date": "2023-05-24 16:34:21 +0300", "subject": "Skip recovery.signal for PostgreSQL >= 12 when recovery type=none.", "body": "Bring PostgreSQL >= 12 behavior in line with other versions when recovery type=none.\r\n\r\nWe are fairly sure this did not work correctly when PostgreSQL 12 was released, but apparently the issue has been fixed since then. Either way, after testing we have determined that the behavior is now as expected." }, { "commit": "36ff81dc6ff7e98e40c0c05a4a343a71b91b1678", "date": "2023-05-24 14:17:07 +0300", "subject": "Centralize error handling for unsupported features.", "body": "Some features are conditionally compiled into pgBackRest (e.g. lz4). Previously checking to see if the feature existed was the responsibility of the feature's module.\r\n\r\nCentralize this logic in the config/parse module to make the errors more detailed and consistent.\r\n\r\nThis also fixes the assert that is thrown when SFTP storage is specified but SFTP support is not compiled into pgBackRest." }, { "commit": "de46276bf60b8a2181621b123d1a99e56808a089", "date": "2023-05-24 09:56:32 +0300", "subject": "Refactor allow list processing in config/parse module.", "body": "Combine StringId and int checking into a single loop. This seems more compact and makes it easier to add code that affects both types (and possibly more types in the future)." }, { "commit": "48b26bf569664da59bff08eb3ef959f0cd440e29", "date": "2023-05-23 12:50:12 +0300", "subject": "Fix sftp-all=y in the user guide so it creates a valid configuration.", "body": "This was not tested in 87087fac and the generated config was only valid for pushing from the primary. Also do some general cleanup.\n\nUpdate the SFTP server user to be \"pgbackrest\" instead of \"postgres\".\n\nEven though sftp-all=y now creates a valid configuration, the user guide build still fails because SFTP is too slow and operations time out (particularly starting PostgreSQL). 
This will need to be addressed in a future commit." }, { "commit": "c633b187db32d18ccab24e77ab3413335e067775", "date": "2023-05-23 10:58:51 +0300", "subject": "Remove user-facing documentation references to --vm=none.", "body": "This parameter is now optional and defaults to none so there is no reason to explicitly show it in user-facing documentation.\n\nAlso make the vm parameter in ci.pl optional to be consistent with how test.pl behaves." }, { "commit": "4ec51cdb2f92e3fd814063a2bb606640965f8c5a", "date": "2023-05-23 10:40:56 +0300", "subject": "Explicitly create SSH directory in SFTP setup.", "body": "RHEL 9 (at least) will warn on stderr that the directory has been created which causes the documentation build to fail." }, { "commit": "5bbe98758919a38692cd859809203cb029543c59", "date": "2023-05-23 10:14:00 +0300", "subject": "Build u20 image to speed contributing document generation.", "body": "This image was left out of the last round of builds, which forced the contributing document to build it from scratch." }, { "commit": "c2c60350d376379322d6c14c27cbb93bf6e86987", "date": "2023-05-23 08:25:17 +0300", "subject": "Add missed --no-log-timestamp in unit tests and improved formatting.", "body": "The --no-log-timestamp option was missed when unit test building was migrated to C, which caused test timings to show up in the contributing guide. This caused no harm but did create churn in this file during releases.\n\nAlso improve the formatting when test timing is disabled." }, { "commit": "9dbf76d8e87287786f03b65c94305b3dc126029b", "date": "2023-05-22 11:19:00 +0300", "subject": "Begin v2.47 development." }, { "commit": "b461f7c6f884dba8056858464d1b571e708dad77", "date": "2023-05-22 11:13:13 +0300", "subject": "v2.46: Block Incremental Backup and SFTP Storage" }, { "commit": "a28b72c62431bcf9df02791219d9d6f5d0e60b54", "date": "2023-05-18 09:32:44 +0300", "subject": "Add warning about using recovery type=none." }, { "commit": "c61115e8adccd17d8afb6bd527fb6a396ec6b72a", "date": "2023-05-15 17:46:41 +0300", "subject": "Add bundling and block incremental to stress testing.", "body": "This makes the stress testing more robust and provides additional testing for important features." }, { "commit": "87087fac231d2110160b3219daee3f9fb91aec02", "date": "2023-05-13 19:16:16 +0300", "subject": "SFTP support for repository storage." }, { "commit": "0c1f823e7a3abf1cadb44de3832ec1ddb2da7ec8", "date": "2023-05-12 15:46:04 +0300", "subject": "Add note about running stanza-create on already-created repositories." }, { "commit": "c7ac17330405510e56243e967d108b8d3b387ce0", "date": "2023-05-10 09:22:26 +0300", "subject": "Increase check timeouts in real/all integration tests.", "body": "The prior timeouts were a bit aggressive and were causing timeouts in the Azure tests. There have also been occasional timeouts in other storage drivers.\n\nThe performance of CI environments is pretty variable so increased timeouts should make the tests more stable." }, { "commit": "2fc641383709ce1f91f35bb1edf4e7c1ab0f4eb3", "date": "2023-05-06 12:16:58 +0300", "subject": "Fix typos in test titles." }, { "commit": "6f250bf6860054397181f5678220faaa6574993e", "date": "2023-05-06 12:15:21 +0300", "subject": "Fix groupId type in StorageInfo.", "body": "gid_t is the correct type but there was no bug because uid_t and gid_t have the same underlying type." 
}, { "commit": "ad9196cb49ce86cef678e453046803fb2be2cf70", "date": "2023-05-05 10:08:49 +0300", "subject": "Update Debian user guide to build on Ubuntu 20.04.", "body": "Ubuntu 18.04 will be EOL at the end of the month so update to a newer version." }, { "commit": "023d3c99d8d00497501b21fb20b07f71ea49ee40", "date": "2023-05-04 08:52:31 +0300", "subject": "Fix incorrect commit hash in 2f322e2a." }, { "commit": "2f322e2a49f20ff3036db6382363efb40246922e", "date": "2023-05-03 09:49:24 +0300", "subject": "Add efac3474 (remove double spacing) to .git-blame-ignore-revs." }, { "commit": "d49907239eb37c3e4e905f97543824181a1bd406", "date": "2023-05-03 09:37:59 +0300", "subject": "Remove double spaces missed in 1bd5530a." }, { "commit": "efac3474cffc4f01154c13cc358601c4c98bcd42", "date": "2023-05-02 12:59:20 +0300", "subject": "Add 1bd5530a (remove double spacing) to .git-blame-ignore-revs." }, { "commit": "1bd5530a59cd8ddbabc279802d1ede4f8fbd5314", "date": "2023-05-02 12:57:12 +0300", "subject": "Remove double spaces from comments and documentation.", "body": "Double spaces have fallen out of favor in recent years because they no longer contribute to readability.\n\nWe have been using single spaces and editing related paragraphs for some time, but now it seems best to update the remaining instances to avoid churn in unrelated commits and to make it clearer what spacing contributors should use." }, { "commit": "5ea7b91bf9ab57a29778d2332c0971e84f14c4c6", "date": "2023-05-02 12:07:28 +0300", "subject": "Remove extraneous space from error messages." }, { "commit": "9637d9449091632f158f8e2be8afbad4bda200fc", "date": "2023-05-02 09:37:42 +0300", "subject": "Use boolean variant constants where possible instead of redeclaring." }, { "commit": "822f2a584260a012b833455a7a2bd1c8487e3010", "date": "2023-05-01 19:26:48 +0300", "subject": "Finalize block incremental feature.", "body": "Remove beta status and update documentation to remove beta references and warnings.\r\n\r\nThe repo-block-* sub-options have been marked internal. Most users will be best off with the default behavior and we may still decide to change these options for remove them in the future." }, { "commit": "c510046dd77c40487d2d6c659834927a3bab1106", "date": "2023-05-01 14:27:56 +0300", "subject": "Remove extraneous spaces in user guide." }, { "commit": "dd4e52679ecdc43539bd038b5aa9bd4ff9055cbf", "date": "2023-04-27 23:29:12 +0300", "subject": "Removing chunking and block numbers from incremental block list.", "body": "These were intended to allow the block list to be scanned without reading the map but were never utilized. They were left in \"just in case\" and because they did not seem to be doing any harm.\r\n\r\nIn fact, it is better not to have the block numbers because this allows us set the block size at a future time as long as it is a factor of the super block size. One way this could be useful is to store older files without super blocks or a map in the full backup and then build a map for them if the file gets modified in a diff/incr backup. This would require reading the file from the full backup to build the map but it would be more space efficient and we could make more intelligent decisions about block size. It would also be possible to change the block size even if one had already been selected in a prior backup.\r\n\r\nOmitting the block numbers makes the chunking unnecessary since there is now no way to make sense of the block list without the map. Also, we might want to build maps for unchunked block lists, i.e. 
files that were copied normally." }, { "commit": "3fc3690dd77ef5fa032abb006558184164db53ec", "date": "2023-04-27 10:30:50 +0300", "subject": "PostgreSQL 16 support.", "body": "Add catalog version and WAL magic for PostgreSQL 16.\r\n\r\nThe GUC to force parallel mode has been renamed so update that in the tests." }, { "commit": "39059dc4e7a74e6473099eefeed4e9e0efe65e60", "date": "2023-04-26 15:00:36 +0300", "subject": "Remove extra linefeed." }, { "commit": "3ff88ffbb495fd669e9a361478e60192930ee0a1", "date": "2023-04-25 11:52:28 +0300", "subject": "Avoid chown() on recovery files during restore.", "body": "The chown() was already skipped on the files restored from the repository but the same logic was not applied to the generated recovery files, probably because chown'ing a few recovery files does not have performance implications. Use the same logic for recovery files to determine if they need to be chown'd.\r\n\r\nUltimately this behavior is pretty hard to test, so add a fail safe into the Posix driver that will skip chown if the permissions are already as required." }, { "commit": "750ab8e55c276fa0127d19961a59fec254e24c61", "date": "2023-04-21 16:21:10 +0300", "subject": "Add MacOS back to CirrusCI.", "body": "9e29c01 removed MacOS testing due to breaking changes in the update to arm on the platform.\n\nUpdate the scripts to correctly work with the version of Homebrew deployed with the arm images." }, { "commit": "23d7d67d4961c9bb38d73ef1c409369303276846", "date": "2023-04-21 15:08:59 +0300", "subject": "Fix missing void parameter." }, { "commit": "f5e6bc26983396ec51c6579aade37db39807f3a5", "date": "2023-04-20 13:24:12 +0300", "subject": "Allow page header checks to be skipped.", "body": "These checks cause false negatives for page checksum verification when the page is encrypted because pd_upper might end up as 0 in the encrypted data. This issue is rare but reproducible given a large enough cluster.\r\n\r\nMake these checks optional, but leave them enabled by default." }, { "commit": "8240eb5da57d2950ebbd3492c163b4e0151d3cb7", "date": "2023-04-16 17:41:27 +0300", "subject": "Autogenerate PostgreSQL versions.", "body": "This will make adding/removing versions of PostgreSQL more reliable." }, { "commit": "a05bf6bb15ba7f7e4b610dbb84e87ebcd359dd38", "date": "2023-04-16 17:32:24 +0300", "subject": "Rename PG_VERSION_*_STR constants to PG_VERSION_*_Z.", "body": "This is more consistent with other zero-terminated string constants and also has the benefit of being shorter." }, { "commit": "75254c9285e70913920af9ab6e9f362bc4d11d7d", "date": "2023-04-11 16:28:29 +0400", "subject": "Parameterize configLoad() as cfgLoadP().", "body": "There is one existing optional parameter and there are more to come." }, { "commit": "d6cb3de17afff0a9c57f312455a5ca3cf202cba6", "date": "2023-04-11 14:50:59 +0400", "subject": "Update command/check module to recent coding standards.", "body": "Add const as appropriate and remove an unneeded declaration." }, { "commit": "a9c5cd9749f6b5ae1005453b1039d91f37fb245b", "date": "2023-04-11 12:46:00 +0400", "subject": "Update command/backup module to recent coding standards.", "body": "Add const as appropriate and avoid setting results to NULL if the result will definitely be set later on." }, { "commit": "f33e1cfb1600643820a1ae76bdeb15c62ac1fcc7", "date": "2023-04-06 11:38:18 +0400", "subject": "Add error retry detail for HTTP retries.", "body": "This should make it clearer when retries have happened and for how long."
}, { "commit": "df419b34b55a15a902ff5cc1912aaebc7e023738", "date": "2023-04-06 11:03:46 +0400", "subject": "Improve retry error messages.", "body": "Centralize the code to allow it to be used in more places and update the protocol/server module to use the new code.\n\nSince the time measurements make testing difficult, also add time and errorRetry harnesses to allow specific data to be used for testing. In the case of errorRetry, the production behavior is turned off by default during testing and only enabled for the errorRetry test module." }, { "commit": "801e396dace2a4d9f4125ef0e751c90dc0cc644a", "date": "2023-04-06 10:38:49 +0400", "subject": "Move error modules to common/error directory.", "body": "There are already enough files to warrant a separate directory and more are coming.\n\nAlso remove extraneous includes." }, { "commit": "ff98636e41b8ad14babe1e9fa1fdc15aad2244cd", "date": "2023-04-06 08:22:14 +0400", "subject": "Update 32-bit CI host to Debian 10 from Ubuntu 18.04.", "body": "Ubuntu 18.04 will be EOL before the next release, so update to the oldest available Debian version.\n\nAlso fix one incorrect return value type, a test cast, and adjust some test timeouts." }, { "commit": "a9f39857cf7d8c0eda9f5f19f3d4c9497d4d47f2", "date": "2023-04-05 11:24:53 +0400", "subject": "Remove unused strOS parameter in Common::HostTest." }, { "commit": "8f7f73e4af916bf1bc6a0cb72be35b7061b297df", "date": "2023-03-30 17:46:25 +0500", "subject": "Sleep using nanosleep() instead of select().", "body": "This is a safer way to sleep due to select's not-portable interaction with signals.\n\nBased on https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=a948e49e2ef11815be0b211723bfc5b53b7f75a8 from the PostgreSQL project." }, { "commit": "2cfbee903a0a16d1fdeca52dbf6e775cdcddcc74", "date": "2023-03-30 15:43:37 +0500", "subject": "Use Buffer object for page buffer in PageChecksum filter.", "body": "This avoids the object needing to support allocations." }, { "commit": "3dbf518c3a9df538ca110039758fbfe161e3f714", "date": "2023-03-30 14:50:46 +0500", "subject": "Convert XmlNode from allocation to object.", "body": "This makes memory accounting more accurate and objects that use Xml no longer need to allow allocations." }, { "commit": "047472144b5fd92425225700aa11b351044244e5", "date": "2023-03-30 14:42:20 +0500", "subject": "Remove allocations from common crypto module.", "body": "The allocations are better done with a struct and Buffer object." }, { "commit": "84ca7b9b2713b6d36d15d592e78942d8e1ac8a35", "date": "2023-03-30 11:42:22 +0500", "subject": "Cleanup for objects that do not require allocations.", "body": "A lot of these are left over from when object interfaces required allocations (changed in f6e30736 and 9ca9c8e4). Others are likely copy/paste errors.\n\nThis saves some space in the mem context and makes it clear that no allocations will be made." }, { "commit": "fe0fd71be2e9754491cbbdcde79e7a7cc87f2e7b", "date": "2023-03-30 10:00:58 +0500", "subject": "Convert the result of zNewFmt() into an object.", "body": "The result is not intended to be freed directly so this makes memory tracking more accurate. Fix a few places where memory was leaking after a call to zNewFmt().\n\nAlso update an assert to make it clearer." }, { "commit": "295b53e8453b4ac4889b62bc1c7f1d6b78b09421", "date": "2023-03-28 16:24:43 +0600", "subject": "Update Github actions to new versions.", "body": "These updates remove various deprecation warnings." 
}, { "commit": "b111599badccb1c196a224e3d6ac8657b8d172a2", "date": "2023-03-28 15:05:18 +0600", "subject": "Simplify object creation with OBJ_NEW_BEGIN() macro.", "body": "Eliminate the boilerplate of declaring this and assigning memory to it, which is the same for the vast majority of object creations.\n\nKeep the old version of the macro as OBJ_NEW_BASE_BEGIN() for a few exceptions in the core code and (mostly) in the tests." }, { "commit": "91f9301b9d0026ee72875d943e0ed01b14ec887b", "date": "2023-03-27 16:42:35 +0600", "subject": "Fix command/manifest unit test title." }, { "commit": "9ca9c8e4c97b60a4cceef1c6f9fcb93e413e15d7", "date": "2023-03-27 14:32:37 +0600", "subject": "Improve interface handling in remaining modules.", "body": "As in f6e30736, make the interface object the parent of the driver object rather than the interface being allocated directly in the driver object. Allow exceptions to this general rule for objects that need to retain ownership of their interfaces." }, { "commit": "5f001248cce08137a601ce5671aa67d26bfdd366", "date": "2023-03-25 14:59:47 +0700", "subject": "Fix indentation in config.yaml." }, { "commit": "8ff956ad7e8608dd7582825521105c289b249810", "date": "2023-03-25 14:07:31 +0700", "subject": "Add lock module initialization.", "body": "Each call to lockAcquireP() passed enough information to initialize the lock system. This was somewhat inefficient and as locks become more complicated it will lead to more code duplication. Since a process can only take one type of lock it makes sense to do most of the initialization up front.\n\nAlso reduce the log level of lockRelease() since it is only called at exit and the lock will be released in any case." }, { "commit": "f1caecc4ff38557cf81314738ac83d420d8bcf23", "date": "2023-03-24 10:34:42 +0800", "subject": "Convert lockAcquire() to lockAcquireP().", "body": "This makes a few current parameters optional and allows for more optional parameters with less code churn." }, { "commit": "c8ec114c8cb392d227fa267c62d11cc6b4fb9677", "date": "2023-03-21 12:29:45 +0800", "subject": "Add reference filter and output to manifest command.", "body": "This allows the file list to be filtered by reference. The reference is output when it is not the default reference for the backup." }, { "commit": "1807bfcff5550bb40fa4f92dee1e15580a0b752d", "date": "2023-03-21 12:22:47 +0800", "subject": "Clear buffer limit when buffer is limited to allocated size.", "body": "This minor optimization automatically clears the limit flag when limit is set to allocated size. This has no impact on the current code but will simplify a future commit where a conditional bufLimitClear() is being used." }, { "commit": "5b5786c082b698e637bd44b9f06fbca5e4e4cc21", "date": "2023-03-20 10:25:44 +0800", "subject": "Begin v2.46 development." }, { "commit": "6ad79d16ca638b7ccdc81b5530a39007b8e01aed", "date": "2023-03-20 09:37:23 +0800", "subject": "v2.45: Block Incremental Backup (BETA)" }, { "commit": "04d224b88e73194ff1c6497dbecaaff392008dfe", "date": "2023-03-18 20:47:03 +0800", "subject": "Add beta feature and documentation for block incremental backup." }, { "commit": "78c036efb7ebf7060949f3d2f2e6d93986734414", "date": "2023-03-16 17:46:32 +0700", "subject": "Updates required for new manifest command to work remotely.", "body": "Additional options are required and the BlockChecksum must work remotely." 
}, { "commit": "505a639f1e627b506c2e5a3351e8d4aacae9ed0b", "date": "2023-03-14 21:56:05 +0700", "subject": "Prototype manifest command.", "body": "Output a manifest in text or JSON format. Neither format is complete but they cover the basics.\n\nIn particular the manifest command outputs the complete block restore when the filter option is specified and the block delta when the pg option is also specified. This is non-destructive so it is safe to execute against a running cluster." }, { "commit": "c30d3e439b77d544477e289606d30d0c8ffe301f", "date": "2023-03-14 17:48:25 +0700", "subject": "Block incremental map fixes and improvements.", "body": "Bug Fixes:\r\n\r\n* Remove the distinction between maps where super block size is equal to block size and maps where they are not. In practice, maps with equal blocks are now rare and most of the optimizations can be applied directly to super blocks where the blocks are equal. This fixes a bug where a map that was created with equal size blocks and then converted to differing block sizes would generate an invalid map.\r\n\r\n* Free reads during restore to avoid running out of file handles.\r\n\r\nImprovements:\r\n\r\n* Store super block sizes in the block map. This allows the final block size to be removed from the block list and provides a more optimal restore and better potential for analysis.\r\n\r\n* Always round the super block size up to the next block size. This makes the number of blocks per super block more predictable.\r\n\r\n* Allow super block sizes to be changed at will in the map. The first case for this is to store the reduced super block size required when the last super block is short but it could be used to dynamically change the super block size to optimize compression.\r\n\r\n* Store a block count rather than a list of blocks in a super block. Blocks must always be sequential, though there may be an offset to the first block in a super block. This saves 11-14% on space for checksum sizes 6-7.\r\n\r\n* In the case that all the blocks for a super block are present, and there is no offset, the block size is omitted." }, { "commit": "5c1f78d4dd62c37218285d13b09a9450eb3a588e", "date": "2023-03-12 22:38:38 +0700", "subject": "Fix typo in blockIncrProcess()." }, { "commit": "1281a6eaf8552232c8c9af4d7fc15c082a7794d9", "date": "2023-03-12 16:21:43 +0700", "subject": "Ensure no continuations when block size equals super block size.", "body": "In this case each super block contains a single block so continuations are not possible." }, { "commit": "d7704a8bc837b0be5564737e491bf5f73796ca0a", "date": "2023-03-11 14:04:43 +0700", "subject": "Move backup pq test script generation to backup harness.", "body": "This allows scripted online backups to be used by other test modules." }, { "commit": "2fffd64213b7d51b6444f61502f4225584667731", "date": "2023-03-10 16:11:30 +0700", "subject": "Fix error handling in build-code binary.", "body": "120a49b6 improved the error handling but due to a copy/pasto errors were being handled as asserts, which tripped an assertion in the log module." }, { "commit": "19f3a1d3040df56e73f19674618eb5f447dabe8d", "date": "2023-03-10 16:08:30 +0700", "subject": "Add missing header." 
}, { "commit": "24f725212d0c1dedc0f34fe4e939e481585ff697", "date": "2023-03-10 15:30:27 +0700", "subject": "Add beta feature infrastructure.", "body": "This allows options to be marked as beta, which will require that the --beta option be supplied to prevent accidental usage of a beta feature.\n\nThe online and command-line documentation also show warnings when options are beta." }, { "commit": "6b409d049eb935146f7996b75b97744b75c411a9", "date": "2023-03-10 14:01:38 +0700", "subject": "Update default block size and super block values based on testing.", "body": "Block sizes are incremented when the size of the map becomes as large as a single block. This is arbitrary but it appears to give a good balance of block size vs map size.\n\nThe full backup super block size is set to minimize loss of compression efficiency since most blocks in the database will likely never be modified. For diff/incr backup super blocks, a smaller size is allowable since only modified blocks are stored. The overall savings of not storing unmodified blocks offsets the small loss in compression efficiency due to the smaller super block and allows more granular fetches during restore." }, { "commit": "1119a5353911d6d326edac325f530bb796a806ae", "date": "2023-03-09 11:04:03 +0700", "subject": "Rename BlockHash to BlockChecksum.", "body": "Checksum is the generally used terminology in the code base, even when a hash is being used as a checksum." }, { "commit": "6252c0e4485caee362edec13302a5f735a69bff4", "date": "2023-03-09 10:30:57 +0700", "subject": "Exclude backup set size from info for block incremental backups.", "body": "As calculated this size is not correct since it does not include the parts of prior block incrementals that are required to make the current block incremental valid. At best this could be approximated and the resulting values might be very confusing.\n\nFor now, at least, exclude this metric for block incremental backups." }, { "commit": "210bed4511ae5523b170eb193b877346d787140d", "date": "2023-03-09 10:02:04 +0700", "subject": "Use xxHash instead of SHA-1 for block incremental checksums.", "body": "xxHash is significantly faster than SHA-1 so this helps reduce the overhead of the feature.\n\nA variable number of bytes are used from the xxHash depending on the block size with a minimum of six bytes for the smallest block size. This keeps the maps smaller while still providing enough bits to detect block changes." }, { "commit": "8b5153ad210011812c8fe6db2f2ed829a5aa543a", "date": "2023-03-09 09:39:54 +0700", "subject": "Block-level incremental backup super blocks.", "body": "Small blocks sizes can lead to reduced compression efficiency, so allow multiple blocks to be compressed together in a super block. The disadvantage is that the super block must be read sequentially to retrieve blocks. However, different super block sizes can be used for different backup types, so the full backup super block sizes are large for compression efficiency and diff/incr are smaller for retrieval efficiency." }, { "commit": "740c2258e36613b1b4e76bab452d0e610a399e7f", "date": "2023-03-09 08:23:15 +0700", "subject": "Add pg-version-force option for fork integration.", "body": "Forks may update pg_control version or WAL magic without affecting the structures that pgBackRest depends on.\r\n\r\nThis option forces pgBackRest to treat a cluster as the specified version when it cannot be automatically identified." 
}, { "commit": "2fa7e53c5d4b5a5fd79f62d39763746727a5b9be", "date": "2023-03-08 19:05:23 +0700", "subject": "Skip writing recovery.signal by default for restores of offline backups.", "body": "When restoring an offline backup on PostgreSQL >= 12, skip writing recovery.signal by default since this will error if the backup was made with wal_level=minimal. If the user explicitly sets the type option to something other than none, then write recovery.signal as usual since it is possible to do Point-In-Time-Recovery from an offline backup as long as wal_level was not minimal." }, { "commit": "7e5adc03594a3b7a975001e80902a51df3ddd577", "date": "2023-03-07 18:46:24 +0700", "subject": "Use raw compression/encryption to bundling and block incremental backup.", "body": "Raw encryption was already being used for block incremental. This commit adds raw compression to block incremental where possible (see da918587).\n\nRaw compression/encryption is also added to bundling for a backup set when block incremental is enabled on the full backup. This prevents a break in backward compatibility since block incremental is not backward compatible." }, { "commit": "da9185870222057b97365f5db0acf797f959ec5e", "date": "2023-03-07 18:31:17 +0700", "subject": "Add optional raw format for compression types.", "body": "Raw format saves 12 bytes of header for gzip and 4 bytes of checksum for lz4 (plus CPU overhead). This may not seem like much, but over millions of small files or incremental blocks can really add up. Even though it may be a relatively small percentage of the overall backup size it is still objectively a large amount of data.\n\nUse raw format for protocol compression to exercise the feature.\n\nRaw compression format will be added to bundling and block incremental in a followup commit." }, { "commit": "f6e307365f5a55a8bb4eb694e0a11fb4f32fb6e7", "date": "2023-03-07 11:27:41 +0700", "subject": "Improve interface handling in storage module.", "body": "Make the interface object the parent of the driver object rather than the interface being allocated directly in the driver object.\n\nThe prior method was more efficient when mem contexts had a much higher cost. Now mem contexts are cheap so it makes more sense to structure the objects in a way that works better with mem context auditing. This also means the mem context does not need to be stored separately since it can be extracted directly from the interface object.\n\nThere are other areas that need to get the same improvement before the specialized objMoveContext() and objFreeContext() functions can be removed." }, { "commit": "120a49b6590a549a72bfe0a6a5a22905e835b2ce", "date": "2023-03-06 21:07:08 +0700", "subject": "Improved error handling in build-code binary.", "body": "Show a full stack trace instead of just the error." }, { "commit": "0818601c055724bde3b97d4b02d3a38b0a53899a", "date": "2023-03-06 09:49:33 +0700", "subject": "Set online flag in manifest in command/restore unit tests.", "body": "This flag does not currently affect restore behavior but it will in an upcoming commit. Set the flag here to simplify the test diff in the upcoming commit." }, { "commit": "c656669ac2354dab504d94b604e305c3b68bb493", "date": "2023-03-04 12:50:02 +0700", "subject": "Allow control version and WAL magic to be overridden in test harness.", "body": "This makes it easier to write tests for invalid control version/WAL magic.\n\nAlso add HRN_PG_WAL_TO_BUFFER() to simplify generation of WAL headers." 
}, { "commit": "1648c133d6bc1ffd0682ae6219934d00295aa580", "date": "2023-03-04 12:45:08 +0700", "subject": "Keep only one all-default group index.", "body": "It is possible for a group index to be created for an option that is later found to not meet dependencies. In this case all values would be default leading to a phantom group, which can be quite confusing.\r\n\r\nRemove group indexes that are all default (except the final one) and make sure the key for the final all default group index is 1." }, { "commit": "439447977681ea8730935535e25a657334596942", "date": "2023-02-28 08:47:51 +0700", "subject": "Fix typo and remove extraneous linefeed." }, { "commit": "16ac5ee8d3b926464cd27e8f064579f1e7935f4c", "date": "2023-02-26 16:13:44 +0700", "subject": "Rename block incremental manifest keys.", "body": "Since the keys need to be read/written in order, these keys make the logic a bit simpler." }, { "commit": "a9867cb0b8e8d7abcac9b18b76e3e490c265f3cb", "date": "2023-02-26 14:49:34 +0700", "subject": "Add repo-block-age-map and repo-block-size-map options.", "body": "Make these options configurable. This is primarily for testing purposes so the new options will be kept internal." }, { "commit": "15d5dcdd3bb387aab28bbe7262011c38c7d6d22c", "date": "2023-02-26 14:41:32 +0700", "subject": "Add explicit instructions for upgrading between 2.x versions.", "body": "Add an explicit statement that there is nothing special to do when upgrading between 2.x versions.\r\n\r\nLeave the previous paragraph about the default location that changed between 2.00 and 2.02, as it is more a matter of transitioning from 1.x to 2.x." }, { "commit": "dffc933384e4327e2fd52868dd19668ed9d3ed91", "date": "2023-02-13 09:17:30 +0700", "subject": "Rename DeltaMap to BlockHash.", "body": "This more accurately describes what the object does." }, { "commit": "779efe0d7a54340141e01597279e9ab1d0bd0773", "date": "2023-02-09 13:01:56 +0700", "subject": "Consistently declare block incremental size as size_t.", "body": "The block is often allocated in memory so size_t makes more sense than uint64_t." }, { "commit": "d520816acf9ecb01ef805cecbc9de7e8d0a9f468", "date": "2023-02-09 08:11:05 +0700", "subject": "Remove parameter list from deltaMapNew().", "body": "Since this filter cannot be used remotely (yet) there is no reason to create a parameter list." }, { "commit": "3feed389a2199454db68e446851323498b45db20", "date": "2023-02-08 22:34:23 +0700", "subject": "Improve IoChunkedRead end-of-file handling.", "body": "Determine end-of-file earlier to improve throughput.\n\nAlso clean up some comments and formatting." }, { "commit": "089fae035bf0a9778fff0527ea3cbb113625a403", "date": "2023-02-07 14:09:50 +0700", "subject": "Add block incremental to real/all test output." }, { "commit": "31cad5e09e1910fc9d36d18306524aabca3ff36b", "date": "2023-02-01 12:57:04 +0700", "subject": "Check for stray execute permissions in test.pl --code-format.", "body": "Sometimes these can get set while updating permissions from with a VM or container.\n\nAlso fix a few permissions that were not correct." }, { "commit": "8e7e9d36a133f2ec01f4864b5b0c80344888b737", "date": "2023-01-31 21:28:28 +0700", "subject": "Fix contributors in release notes." }, { "commit": "c5907a2e7193ca8f79cf56dca23e0df118e686c3", "date": "2023-01-31 08:28:32 +0700", "subject": "Remove references to SSH made obsolete when TLS was introduced.", "body": "Also remove details about SSH compression that are not helpful." 
}, { "commit": "9ae6b6ef0f344b49f6153bf842ac815b0fcb9ea3", "date": "2023-01-31 08:04:09 +0700", "subject": "Update comments missed in copy/paste." }, { "commit": "ce0ea2cfab17e6770de3897133183af891df943c", "date": "2023-01-30 12:22:41 +0700", "subject": "Use uncrustify for code formatting.", "body": "The code is not completely reflowed yet so there are some cases that uncrustify will not catch. The formatting will be improved over time.\n\nSome block of code require special formatting so have been surrounded with the {uncrustify-off}/{uncrustify-on} markers. These exceptions should be kept to a minimum.\n\nAdd --code-format (to reformat code) and --code-format-check (to check formatting) to test.pl.\n\nAdd a CI test that will check code formatting. Code must be correctly formatted before it can be merge to integration.\n\nAdd documentation to the coding standards for code formatting." }, { "commit": "d4070c90641a61fa3cb1169f3bd53067193bab4e", "date": "2023-01-30 11:55:54 +0700", "subject": "Reformat code with uncrustify.", "body": "uncrustify has been configured to be as close to the current format as possible but the following changes were required:\n\n* Break long struct initializiers out of function calls.\n* Bit fields get extra spacing.\n* Strings that continue from the previous line no longer indented.\n* Ternary operators that do not fit on a single line moved to the next line first.\n* Align under parens for multi-line if statements.\n* Macros in header #if blocks are no longer indented.\n* Purposeful lack of function indentation in tests has been removed.\n\nCurrently uncrustify does not completely reflow the code so there are some edge cases that might not be caught. However, this still represents a huge improvement and the formatting can be refined going forward.\n\nSupport code for uncrustify will be in a followup commit." }, { "commit": "b2202c36d9e7c4557ac37087757df80193d516b5", "date": "2023-01-30 11:16:31 +0700", "subject": "Fix formatting errors.", "body": "Errors in our current (manually-maintained) code format discovered by uncrustify." }, { "commit": "a96837d2eda4b7497e74954b24d9323f33465c50", "date": "2023-01-30 10:55:14 +0700", "subject": "Refactor loop limit in removeExpiredArchive().", "body": "This simplifies the formatting, which was a bit tortured. Also, there is no need to check the limit on each iteration so this is a bit more efficient." }, { "commit": "f91af305845aa1e99d830a7a6addaee058b518d1", "date": "2023-01-30 10:51:48 +0700", "subject": "Add FN_PRINTF() macro.", "body": "Bring the format(printf) attribute in line with the FN_NO_RETURN and FN_INLINE_ALWAYS macros.\n\nThis is simpler to read and can be customized for different compilers." }, { "commit": "f8ea51bf8f4de61fe3541e2f2701b9dccb5ede70", "date": "2023-01-30 10:27:17 +0700", "subject": "Refactor stackTraceToZ() to avoid multiple versions of the function.", "body": "stackTraceToZ() was split this way in c8264291 to allow complete coverage. 0becb6da added a shim to improve coveage but missed simplifying the function." }, { "commit": "0d8e08f69c2ece7a060bd30dff0b375d013fe2fd", "date": "2023-01-30 09:29:07 +0700", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "240312110c5b1c07deb200b06755a886b723485b", "date": "2023-01-30 09:27:04 +0700", "subject": "Begin v2.45 development." 
}, { "commit": "053468bfb156a8ecbd5d0325d35141056c89ec52", "date": "2023-01-30 09:15:44 +0700", "subject": "v2.44: Remove PostgreSQL 9.0/9.1/9.2 Support" }, { "commit": "a28f3d49c23d7dbd8873d07b7d513b6b7e59345a", "date": "2023-01-28 18:55:53 +0700", "subject": "Add --no-back-trace option to test.pl.", "body": "Running valgrind and backtrace together has been causing tests to timeout in CI, mostly likely due to limited resources. This has not been a problem in normal development environments.\n\nSince it is still important to run backtraces for debugging, split the u22 test that was doing all this work to run coverage and backtrace together and valgrind-only as a separate test. As a bonus these tests run faster separately and since they run in parallel the total execution time is faster." }, { "commit": "16c625353db2569ebcc0c28e09d76a9811d2a241", "date": "2023-01-28 16:22:04 +0700", "subject": "Skip test modules in coverage report.", "body": "Coverage of the test code is useful during development but it does not seem worth adding it to the core code coverage report." }, { "commit": "d5ef4ce747f0d743b76c5fe05c4e4a02cdd9bd09", "date": "2023-01-28 11:01:29 +0700", "subject": "Fix permissions on source files." }, { "commit": "3aea997df5ace85251d0579ccbe13fa938f587ee", "date": "2023-01-25 10:59:13 +0700", "subject": "Add warning about enabling \"hierarchical namespace\" on Azure storage.", "body": "If this feature is enabled expire will fail since directories need to be deleted separately.\r\n\r\nIdeally we would add support for this feature but for now we'll just document the issue." }, { "commit": "ed818b31867c640d72ef829dd36cd0a7feff12df", "date": "2023-01-25 10:35:03 +0700", "subject": "Add dark mode to the website.", "body": "The colors could use more tweaking but at least the website will no longer blind users running dark mode." }, { "commit": "1da2666a9ef41457ac64ddf2cabccc93b3b05aeb", "date": "2023-01-21 14:03:27 +0700", "subject": "Add manifest test harness.", "body": "These macros make adding paths/files/etc to a manifest simpler and easier to read." }, { "commit": "912eec63bb971c10a545e582f628c982f4e00005", "date": "2023-01-20 16:48:57 +0700", "subject": "Block-level incremental backup.", "body": "The primary goal of the block incremental backup is to save space in the repository by only storing changed parts of a file rather than the entire file. This implementation is focused on restore performance more than saving space in the repository, though there may be substantial savings depending on the workload.\r\n\r\nThe repo-block option enables the feature (when repo-bundle is already enabled). The block size is determined based on the file size and age. Very old or very small files will not use block incremental." }, { "commit": "008a18555c6474be4327bfe0830ef13166a18ece", "date": "2023-01-20 15:44:01 +0700", "subject": "Fix comment indentation and wrapping." }, { "commit": "bfc9a5c33e032ee7f95180cafcec07ce19e11aa9", "date": "2023-01-14 18:01:48 +0700", "subject": "Fix incorrect FUNCTION_TEST_END() in hrnPgWalToBuffer()." }, { "commit": "ccee5c0fb1653f00e74de13d917a24b8b3506c22", "date": "2023-01-14 17:12:15 +0700", "subject": "Reduce log level of pgVersionFromStr() and pgVersionToStr()." }, { "commit": "596c62c54e375fd25e7e34e9604b47ecc1ab73ff", "date": "2023-01-14 15:25:25 +0700", "subject": "Simply return in pgVersionToStr()." 
}, { "commit": "34e4835ff30e7f44cb36b3f1ed29d670e74a75ca", "date": "2023-01-12 21:24:28 +0700", "subject": "Refactor common/ini module to remove callbacks and duplicated code.", "body": "The callbacks in iniLoad() made the downstream code more complicated than it needed to be so use an iterator model instead.\r\n\r\nCombine the two functions that were used to load the ini data to remove code duplication. In theory it would be nice to use iniValueNext() in the config/parse module rather than loading a KeyValue store but this would mean a big change to the parser, which does not seem worthwhile at this time." }, { "commit": "9ca492cecfd40516b8815080af23ee3e64efc22a", "date": "2023-01-12 17:36:57 +0700", "subject": "Audit mem contexts returned from functions into the calling context.", "body": "It is possible for functions to accidentally leak child contexts into the calling context, which may use a lot of memory depending on the use case and where it happens.\n\nUse the function return type to determine what should be returned and error when something else is returned. Add FUNCTION_AUDIT_*() macros to handle exceptions.\n\nThis checking is only performed during unit tests on the code being covered by the specific unit test.\n\nNote that this does not work yet for memory allocations, i.e. memNew(). These are pretty rare so are not as much of an issue and they can be added in the future." }, { "commit": "de1dfb66ca022e7ab8370503eacda94629f6ae3f", "date": "2023-01-12 17:14:36 +0700", "subject": "Refactor logging functions to never allocate memory.", "body": "Allocating memory made these functions simpler but it meant that memory was leaking into the calling context when logging was enabled. It is not clear that this was an issue but it seems that trace level logging could result it a lot of memory usage depending on the use case.\n\nThis also makes it possible to audit allocations returned to the calling context, which will be done in a followup commit.\n\nAlso rename objToLog() to objNameToLog() since it seemed logical to name the new function objToLog()." }, { "commit": "890b9d009390e472bd96ac74f6b347d7b1cc5a12", "date": "2023-01-12 15:04:49 +0700", "subject": "Remove mostly useless mem context in storageRemoteInfoProtocolPut().", "body": "Maybe this once had a deeper purpose but now at least it is just used to avoid a few trivial allocations. If we really wanted to do that a flag would be better but it does not seem worth the trouble." }, { "commit": "6ec2241e15c4d6d6c8fe9a09c2cd562631ba8423", "date": "2023-01-12 14:24:20 +0700", "subject": "Fix leak in protocolServer()." }, { "commit": "9d42435ffbaa20febae5a9a5c62446a0fe5d0090", "date": "2023-01-12 14:22:09 +0700", "subject": "Fix leaks in db module." }, { "commit": "7f7d515441ee28e10d5f2aa63f986128638342bc", "date": "2023-01-12 14:19:10 +0700", "subject": "Fix leak in configOptionRemote()." }, { "commit": "123d5e77ae188dfe75a431fcb131dcca1b1913bb", "date": "2023-01-12 14:17:48 +0700", "subject": "Minor optimizations on config/parse module." }, { "commit": "0becb6da3147e7992b7e91f78032a961e7c352be", "date": "2023-01-12 10:22:26 +0700", "subject": "Enhance libbacktrace to handle incomplete stack traces.", "body": "This fills in backtrace info at the bottom of the call stack when the stack trace is incomplete due to testing. 
This does not affect release builds, which is why it did not make the first cut, but it turns out to be useful for testing and barely changes the release code (when we do release this).\n\nThe recursion test in common/error was simplified because it would now return a very large trace." }, { "commit": "57fc4aaeb9b2aea84fce5bb813980116ad01dbe7", "date": "2023-01-12 09:06:01 +0700", "subject": "Update RHEL user guide to PostgreSQL 11/12.", "body": "PostgreSQL 10 has expired from the PGDG yum repository." }, { "commit": "74ca9b50bbdbb33dafedcc8e1230a83cb619c3f5", "date": "2023-01-11 19:43:10 +0700", "subject": "Revert macro changes from 38d32045.", "body": "This seemed like it would be cleaner but an important detail was missed (logAny) and it does not seem simpler when factoring that back in.\n\nKeep the removal of the extraneous semicolons and all the downstream changes required by the removal." }, { "commit": "9347313cbf1dbd5590b7ca481bd37bcb5de14b00", "date": "2023-01-11 19:16:31 +0700", "subject": "Fix error detail output to stderr.", "body": "The error detail should be output when the error is an assert (this part was working) or the log level is at least debug. In cases where log-level-console was at least debug but log-level-stderr was not, the detail was lost.\n\nImprove the range checking to output error detail to stderr when log-level-console is at least debug." }, { "commit": "c8264291238e0bd199e4babab7013060e7fcba7d", "date": "2023-01-11 11:19:26 +0700", "subject": "Improve libbacktrace implementation.", "body": "The libbacktrace feature has not been working since the move to meson because libbacktrace detection was not added to the meson build. Add libbacktrace to meson and improve the feature so that it can be compiled into release builds.\n\nThe prior implementation fetched line numbers with each stack trace push. Not only was this slow but it missed any functions that were not being tracked on our stack.\n\nInstead just examine the backtrace when an error happens and merge it with the info we have on our stack. If the backtrace is not available then the output remains as before.\n\nAlso remove --backtrace from test.pl since the library is now auto-detected.\n\nLeave this library out of the production build for now to give it a little time to shake out in testing." }, { "commit": "0dd2997714ed31b4a005fc0061048f3697c9ea1a", "date": "2023-01-10 11:45:48 +0700", "subject": "Fix leak in jsonReadVarRecurse()." }, { "commit": "675b4ae251564742e23a9e6acdfb1d764b9b159d", "date": "2023-01-10 11:17:08 +0700", "subject": "Fix leak in archivePushDropWarning()." }, { "commit": "bf9e331ac15b0bd261f1a71e1307b51c9934aa85", "date": "2023-01-10 10:14:43 +0700", "subject": "Fix incorrect return type in backupFile()." }, { "commit": "b722d2b6ca31edead49b6dcc8fe0286bb508f5b2", "date": "2023-01-10 09:28:57 +0700", "subject": "Fix formatting of archiveGetFile()." }, { "commit": "38d320455336ab6da16c7c1b86e52d3118bcbd73", "date": "2023-01-09 14:47:57 +0700", "subject": "Remove duplication from FUNCTION_LOG_BEGIN_BASE() macro definitions.", "body": "It is a bit simpler to define STACK_TRACE_TEST_START()/STACK_TRACE_TEST_STOP() in a separate #ifdef so FUNCTION_LOG_BEGIN_BASE() does not need to be defined twice.\n\nAlso add missing semicolons exposed by this change." 
}, { "commit": "6633ccd18d66f3354ce6af27c93916bce330981b", "date": "2023-01-05 14:03:43 +0700", "subject": "Do not output stderr on unit/integration test errors.", "body": "Since stderr is being redirected to stdout this results in duplication of the error output." }, { "commit": "4429bc82f530fd87cd4e370c012054be117b76b9", "date": "2023-01-05 12:59:06 +0700", "subject": "Add unit tests for the unit test build code.", "body": "When this code was migrated to C the unit tests were not included because there were more important priorities at the time.\n\nThis also requires some adjustments to coverage because of the new code location." }, { "commit": "a5499afabc7729efc2254010b47aeced74d0e918", "date": "2023-01-05 12:44:00 +0700", "subject": "Add CHECK_FMT() macros and tests for CHECK*() macros.", "body": "The CHECK_FMT() macro will be used in some upcoming tests but could also be useful in the core code.\n\nThe CHECK*() macros never had tests so add them." }, { "commit": "2e3513984ef528404341e4e399f5257d6f2f1ccb", "date": "2023-01-05 12:33:17 +0700", "subject": "Remove extra linefeed." }, { "commit": "877bb2ac9e452ddeef6ddd4845c4fb59d83379fe", "date": "2023-01-03 08:26:44 +0700", "subject": "Update LICENSE.txt and PostgreSQL copyright for 2023." }, { "commit": "2b2a6edf355f50c0d399243e6d4d9fa495a1ec34", "date": "2023-01-02 18:53:05 +0700", "subject": "Cleanup of common/ini module in preparation for a refactor." }, { "commit": "92d5fd17b4ddb2e72e4cf5c10fd7708d5a9fad56", "date": "2023-01-02 17:32:09 +0700", "subject": "Fix formatting of restoreFile() definition." }, { "commit": "d51a86c6210001359f86aadc36285137fe8f2da9", "date": "2023-01-02 17:30:57 +0700", "subject": "Add BUFFER_EXTERN() and cleanup hash zero constants.", "body": "BUFFER_EXTERN() provides a clean way to create buffer constants.\n\nConvert HASH_TYPE_SHA256_ZERO_STR to HASH_TYPE_SHA256_ZERO_BUF to be consistent with HASH_TYPE_SHA1_ZERO_BUF." }, { "commit": "66f108ea8a01ac881a47f4cbd8d5fb3ca8102e1d", "date": "2023-01-02 17:26:53 +0700", "subject": "Cleanup terminology in Buffer/Variant/String macros." }, { "commit": "f0189129088456cf22f7bf86fa505cf9c28a5e37", "date": "2023-01-02 15:24:51 +0700", "subject": "Split VR_EXTERN/FN_EXTERN macros from FV_EXTERN.", "body": "This should make it a little clearer what the variable (VR) macros are doing since the declaration/definition cannot both be set to extern (but functions can).\n\nSplitting the variable macros out also allows them to be changed in the future with little churn, while changing the function macro creates a large amount of churn." }, { "commit": "9e29c01891225dd5f371b76d324366bbb3694ca4", "date": "2023-01-02 15:11:37 +0700", "subject": "Remove MacOS from CirrusCI.", "body": "CirrusCI stopped supporting Intel but the arm builds are not working, even with the same steps that work on an arm Mac.\n\nRemove to unstick the build pipeline until this can be resolved." }, { "commit": "8993267df9864852efd59a9b5366e9a521a6c08e", "date": "2022-12-31 23:33:45 +0700", "subject": "Fix formatting." }, { "commit": "1793839e5a821a7177922e4cdad104eba772c3ec", "date": "2022-12-31 19:12:35 +0700", "subject": "Clean up storage headers and move posix internal type." }, { "commit": "4fb8a0ecdd12bdb01273cebbecf61f6beb22146d", "date": "2022-12-31 17:13:41 +0700", "subject": "Add meson unity build and tests.", "body": "This is immediately useful because it will detect any extern'd functions or variables that are not being used. 
It also detects functions or variables that are declared but not defined.\n\nIf a FV/VR_EXTERN macro is missing it will be detected either because of a mismatch in the declaration/definition or because a new defined symbol will appear in the nm test.\n\nEventually the unity build will be used to create a more optimized pgbackrest binary but that will need to wait." }, { "commit": "8aa2b101bbd9fe07971794bf4bb4c8d2a61c7b07", "date": "2022-12-31 16:41:01 +0700", "subject": "Put logging functions unused by release builds into DEBUG blocks.", "body": "Also inline some functions that are needed in non-DEBUG builds." }, { "commit": "4a64c5d80c9af2a3498943f326de7295679c63e6", "date": "2022-12-31 15:49:32 +0700", "subject": "Use memcpy() instead of strncpy when source size is known.", "body": "In this case the destination will be large enough to hold the source so memcpy is more efficient.\n\nAlso, in highly optimized builds the compiler may warn for strncpy() when it can see that the source size won't include the terminator." }, { "commit": "2332ce8ffc16494766f1f6ee07341242e2b7b7c2", "date": "2022-12-31 13:14:27 +0700", "subject": "Move storageHelperFree() to storageHelper test harness.", "body": "This function was only used for testing so move it into a test harness." }, { "commit": "8b218158ae70136d7a2ddc5a7347f79e69fbeb13", "date": "2022-12-31 12:54:33 +0700", "subject": "Move regExpMatchPtr()/regExpMatchStr() to build/common/regExp module.", "body": "Similar to b9be4fa5, these functions are not used by the core code so move them to the build module. The new implementation is a little less efficient but that is much less of a worry in the build/test code.\n\nAlso remove regExpMatchSize() since it was no longer needed." }, { "commit": "45ece13678f94c57777180710a7ea71a1c60971a", "date": "2022-12-31 11:57:06 +0700", "subject": "Use error*() getters where possible.", "body": "This makes it a little easier to change the underlying representation and ensures that the functions are used." }, { "commit": "fa9d831f9f722f28567c4997551cd6bd1d7077d3", "date": "2022-12-31 11:09:50 +0700", "subject": "Move xmlNodeAttribute() to build/common/xml module.", "body": "Similar to b9be4fa5, this function was not used by the core code so move it to the build module." }, { "commit": "c6264ad4cf55920558513127d4e221ce8e85eb28", "date": "2022-12-31 10:31:15 +0700", "subject": "Fix comment typo for strLower()." }, { "commit": "163a004f30675403eba2a619412bd803eba4d71a", "date": "2022-12-31 10:26:11 +0700", "subject": "Move strReplace() and strUpper() to build/common/string module.", "body": "Neither of these functions was used by the core code. strReplace() is only used in the tests but it doesn't hurt to put it in build since the build code is not distributed." }, { "commit": "d517d4a3283c0ccc5ca5d06593d1b244dd5e68b4", "date": "2022-12-31 10:10:44 +0700", "subject": "Add explicit keyword for covered modules included in other modules.", "body": "This was done by checking the extension but it is possible to include a module that does not have a vendor or auto extension. Instead make it explicit that the module is included in another module.\n\nAlso change the variable from \"include\" to \"included\" to make it clearer what it indicates." }, { "commit": "729d7f071dafe22f85201bfaf2bae461ec54d285", "date": "2022-12-30 18:45:11 +0700", "subject": "Put memContextSize() in a DEBUG block.", "body": "This function is only used in unit tests so no need to include it in the release build." 
}, { "commit": "cebbf0d012ad7204c4770efa3308daddc267415f", "date": "2022-12-30 16:26:48 +0700", "subject": "Remove unused functions.", "body": "These functions were either added with the intention that they would be used or they became obsolete over time." }, { "commit": "6f0c652e2c50f288fc4f541a994281032856be2b", "date": "2022-12-30 14:59:16 +0700", "subject": "Remove declaration for function that is no longer defined." }, { "commit": "416fadd60cc0401e5e730867d98ae59ff54f8dcd", "date": "2022-12-30 14:48:31 +0700", "subject": "Mark functions not used externally as static.", "body": "Also cleanup a stray linefeed." }, { "commit": "b9be4fa54039e2c79fad0dc1552c7ffaf883004c", "date": "2022-12-29 15:37:27 +0700", "subject": "Restore errors when no backup matches the current version of PostgreSQL.", "body": "It is probably not a good idea to restore the latest backup when it was not made from the current PostgreSQL version. If there is no backup after a stanza-upgrade then replicas might be built with a prior version leading to failures.\r\n\r\nAdd an error in this case if the latest backup would be used, i.e. --set or --type=time/lsn is not specified." }, { "commit": "aa1e72dfe662678919d3b01aa0e6454b677873ad", "date": "2022-12-29 15:05:45 +0700", "subject": "Fix improper formatting in config/parse module.", "body": "The prior formatting worked but was confusing to read." }, { "commit": "36ee30d118ce6cc7720653a3d3a49a06c655c463", "date": "2022-12-28 19:15:44 +0700", "subject": "Updates and clarifications to index page.", "body": "In particular the section about other backup solutions not supporting parallel processing was no longer accurate, so reword it.\r\n\r\nAlso update some other sections that used older nomenclature, had awkward wording, or needed clarification." }, { "commit": "84a3ff8b7ac1243c0b47c6585697f4fcf3c12065", "date": "2022-12-28 10:48:44 +0700", "subject": "Clarify target-action behavior on various PostgreSQL versions.", "body": "The behavior of pause depends on the hot_standby parameter and the PostgreSQL version so mention both.\r\n\r\nThis behavior has been verified on PostgreSQL 9.6–15. PostgreSQL 12 is an inflection point because the behavior of an unset recovery_target_action with hot_standby=off changed in https://git.postgresql.org/gitweb/?p=postgresql.git;h=2dedf4d9a899b36d1a8ed29be5efbd1b31a8fe85." }, { "commit": "ae258f604e634f1d2a760ffdb92dd359fd1129bb", "date": "2022-12-27 20:28:38 +0700", "subject": "Add replacement for linefeeds in monitoring example.", "body": "The copy command was converting \\n to a linefeed, which the json conversion did not like. In a healthy repository there won't be any linefeeds but certain errors can contain them.\r\n\r\nFix by loading into a text field and then replacing the linefeed when converting to jsonb." }, { "commit": "44da314adb2b63f96b1d571f822e91a0e412e984", "date": "2022-12-27 20:05:08 +0700", "subject": "Add compress-level range checking for each compress-type.", "body": "The prior range checking was done based on the valid values for gz. While this worked it was a subset of what is available for lz4 and zst.\r\n\r\nAllow the range to be specified for each compress-type. Adding this functionality to the parse module would be a better solution but that is a bigger project than this fix deserves, at least for now." }, { "commit": "34110cfa0134d181c749cc040446f703b643bc9c", "date": "2022-12-27 11:31:59 +0700", "subject": "Fix typo." 
}, { "commit": "0b67389185558d4593be1ca56143de6a43e45f9f", "date": "2022-12-27 09:18:17 +0700", "subject": "Improve comment on forced WAL switch at backup start." }, { "commit": "a63e87ea2cd0efbf9e9b78adccdf7285a6876b3b", "date": "2022-12-23 14:06:35 +0700", "subject": "Move function prototype to a better location in the header." }, { "commit": "56b55f81e894c0b2875f92913bda8f0679009b17", "date": "2022-12-22 09:26:26 +0700", "subject": "Add repository checksum to make verify and resume more efficient.", "body": "Calculate a checksum of the data stored in the repository when a file is transformed (e.g. compressed). This allows resume and verify to operate without needing to decompress/decrypt the data.\r\n\r\nThis can also be used to verify more complex formats such as block incremental and allow backups from the repository without needing to decompress the data to verify the checksum.\r\n\r\nAdd some basic encrypted tests to maintain coverage. These will be expanded in a future commit." }, { "commit": "2ab845e2636fcd865032075b4dce9e520d094d87", "date": "2022-12-20 16:35:27 +0700", "subject": "Store manifest checksums in memory more efficiently.", "body": "Manifest checksums were stored as hex-encoded strings due to legacy compatibility with Perl. Storing the checksums as binary in memory uses half the space and avoids many conversions.\r\n\r\nThere is no change to the on-disk manifest format which stores the checksum as a hex-encoded string." }, { "commit": "77c721eb635c6d757d2d5ed6f2ffc083720c10a7", "date": "2022-12-20 12:20:47 +0700", "subject": "Remove support for PostgreSQL 9.0/9.1/9.2.", "body": "Our new policy is to support ten versions of PostgreSQL, the five supported releases and the last five EOL releases. As of PostgreSQL 15, that means 9.0/9.1/9.2 are no longer supported by pgBackRest.\r\n\r\nRemove all logic associated with 9.0/9.1/9.2 and update the tests.\r\n\r\nDocument the new support policy.\r\n\r\nUpdate InfoPg to read/write control versions for the history in backup.info, since we can no longer rely on the mappings being available. In theory this could have been an issue after removing 8.3/8.4 if anybody was using a version that old." }, { "commit": "17ce738ff749c50f4ca721144cd9addce9e895f2", "date": "2022-12-11 20:05:31 +0700", "subject": "Use smaller type for decode tables.", "body": "This saves a bit of space in the binary and should not have a noticeable impact on performance." }, { "commit": "010efffb0c3c7935f789e5c560104e5a3019a28f", "date": "2022-12-11 19:46:48 +0700", "subject": "Add hex encode/decoding to decode module.", "body": "This replaces the bufHex() function and also allows hex to be decoded." }, { "commit": "9a9ee8e6406721c5a1825b8a25baef944802c92d", "date": "2022-12-11 18:55:25 +0700", "subject": "Rename EncodeType enum to EncodingType.", "body": "This avoids constructs such as decodeToBin(encodeBase64, ...) which are confusing since decode and encode are used in the same function call. decodeToBin(encodingBase64, ...) makes it clearer what is happening." }, { "commit": "1345caa3f70e24ff759967692c10c07e63f44d00", "date": "2022-12-07 20:30:38 +0700", "subject": "Update missing flag in storage/posix test.", "body": "The storageNewItrP() permissions test was running twice with the errorOnMissing flag set to false. Fix by setting to true for one test.\n\nAlso update the comments to be clearer about what the tests are doing and fix minor formatting." 
}, { "commit": "81d84ab495bfeaf0102adce2425ab3a528bb8845", "date": "2022-12-07 10:57:29 +0100", "subject": "Fix typos." }, { "commit": "4dc632d57064420134f3a198fa18dc951d5bf574", "date": "2022-12-05 14:15:15 +0800", "subject": "Add backup test harness.", "body": "This allows test backups to be run in other test modules.\n\nIt is likely that more logic will be moved here but for now this suffices to get test backups working in the restore module." }, { "commit": "96cf479d3c46b310b87e69bd2e19a30e71e5824c", "date": "2022-12-05 14:00:25 +0800", "subject": "Add missing header in lock module." }, { "commit": "c972a9359beb5b75c4b0ea57bcb12a34fe5e7d84", "date": "2022-11-28 17:56:59 +0800", "subject": "Begin v2.44 development." }, { "commit": "cc2ffd8264bb4e10e6b289ce3978452018f725e8", "date": "2022-11-28 17:47:48 +0800", "subject": "v2.43: Bug Fix" }, { "commit": "c4bf775099c85e3cd081f9c1ce35efd4c71d9161", "date": "2022-11-28 16:42:35 +0800", "subject": "Fix missing reference in diff/incr backup.", "body": "When loading prior manifests without the new reference list, the code failed to add the current backup to the reference list. Since the current backup is never explicitly referenced, building references from the file list was not sufficient to generate a complete list.\r\n\r\nThe main problem here was a bad test, fixed in 28f6604. This masked the issue and prevented it from being found. Now it is clear in the test that the current label is missing from the reference list.\r\n\r\nFix by adding the current label to the reference list if a reference list is not stored in the manifest." }, { "commit": "28f660482aa5abb97850a9d7c89c2e5e9f23a4c9", "date": "2022-11-26 10:48:02 +0800", "subject": "Move manifestBackupLabelSet() test in info/manifest unit.", "body": "Changing the label of a manifest that already had a label was not a good test and it ended up masking a bug where the current backup label was not being added to the reference list on manifest load, since manifestBackupLabelSet() added the label to the reference list. In fact, manifestBackupLabelSet() should never be called after a manifest load or even after the label has been set.\n\nAdd an assertion to prevent manifestBackupLabelSet() being called when the label is already set.\n\nThe bug exposed here will be fixed in a subsequent commit." }, { "commit": "3f363cb3aed1a7c6836ee6c32eae600129a6f6b8", "date": "2022-11-22 15:04:13 +0800", "subject": "Add hint when an option is specified without an index.", "body": "Hopefully this will make it a little clearer to the user what is wrong when they specify an indexed option without an index.\r\n\r\nAlso fix an ambiguous use of cfgParseOptionP(). The prior code worked in that it set prefixMatch = true but it was not very readable." }, { "commit": "27460862e77b7fba327c92448b2ec8ad41f2016c", "date": "2022-11-22 10:29:32 +0800", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "092e254794c6d9eba9fc3175357bc91257f2eedf", "date": "2022-11-22 10:27:20 +0800", "subject": "Begin v2.43 development." 
}, { "commit": "70b75532bf20a025323b2a3138497ef01eb1774f", "date": "2022-11-22 10:20:59 +0800", "subject": "v2.42: Bug Fixes" }, { "commit": "3ad588443b452e04f3abcc107e825dc47e078103", "date": "2022-11-14 12:47:27 +0800", "subject": "Update test.pl --psql-bin option to match command-line help.", "body": "The option to specify the path to psql was shown in the command-line help as --psql-bin but the option was actually named --pgsql-bin.\r\n\r\nRename to match the help so they are consistent." }, { "commit": "58b3c91babc6af9e003e5ef09bf99becf8a6f827", "date": "2022-11-10 10:28:49 +0930", "subject": "Add raw mode to CipherBlock to save space.", "body": "The magic in the header is only required so that command-line openssl will recognize the file as being encrypted. In cases where the encrypted data cannot be read with the command-line tool it makes sense to omit the header magic to save some space.\n\nUnfortunately this cannot be enabled for file bundling because it would break backward compatibility. However, it should be possible to enable it for the combination of bundling and block incremental." }, { "commit": "c9db7bc274211820030c4ab6ab8c70c1b1896254", "date": "2022-11-06 16:12:23 +0930", "subject": "Update cipherBlockNew() to allow optional parameters.", "body": "This simplifies calls a bit since digest is never passed and allows for new optional parameters." }, { "commit": "221db610d2a054ed00ad27377663941abfb9fb98", "date": "2022-10-18 18:02:17 +1300", "subject": "Shorten names in real/all integration test matrix.", "body": "This should allow one or two more parameters to be added without going to a new line, which keeps the matrix easier to read." }, { "commit": "5fceee88a9439460bf24082f275ec2e695cc6bf6", "date": "2022-10-18 17:39:59 +1300", "subject": "Add backupFileRepoPathP().", "body": "The path for a backup file in the repository was being generated in four different places, so move the logic to a function." }, { "commit": "fee38c2c7cc3ae318bf0d3b50fe23c22900c2311", "date": "2022-10-18 16:11:35 +1300", "subject": "Pass filters to remote storage as a handler array.", "body": "The prior code required coverage in the storage/remote module for all filters that could be used remotely.\n\nNow the filter handlers are set at runtime so any filter list can be used with a remote. This is more flexible and makes coverage testing easier. It also resolves a test dependency.\n\nMove the command/remote unit test near the end so it will have access to all filters without using depends." }, { "commit": "1730ef4ac3d88137bd218e6128b0107a8bf2c121", "date": "2022-10-18 11:33:19 +1300", "subject": "Add noTruncate flag to storageNewWriteP().", "body": "This flag skips truncation when opening a file for write on drivers that support it, currently Posix and CIFS. This is convenient for cases where the file needs to be manipulated directly using the file descriptor. Using the file descriptor is not ideal and additional functionality should be added to the storage interface, but for now at least this avoids code duplication, especially on close which updates owners, the timestamp, syncs, etc.\r\n\r\nThe remote driver forbids no truncate because a file descriptor is never available for a remote storage write object.\r\n\r\nUpdate two instances in the current code which benefit from this new functionality, but the primary reason for the change is to support more complex restore deltas in the upcoming block incremental feature." 
}, { "commit": "7967c750d83466d030081e9deb1d70a1ba240271", "date": "2022-10-18 11:09:00 +1300", "subject": "Fix protocol error on short read of remote file.", "body": "If a remote file read was stopped before the read was complete or if an error occurred in the middle of the read then the protocol would end up in a bad state and produce this error:\r\n\r\nProtocolError: client state is 'data-get' but expected 'idle'\r\n\r\nPrevent this by reading the rest of the file on close() or free() to leave the protocol in an idle state for the next command.\r\n\r\nThis was a possible issue for bundling because the amount to read is known in advance and therefore eof may not be reached. However, I was only able to reproduce this issue with unreleased code.\r\n\r\nOn error this issue would cause the original error to be lost. The process may still fail with this fix (if the error comes from another source) but hopefully we'll get better information about the original error." }, { "commit": "ddd966cadc30440a81100144204d4084a0f2ac3a", "date": "2022-10-18 09:59:15 +1300", "subject": "Use more generic descriptions for pg_start_backup()/pg_stop_backup().", "body": "The names were changed in PostgreSQL 15, so update the code and docs to make the naming more generic where needed to avoid using a version-specific name in the logs and documentation." }, { "commit": "65be4c64a984d51306db4c6febe3a124c3230f51", "date": "2022-10-16 09:58:35 +1300", "subject": "Finalize catalog number for PostgreSQL 15 release." }, { "commit": "ea162e821634919b7c839e866b99bf82010dbacc", "date": "2022-10-14 12:54:43 +1300", "subject": "Move storageReadRemoteOpen() in storage/remote/read module.", "body": "This will make an upcoming important fix easier to review." }, { "commit": "909be412f8ad0d87f23ce2a2dbca94c3e0732f9f", "date": "2022-10-14 12:08:40 +1300", "subject": "Swap command/backup and command/restore unit tests.", "body": "Logically restore belongs after backup and in a future commit restore will have a dependency on some backup objects." }, { "commit": "8f67fb6db2645c2dae24429555ead66147ba176f", "date": "2022-10-14 10:59:07 +1300", "subject": "Enable FreeBSD 12/13 builds on Cirrus CI.", "body": "The builds were disabled in bb11539a and 164548f2 due to an error that seems to have been caused by a bad package dependency for rsync. In any case adding this fixed it:\n\npkg update && pkg upgrade -y libiconv" }, { "commit": "e7e106f781be01027d4829b258902549eb36eb7b", "date": "2022-10-11 10:52:34 -1100", "subject": "Use read-only storage to calculate checksum in restoreFile().", "body": "Writable storage is not required here so use read-only storage instead." }, { "commit": "164548f2b744db5e6c7a3089176f6df2a638d105", "date": "2022-10-11 10:20:44 -1100", "subject": "Disable FreeBSD 12 builds on Cirrus CI.", "body": "Build have begin failing with this error:\n\nld-elf.so.1: /usr/local/bin/rsync: Undefined symbol \"locale_charset\"\n\nThere does not appear to be a new version so hopefully this is a transient error (hoping the same for FreeBSD 13, see bb11539a). Disable for now to free the build pipeline." }, { "commit": "352cbf1d5738a720292b5067051a05efc2b9a8ac", "date": "2022-10-06 10:15:38 -1000", "subject": "Update ManifestFile booleans to bit fields." }, { "commit": "46a0af353139f3238dbe96671e99ea0456028940", "date": "2022-10-05 18:05:56 -1000", "subject": "Return binary as result from CryptoHash filter.", "body": "The prior result was hex-encoded, which is not optimal. 
This was legacy from the interface with Perl and then the JSON protocol. The new binary protocol natively supports binary so it makes sense to use it and convert to hex where needed.\n\nA number of these hex conversions can now be removed but that will need to be handled in another commit." }, { "commit": "5602f179a177a8513b96c762590829c012c28175", "date": "2022-10-05 17:01:35 -1000", "subject": "Add varint-128 encode/decode to IoRead/IoWrite.", "body": "This makes it more efficient to read/write (especially read) varint-128 to/from IO.\r\n\r\nUpdate the Pack type to take advantage of the more efficient read and remove some duplicate code." }, { "commit": "102ce5dee47e8d9a8d2eeaf2b2cc2bac105914ef", "date": "2022-10-05 16:28:31 -1000", "subject": "Add persistent reference list to manifest.", "body": "The reference list was previously built at load time from whichever references existed in the file list. This was sufficient since the list was for informational purposes only.\r\n\r\nThe block incremental feature will require a reference list that contains all prior backups, even those that are not explicitly referenced from the manifest. Therefore it makes sense to build and persist a manifest list rather than building it at load time.\r\n\r\nThis list can still be used for informational purposes, though it needs to be sorted since the list is still built for older manifest versions and may not be in sorted order.\r\n\r\nAdd strLstFindIdx() to find references in the list." }, { "commit": "c647bcb50954f623d29104cdb8a1457ed8e94579", "date": "2022-10-05 13:14:15 -1000", "subject": "Add manifest flags for file processing during backup.", "body": "The prior method was to check a combination of fields to determine if a file needed to be copied, delta'd, or resumed. This was complicated and ultimately imposed a limitation on the number of operations that could be performed.\r\n\r\nIntroduce copy, delta, and resume flags in the manifest to make it clearer which operations need to be performed and to reduce complex and duplicated logic.\r\n\r\nThis also allows zero-length bundled files to be completed during manifest build rather than later on during backup processing." }, { "commit": "bb11539a37e830ed2a836633218b7184a7a57f2d", "date": "2022-10-05 12:20:02 -1000", "subject": "Disable FreeBSD 13 builds on Cirrus CI.", "body": "Builds have begun failing with this error:\n\nld-elf.so.1: /usr/local/bin/rsync: Undefined symbol \"locale_charset\"\n\nThere does not appear to be a new version so hopefully this is a transient error. Disable for now to free the build pipeline." }, { "commit": "1ea6a4142e47e2e0b56911ad1f028dfcddc5fd64", "date": "2022-10-04 14:19:12 -1000", "subject": "Improve manifest file updates.", "body": "The prior manifestFileUpdate() function was pretty difficult to use since all the parameters had to be specified. Instead, pass a ManifestFile struct that has all members set as needed.\r\n\r\nWhen new struct members are added the manifestFileUpdate() call sites will still need to be reviewed, but this should make the process of adding members a bit simpler." }, { "commit": "f981fb45d9931e597afec764e13b1a53d8365d25", "date": "2022-10-04 13:22:31 -1000", "subject": "Do not store references for zero-length files when bundling.", "body": "This appears to have been an oversight in 34d6495. Storing the reference is not really correct since the file is not stored in a prior backup. 
It also uses more space.\r\n\r\nThere is no real harm in storing the reference, since it is always ignored on restore, but the code is simpler if the zero-length files can be dealt with during manifest build and don't need additional handling later on. This is also an important part of some upcoming optimizations." }, { "commit": "f0acc195c0527495857f1afa8b24bdf2a4f477b5", "date": "2022-10-03 10:02:44 -1000", "subject": "Fix assert message and add a clarifying comment." }, { "commit": "6e26860c2648f6e23ad99bb4d38ecb1bfc45dd0c", "date": "2022-10-02 17:48:43 -1000", "subject": "Do not log bundle info when a file is delta matched from a prior backup.", "body": "It is possible to log the bundle info correctly but the information is useless without the backup reference, which does not appear until later. For now just omit the bundle info so we are not logging something incorrect." }, { "commit": "4722ad87a78f1085ef8d823b7460d191477e768e", "date": "2022-10-02 17:41:31 -1000", "subject": "Add test for differential file bundles to the command/backup unit.", "body": "This test exposes a small logging issue. The bundle information for the matched delta on PG_VERSION is not correct. This issue will be fixed in the next commit.\n\nThe information stored in the manifest *is* correct so this bug is essentially cosmetic." }, { "commit": "ac99201c0ebde616279214e600e49fdf506f7469", "date": "2022-10-02 17:32:48 -1000", "subject": "Add bufferSize to cvtUInt64FromVarInt128() to further limit reads.", "body": "The current call site, manifestFileUnpack(), does not know the total buffer size but the buffer has always been maintained in memory so there should be no corruption. However, there are upcoming use cases where the buffer will be read from IO, the buffer size will be known, and additional sanity checking on buffer overruns will be valuable.\n\nAlso rename params to align better with cvtUInt64ToVarInt128()." }, { "commit": "01b81f9d374701eb489dd1888bcba25fb5d6df84", "date": "2022-10-01 15:26:44 -1000", "subject": "Move link creation to storage interface.", "body": "Direct link creation via Posix functions has been moved to the Posix driver.\r\n\r\nThis change allows adding SFTP softlink creation in the SFTP driver using the standard interface." }, { "commit": "2a4137ed2eeaa769508301056226d6f86a67fd06", "date": "2022-09-25 16:15:23 -0800", "subject": "Add zero-length chunked content test to common/ioHttp unit.", "body": "It seems wasteful to chunk content when there is nothing to send but make sure we handle it gracefully just in case a server decides to do it." }, { "commit": "64b64b614c8272d2a87ffcde2fa7f27cf7eee365", "date": "2022-09-25 12:30:30 -0800", "subject": "Fix comment typo." }, { "commit": "f1e8e49fa933dd0f5dfab446f511ac010eb4d236", "date": "2022-09-23 14:00:58 -0700", "subject": "Use large error/log buffers in test harness.", "body": "Ninja produces quite a bit of output so error messages are often truncated by the default error/log buffers. Use large buffers in the test harness to capture the error even when there is a lot of output.\n\nNinja has introduced a --quiet option, but it is currently too new to be in any of our test distributions." }, { "commit": "cd8db7d9e53e041ad6fb31f2f015541c1d24abc2", "date": "2022-09-22 22:42:01 -0700", "subject": "Fix memory leak in file bundle backup/restore.", "body": "When converting restoreFile() to support file bundling in 34d64957 there were some I/O objects that were only freed at the end of the function that should have been freed at the end of each loop. 
Wrap the loops in temp mem contexts to fix this.\r\n\r\nDo the same to backupFile() since it would have a similar leak when resuming a backup. Since file bundles cannot be resumed the leak would not be as severe, but still seems worth doing to protect against future leaks." }, { "commit": "d50a4442e45efa8d1a16c9bf68f2337367c42835", "date": "2022-09-22 10:35:41 -0700", "subject": "Add missed release note for b05d31f5." }, { "commit": "9483844f7fc9a5eac64a87d3c80cfea3fd592e4e", "date": "2022-09-19 10:25:38 -0700", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "ab4209ebcbe23b5c3b759ba67f0e9bdc6cb66280", "date": "2022-09-19 10:17:25 -0700", "subject": "Begin v2.42 development." }, { "commit": "6b355806cc288e6f473254412337596287291798", "date": "2022-09-19 10:08:10 -0700", "subject": "v2.41: Backup Annotations" }, { "commit": "c39c9f220ed328b4175d912e109697b726e9ea9b", "date": "2022-09-15 12:00:44 -0700", "subject": "Fix issue when listing directories recursively with a filter.", "body": "While recursing and filtering, if the last entry in a directory was another directory containing entries then the parent list would get freed too early, causing a double free error or segfault.\r\n\r\nFix by ensuring that the completed list is at the top of the stack before freeing it. This will defer freeing parent lists until the contents of paths have been processed." }, { "commit": "08a44be4c38e4bc72a20c19d764be8a673c9b909", "date": "2022-09-14 16:22:48 -0700", "subject": "Fix incorrect storage for rendering errors.", "body": "Coverity complained of a copy/paste error here, but the actual error was what it took to be the correct \"copied from\" code.\n\nIn any case, the prior code would have blown up as soon as a new error type was added. Fix by updating to the writable build storage." }, { "commit": "240cd755d19ae2688d0deee65c00daf0651bb601", "date": "2022-09-14 10:06:06 -0700", "subject": "Add mem context test missing from 0f7b6a33.", "body": "A coverage exception was added during development but was not removed before commit.\n\nRemove the exception and add a test for coverage." }, { "commit": "8fb61a809dc63936a78e5233a2e8bc8dc616c762", "date": "2022-09-08 18:36:03 -0600", "subject": "Add FN_INLINE_ALWAYS macro.", "body": "Eliminate a lot of useless repetition for a commonly used pattern." }, { "commit": "2c9cbbcc4e72f5c1809e0001e40bfb3e7d8bbe01", "date": "2022-09-08 18:16:38 -0600", "subject": "Update Cirrus CI FreeBSD 13 image version." }, { "commit": "3b5df1e089234d425dd225f8c1976755dc947c8a", "date": "2022-09-02 10:03:18 -0500", "subject": "Update archive.info timestamps after a successful backup.", "body": "Lifecycle policies can cause the archive.info file and its copy to be removed since they are only updated on a stanza-upgrade. Update the timestamps after a successful backup to prevent this.\r\n\r\nThis does not mean that lifecycle policies should be used as a replacement for expiration. However, in some cases there may be policies in place that are out of admin control. If the lifecycle expiration is less than pgbackrest expiration then corruption of the earliest backup will occur at the very least and there might be other corruption which would make the repo unrecoverable." 
}, { "commit": "0f7b6a3344b676d79afa7cd16ab54f67be870fdf", "date": "2022-08-31 12:44:55 -0400", "subject": "Skip mem context cleanup in CATCH_FATAL() block.", "body": "An error that gets raised all the way to the top TRY block might need to free a lot of resources and any of these callbacks could throw an error and mask the original error. In fact this is pretty likely since we are already in an error state. For example, the Db object will try to close the remote db connection, but if the protocol is in a bad state it will not be able to do so.\r\n\r\nSolve this, for now, by not freeing memory or calling callbacks in the CATCH_FATAL() block. This gives us a better chance if being able to report the error without encountering another error first.\r\n\r\nFor the most part, we don't need to worry about freeing resources (file handles, TLS contexts, etc.) if the program is going to exit immediately. However, it is important to attempt to terminate all active protocol connections, which is done by protocolFree() in main() since the protocol objects live in the top context.\r\n\r\nAnother way to handle this would be to implement an error stack and that is probably something we will do in the future. But, in the case of a segfault the original error would still be lost. Yet another option would be to still do cleanup but defer it until after the CATCH_FATAL() block." }, { "commit": "eda7f81ee4fd6818ce458768144fc3fc1e1f0f21", "date": "2022-08-31 10:01:12 -0400", "subject": "Fix incorrect time expiration being used for non-default repositories.", "body": "If a repo is not specified for the expire command then the lowest repo becomes the default. The repo-retention-full value for time was being retrieved from the default rather than a specific repo which led to an incorrect expiration being applied.\r\n\r\nGet the value from the specific repo and add a test.\r\n\r\nIt would be better if the default repo could not be queried in this case but it is not clear how to do that since the repo option is valid for expire (unlike, e.g., archive-push)." }, { "commit": "f1cb8485917bc07e00e77efe4593fb737f72136a", "date": "2022-08-30 18:04:32 -0400", "subject": "Fix comment typos in command/expire unit test." }, { "commit": "db75ffd27037d690a11eb02902f929250fe2bf5b", "date": "2022-08-25 10:12:22 -0400", "subject": "Support --set in JSON output for info command.", "body": "Allow detailed information (e.g. error list, tablespace list) in JSON output that is already available in text output with the --set option." }, { "commit": "381fd0a5a4de61624615b150ba98beeafac31f7e", "date": "2022-08-24 10:52:33 -0400", "subject": "Backup key/value annotations.", "body": "Allow key/value annotations to be added with the backup command and added/modified/removed with the new annotate command.\r\n\r\nAnnotations can be viewed with the info command in text mode when --set is specified and are always included in JSON output." }, { "commit": "b05d31f531b55b747175cf94c963f95574c54d2b", "date": "2022-08-24 09:33:26 -0400", "subject": "Allow upload chunk size to be configured for object stores.", "body": "There are performance benefits to increasing the upload chunk size as long as the tradeoff with additional memory consumption is acceptable.\r\n\r\nMake the chunk size configurable for S3, GCS, and Azure, but don't attempt to do any validation of the chunk size beyond some sane limits. The defaults remain as is for each storage type to avoid any unintentional regressions." 
}, { "commit": "37b4592e522289a0523f03f8e9b144ff28fa21d3", "date": "2022-08-24 08:45:44 -0400", "subject": "Allow host memory limits in the user guide to be disabled.", "body": "These limits can cause errors in some environments, e.g. Docker in Docker on Mac M1.\n\nEntirely remove limits from the build, s3, and azure hosts since memory usage on these hosts is out of our control and not useful for testing.\n\nAlso allow empty variables to be rendered as blank." }, { "commit": "ff1188f92dfc9e8f6d4aeca8b71a9886f24f58b4", "date": "2022-08-22 13:51:05 -0400", "subject": "Allow quote tag in command-line help.", "body": "The quote tag will be used in an upcoming commit." }, { "commit": "2a99ac4324b52b8b42db639e645378840159af00", "date": "2022-08-17 11:24:20 -0400", "subject": "Clear ProtocolClient callback after connection error.", "body": "Attempting to shut down the connection will fail since the server has already disconnected and a new error will be thrown, masking the original error." }, { "commit": "82786da1545c0bfe8185837a35d13a50b5e11ade", "date": "2022-08-16 16:15:48 -0400", "subject": "Do not allow CATCH() to catch a fatal error.", "body": "Catching individual fatal errors was only used in testing so the tests have been updated to use other errors instead. CATCH_FATAL() is now the only way to catch fatal errors.\r\n\r\nThis simplifies the logic a bit for upcoming changes to error handling and cleanup.\r\n\r\nAlso fix an issue where passing errorMessage() directly to THROW*() would attempt to copy the message buffer instead of preserving it, which is undefined behavior. Since there were no instances of this behavior before this commit, this was not a live bug." }, { "commit": "02665a5894dc4b025172b32c7a1ce42c81d6c582", "date": "2022-08-02 11:05:31 -0400", "subject": "Update Minio test/documentation container version." }, { "commit": "eb287b18c882a16eecfb1445a513c4da4ccab1b7", "date": "2022-07-29 10:31:36 -0400", "subject": "Add profiling, performance, and optimization to C test harness.", "body": "All unit and performance tests are now built by the C harness.\n\nRemove all unit/performance test build code from Perl.\n\nRemove code from C harness that is no longer used. This code was included so the C harness could be run separately, but that is no longer needed with this full integration." }, { "commit": "1e83f2a022b36191c2d12185ed57cf4d96462862", "date": "2022-07-28 14:53:48 -0400", "subject": "Add coverage to C test harness.", "body": "Coverage reports are still generated in Perl, but all the settings have been added to the C harness to generate raw coverage data." }, { "commit": "c99ea54f17c1f26a5e996bb8cfd3df81684f813b", "date": "2022-07-27 10:32:32 -0400", "subject": "Integrate C test harness with Perl test harness.", "body": "The C test harness is used for unit tests from the Perl harness where possible. Currently, unit tests can be run in the C harness when --no-coverage is specified and --profile is not specified.\n\nC harness tests work on meson 0.45.\n\nThe C harness runs with valgrind by default. Valgrind can be disabled with --no-valgrind.\n\nAlso rebuild containers to add meson and update the documentation so that meson builds will work (even though we don't do them yet)." }, { "commit": "2caef37fd55ea12959015f02a106774f6ad711b7", "date": "2022-07-26 18:25:24 -0400", "subject": "Add reviewer for c267ba51." }, { "commit": "e9ff5248039eedb25da2fee550ec64fcebbc51c7", "date": "2022-07-26 15:15:12 -0400", "subject": "Add absolute path for kill in pgbackrest.service." 
}, { "commit": "79d9884141f90a9e93fb2ec8faef15e460dd3442", "date": "2022-07-25 09:39:54 -0400", "subject": "Move release build check to src/meson.build.", "body": "This allows the C harness to perform release builds which are required for some tests." }, { "commit": "c267ba51b1778b5872f4ee928a9614545d743f3f", "date": "2022-07-22 16:24:55 -0400", "subject": "Move standby timeline check after checkpoint.", "body": "The standby timeline check was being performed using pg_control data loaded before the backup started. If the backup was started immediately after a promotion the standby might not have executed a checkpoint and written the new timeline to pg_control.\r\n\r\nInstead perform the timeline check after the checkpoint is executed. This should ensure that the new timeline is in pg_control." }, { "commit": "cbbe93f592d8097393c5ffe214f0a8c5fb0549c1", "date": "2022-07-22 14:45:38 -0400", "subject": "Improve warning message on backup resume.", "body": "The prior warning made it sound as if some action was required on the part of the user.\n\nThe new message should make it clearer that this action will be performed by pgBackRest." }, { "commit": "4c47cc5d4786b88a06c38aefe628356342433f64", "date": "2022-07-22 09:24:29 -0400", "subject": "Remove Debian package patch now that it has been merged upstream." }, { "commit": "19d9941367f68d6effb516fbb1444877fea9a9ce", "date": "2022-07-21 20:10:51 -0400", "subject": "More C test harness improvements and CI.", "body": "Build pgbackrest binary and auto-generated code automatically.\n\nRemove --module option and allow modules to run by parameter. This is less verbose and multiple modules can be run at a time.\n\nAllow filtering of modules. Multiple tests can be passed as parameters and if the module ends in / it will be used as a prefix filter. For example, common/ will run all the common modules.\n\nIf a test errors the remaining tests will still run but the test process will eventually exit with an error.\n\nCI tests are included but unit tests remain on the development branch.\n\nWith these changes all unit tests run except those that specify the define (e.g. common/assert-off) or containerReq (e.g. protocol/protocol) keywords.\n\nBuilding the C test harness has been simplified:\n\nmeson -Dwerror=true -Dfatal-errors=true -Dbuildtype=debug test/build/none pgbackrest\nninja -C test/build/none test/src/test-pgbackrest\n\nTo run all modules:\n\ntest/build/none/test/src/test-pgbackrest test\n\nJust the common/error module:\n\ntest/build/none/test/src/test-pgbackrest test common/error\n\nAll info modules:\n\ntest/build/none/test/src/test-pgbackrest test info/" }, { "commit": "edfcf1652c6ab07fe5a147b97df0c563e09d3445", "date": "2022-07-21 19:27:58 -0400", "subject": "Test Dockerfile improvements.", "body": "Add tzdata package so timezone tests in command/restore work correctly.\n\nMark default git path as safe. This is a security fix that is not applicable in this environment, but must be set.\n\nAlso remove package cleanup, which is inconvenient when new packages need to be installed. It makes sense for containers that will be downloaded from Dockerhub but not so much for a locally-maintained container." 
}, { "commit": "5e5b04be3741b4e903c9a7764dadfbe5bda121ae", "date": "2022-07-21 18:19:47 -0400", "subject": "Fix common/lock test creating \"750\" path.", "body": "This was clearly an attempt to set the mode when creating a directory, but it never worked and instead created a \"750\" directory in the current working directory.\n\nDetected when running in an environment where the current working directory was read-only." }, { "commit": "f9bbafbf3fbd67fc289cf849900799b4718dace1", "date": "2022-07-20 19:02:14 -0400", "subject": "C test harness improvements.", "body": "Add harness depends when present.\n\nInclude libyaml in all test builds.\n\nFix mode on paths before trying to remove and set test path with mode 770 to match the Perl test harness.\n\nWith these changes all unit tests run except those that specify the define (e.g. common/assert-off), binReq (e.g. command/archive-get), or containerReq (e.g. protocol/protocol) keywords.\n\nBuilds and code generation need to be done in advance. The following commands are required for setup:\n\nmeson setup -Dwerror=true -Dfatal-errors=true -Dbuildtype=debug build pgbackrest\nninja -C build test/src/test-pgbackrest\nbuild/src/build-code help pgbackrest\nbuild/src/build-code postgres pgbackrest\n\nNow tests can be run, e.g.:\n\nbuild/test/src/test-pgbackrest --module=postgres/interface" }, { "commit": "c625f05a13e0238bf06feb26d0d8757dd2bedffd", "date": "2022-07-20 17:45:39 -0400", "subject": "Unify code builder binaries into a single binary.", "body": "Creating new binaries was convenient at first but has now become a maintenance issue.\n\nSolve this by combining that into a single binary that takes an additional parameter to indicate which code should be built.\n\nAlso clean up path handling to make it easier to build code from the command line." }, { "commit": "7eb5d679857999faebe78730c5a30e450ef80d5c", "date": "2022-07-19 18:03:39 -0400", "subject": "Add module harness and shim support to the C test harness.", "body": "This allows running unit tests up to common/ini, excluding common/debug-off and common/assert-off." }, { "commit": "8fdeed780787a2c6df6672f5d9c75fb7b5b613bc", "date": "2022-07-19 17:57:13 -0400", "subject": "Copy repository links as files for testing.", "body": "This makes the test code a bit simpler where we are listing a path but not following links.\n\nLinks in the repository can be used for testing but should never be committed to the main branch." }, { "commit": "eb775790087cd8594f6649893895c42a407302cd", "date": "2022-07-19 17:25:07 -0400", "subject": "Fix comment typo." }, { "commit": "f3a10b921bf3d87152d27347df23b487877abe94", "date": "2022-07-18 10:52:57 -0400", "subject": "Update config.guess to latest version." }, { "commit": "20f9f726478a4d1c8897cc31a5bf7df52275b84f", "date": "2022-07-18 10:49:58 -0400", "subject": "Begin v2.41 development." }, { "commit": "1ff531090b9d1eccd9107592398148a51486c9f8", "date": "2022-07-18 09:32:30 -0400", "subject": "v2.40: OpenSSL 3 Support" }, { "commit": "1df1e0281b94a06ff0f16622e2cbb76598d430d1", "date": "2022-07-14 17:13:33 -0400", "subject": "Replace AC_PROG_CC_C99 configure macro with AC_PROG_CC.", "body": "AC_PROG_CC_C99 is now obsolete.\n\nThe AC_PROG_CC macro does not guarantee C99 compliance but we add an option to check for that a bit later." 
}, { "commit": "364af1635d40431276cc6fd87d6ddd689b611e09", "date": "2022-07-14 08:26:03 -0400", "subject": "Force target-timeline=current when restore type=immediate.", "body": "Explicitly set target timeline to \"current\" when type=immediate and PostgreSQL >= 12. We do this because type=immediate means there won't be any actual attempt to change timelines, but if we leave the target timeline as the default of \"latest\" then PostgreSQL might fail to restore because it can't reach the \"latest\" timeline in the repository from this backup.\r\n\r\nThis is really a PostgreSQL bug and will hopefully be addressed there, but we'll handle it here for older versions, at least until they aren't really seen in the wild any longer.\r\n\r\nPostgreSQL < 12 defaults to \"current\" (but does not accept \"current\" as a parameter) so no need set it explicitly." }, { "commit": "75623d4583bfdaccb3a19b935fd04d744b363923", "date": "2022-07-08 17:21:39 -0400", "subject": "Create snapshot when listing contents of a path.", "body": "Previously a callback was used to list path contents and if no sort was specified then a snapshot was not required. When deleting files from the path some filesystems could omit files that still existed, which meant the path could not be removed.\r\n\r\nFilter . out of lists in the Posix driver since this special entry was only used by test code (and filtered everywhere in the core code).\r\n\r\nAlso remove callbacks from the storage interface and replace with an iterator that should be easier to use and guarantees efficient use of the snapshots." }, { "commit": "f9ac53db92574e8b499187ee4f2a7258bc2f4794", "date": "2022-07-08 14:38:14 -0400", "subject": "Fix typo." }, { "commit": "74a4ac801db921f7b11b63b9f927228c6a6af861", "date": "2022-07-08 11:13:55 -0400", "subject": "Add link to PostgreSQL configuration in repository host section.", "body": "This should make the documentation clearer when starting from this section." }, { "commit": "0eccbc8bf47073ab8ec1f13fed9865d0bd7c3635", "date": "2022-07-06 18:17:52 -0400", "subject": "Meson builds work on version 0.45.", "body": "v0.45 ships with Ubuntu 18.04, which is currently the oldest distro we support. We may never do a Meson release on Ubuntu 18.04 but this allows us to start running unit tests with Meson in the meantime.\n\nSome more granular options are not available so we use buildtype in more places.\n\nThe check for a in-tree autoconf/make build had to be removed since the filesystem APIs are not available.\n\nFinally, alias_target was removed. This means that full paths must be used for build targets, which does not seem too bad. For instance, test/src/test-pgbackrest must now be used as a build target instead of simple test-pgbackrest." }, { "commit": "72960bbf179b4e8051bd4fc50be57d95159e9e13", "date": "2022-07-06 09:55:32 -0400", "subject": "Rename strTrunc() to strTruncIdx() and add strTrunc().", "body": "strTrunc() is now shorthand for truncating to index 0. This is convenient when a string is being reused." }, { "commit": "cd25cec1471dde60f85404744252b675efff2522", "date": "2022-07-05 14:34:31 -0400", "subject": "Add comment to indicate that type is in a different info level." 
}, { "commit": "1c0bf0b15da8c0d15cc639851ff1d1bd05b51d06", "date": "2022-07-05 14:28:40 -0400", "subject": "Reorder null user/group checks in remote protocol and add tests.", "body": "Coverage for these checks was dependent on the order the files were read from disk, which made the tests fragile.\n\nRearrange the checks and add a test that won't depend on order." }, { "commit": "326d152a14121cb298089847a3694aa9f50bcbd2", "date": "2022-07-05 06:58:19 -0400", "subject": "Update contributor name." }, { "commit": "845d82f6825d4101e6a465a435ef056ff6ec15a2", "date": "2022-07-01 20:32:10 -0400", "subject": "Use S3 IsTruncated flag to determine list continuation.", "body": "Previously we were just checking for the existence of NextContinuationToken, which the S3 documentation indicates will not be present when the list is not truncated. However, recent versions of Scality send a blank NextContinuationToken when IsTruncated is false. Sending the blank continuation token back causes Scality to send another blank continuation token and an infinite loop occurs.\r\n\r\nInstead use IsTruncated (which is required to be present) to determine whether NextContinuationToken should be present. Error if NextContinuationToken is then missing or empty, since an empty token caused an infinite loop with the Scality server (which arguably should have errored when passed an empty token)." }, { "commit": "61ca9b58a000aa8789ef3267682519bdb15e8a6b", "date": "2022-06-28 14:15:08 -0400", "subject": "Replace hrnStorageInfoListCallback() with TEST_STORAGE_LIST() in tests.", "body": "The TEST_STORAGE_LIST() macro is more robust and hides the callback mechanism from the caller.\n\nAdd features to TEST_STORAGE_LIST() that hrnStorageInfoListCallback() had.\n\nUpdate tests to use the abbreviated type output (e.g. path/) generated by TEST_STORAGE_LIST()." }, { "commit": "59f148bf6ea8c090d4d65ce5b135a71501e8ae22", "date": "2022-06-26 08:42:43 -0400", "subject": "Generate -fmacro-prefix-map to strip relative path from __FILE__.", "body": "This provides reproducible builds and minimizes the file path in debug messages, just like an in-tree make build.\n\nFor test source, prefix with test/ in case there are any module name collisions." }, { "commit": "f863fc98886f4d6c2803b9a2396539597d0d7be2", "date": "2022-06-23 12:20:56 -0400", "subject": "Add experimental unit test harness written in C.", "body": "Having the test harness in C will allow us to remove duplicated Perl code and test on systems where Perl support is not present.\n\nCustom harnesses and shims are currently not implemented, which means only the following tests in the common module will run: error, stack-trace, type-convert, assert-on, mem-context, time, encode, type-object, type-string, type-list, type-buffer, type-variant, reg-exp, log.\n\nThe experimental test harness is being committed with partial functionality so it can be used in Windows development. The remaining features will follow as needed." }, { "commit": "b7a1b3ec2c16fde6a201f404a27d8343595900b6", "date": "2022-06-23 10:57:58 -0400", "subject": "Used DEBUG instead of NDEBUG in common/stackTrace module.", "body": "In some testing cases these might not be in sync, which causes unpredictable behavior." }, { "commit": "29d2f0f9fda4dbba0dc5eca6f84368a6f655bd28", "date": "2022-06-22 09:52:30 -0400", "subject": "Add cast to handle compilers (e.g. MSVC) that coerce to signed int.", "body": "MSVC changes the sign in this case, presumably because of the subtraction. Cast so that MSVC does not also trigger a mixed sign warning." }, { "commit": "9fd85a104ad56061b028afd9057ce8c1d636d3dd", "date": "2022-06-21 09:50:38 -0400", "subject": "Disable meson for all but debug builds.", "body": "The meson builds are still experimental so for now the configure/make build process is preferred for release builds. This message should help prevent any automated build systems from picking up meson instead." }, { "commit": "665da12ae7f519077512e60ce3a5ec6c6a9997fd", "date": "2022-06-17 16:41:48 -0400", "subject": "Update meson.build include comment to be more general.", "body": "Eventually multiple subdirs will be included so update the comment to reflect that." }, { "commit": "5ecae90f0254ef752d8323ea5be3b99673ec46f7", "date": "2022-06-17 16:31:48 -0400", "subject": "Use constants rather than replacements when possible in test.c.", "body": "Some of the replacements that were being done already existed as constants, so use the constants instead.\n\nAlso fix a minor formatting error introduced when testAdd() was renamed to hrnAdd()." }, { "commit": "dab1e4b6c66018c03c6e01309c41a89ef82ff5bc", "date": "2022-06-17 16:27:31 -0400", "subject": "Add strReplace().", "body": "Allows substrings to be replaced with another string." }, { "commit": "fb9acc1c93d504d366493bc42038f15faa80300d", "date": "2022-06-17 16:17:02 -0400", "subject": "Add higher level YAML functions.", "body": "These functions make parsing YAML simpler." }, { "commit": "55bcb933eee753a0cdd13f9861a00c33d194c1e7", "date": "2022-06-17 11:17:52 -0400", "subject": "Move protocol module from common to command.", "body": "This module has dependencies on command/command so it does not make sense for it to be in the common module. Also move protocolFree() to main() since this is a very large dependency.\n\nAdjust the tests so command/exit can be tested later. This is a bit messy but will get adjusted as we improve the test harness." }, { "commit": "eb72a80b471145d97ac4170e3215f7c67aa83082", "date": "2022-06-15 09:06:25 -0400", "subject": "Fix continuation character spacing." }, { "commit": "716bba5800d504a76d4bdc99588448824be3d83f", "date": "2022-06-14 08:13:22 -0400", "subject": "Fix hard-coded WAL size assertion in walSegmentNext().", "body": "PG_WAL_SEGMENT_SIZE_DEFAULT is used to compare and check WAL size on pre-11 installations. However, there is a hard-coded assertion in walSegmentNext() which doesn't respect PG_WAL_SEGMENT_SIZE_DEFAULT.\r\n\r\nUpdate the assertion to use PG_WAL_SEGMENT_SIZE_DEFAULT." }, { "commit": "282edda661b5f1eb189998e7db680b91a2b8a66b", "date": "2022-06-09 17:42:00 -0400", "subject": "Remove storageLocal() dependency from common/lock module.", "body": "The storage/helper module is a very heavy dependency to introduce in the common module. Creating Posix storage objects is cheap so just do that instead." }, { "commit": "04f5ef25faf276e51bd546199f1197b5c70003e8", "date": "2022-06-09 17:18:32 -0400", "subject": "Add valgrind to Dockerfile missed in a16cf5ea." }, { "commit": "79443bea3634f7dd3858b6f65838e31fb9b87c3f", "date": "2022-06-09 16:55:07 -0400", "subject": "Move bldEnum() to the build/common/render module.", "body": "This function will be useful for other build/test modules so extern it.\n\nAlso skip the first upper-case when no prefix is provided."
}, { "commit": "1a00ab10033e9ac6e6f797627567e8b91e143432", "date": "2022-06-09 07:34:11 -0400", "subject": "Fix compile error when DEBUG_EXEC_TIME is defined without DEBUG.", "body": "If DEBUG is not defined then the ASSERT() macro expands to nothing. In this case the timeBegin variable is never used and a compilation error occurs.\r\n\r\nThis test should work without DEBUG defined so use CHECK() instead of ASSERT()." }, { "commit": "0dabf88e9d85e4db2ff65ebbb14b4320e1092a8b", "date": "2022-06-08 17:43:23 -0400", "subject": "Add FN_NO_RETURN macro.", "body": "Change all instances of __attribute__((__noreturn__)) to a macro in meson.build / build.auto.h.in.\r\n\r\nAs compiler attributes written in the form of __attribute__ are not supported by MSVC, this is one of several commits to make the code-base more robust and allow using MSVC-style attributes later." }, { "commit": "8babd558bc3a5b930fd096ffb306551773f1f14d", "date": "2022-06-08 07:26:15 -0400", "subject": "Add missing build.auto.h includes.", "body": "These are required for proper building but were probably not noticed before since they are not part of the core code." }, { "commit": "a16cf5eac7217e79d8669fab5dff1863e26208c9", "date": "2022-06-06 16:32:20 -0400", "subject": "Update CI to use Ubuntu 22.04 and Fedora 36.", "body": "Both have newer gcc and OpenSSL 3.\n\nFedora 36 runs horribly slow with valgrind enabled so run the valgrind tests on Ubuntu 22.04. Fedora 36 has a newer gcc so it is still worth testing on." }, { "commit": "08242ee6ac93b9eb9c05cc39e3db15bbafb9052c", "date": "2022-06-06 14:47:47 -0400", "subject": "OpenSSL 3 support.", "body": "There are two changes:\r\n\r\n* Suppress deprecation warnings so we can build with -Werror and -Wfatal-errors. At some point we'll need to migrate to the new APIs but there does not seem to be a good reason to support two sets of code right now.\r\n\r\n* Update the handling for unexpected EOF to handle EOF or error. The error code for EOF has changed and become harder to identify, but we probably don't care whether it is an error or EOF." }, { "commit": "f92ce674f79e2d6056794c1af4002c01dad93aed", "date": "2022-06-06 13:52:56 -0400", "subject": "Automatically create PostgreSQL version interfaces.", "body": "Maintaining the version interfaces was complicated by the fact that each interface needed to be in separate compilation unit to avoid type conflicts. This also meant that various build/test files needed to be updated to add the new interfaces.\r\n\r\nSolve these problems by auto-generating all the interfaces into a single file. This is made possible by parsing defines and types out of the header files and creating macros to rename the types. At the end of the version interface everything is undef'd. Another benefit is that the auto-generated interfaces can be static and included directly into postgres/interface.c.\r\n\r\nSince some code generation is now always required for tests, change --no-gen to --min-gen in test.pl.\r\n\r\nIt would also make sense to auto-generate the version defines in postgres/version.h, but that will be left for a future commit." }, { "commit": "b8fc20d5b8e7ca003ff39f29ea6e3ba57653f55c", "date": "2022-06-03 14:13:56 -0400", "subject": "Add experimental Meson build.", "body": "Meson is a new build system that offers simpler syntax and superior performance to autoconf/make. In addition, Windows is supported natively.\r\n\r\nThe Meson build appears complete, but currently is used only for auto-generation of code and the host build of pgbackrest. 
Some container upgrades will be required before Meson can be used for container builds.\r\n\r\nAlso patch the Debian package to force autoconf/make rather than Meson." }, { "commit": "148956aed8aca201aa800af321d946094b96f176", "date": "2022-06-01 10:13:57 -0400", "subject": "Remove useless command/check unit test.", "body": "This test was a placeholder and did not provide any coverage, but it did give inconsistent results on different shell versions." }, { "commit": "29b2a54fcc74457af5c63ffb505c5fbb24bb2ade", "date": "2022-05-31 17:28:58 -0400", "subject": "Allow any path to be passed to the --test-path option.", "body": "A hard-coded path prevented this from working correctly." }, { "commit": "be354c489017ea84ed825f226b50d170bec1ff68", "date": "2022-05-31 16:52:26 -0400", "subject": "Update CodeQL to version 2.", "body": "Version 1 will be deprecated on December 12, so upgrade now to avoid nasty surprises." }, { "commit": "2c38c9a56f95efea42d489792751b77acfc470e4", "date": "2022-05-31 16:43:18 -0400", "subject": "Skip stopping PostgreSQL 9.1 in real/all integration test.", "body": "Stopping the cluster has started consistently running out of memory on PostgreSQL 9.1. This seems to have happened after pulling in new packages at some point so it might be build related.\n\nStopping the cluster is not critical for 9.1 so skip it." }, { "commit": "2feaaeaac803d6c0193477b56dbeb622fd991156", "date": "2022-05-31 16:06:41 -0400", "subject": "Add .inc extension to C files included in other C files.", "body": "These files were never intended to be compiled on their own so the .c extension was a bit misleading. In particular Meson does not like .c files that are not intended to be compiled independently.\n\nLeave header files as is since they are already protected against being included more than once and are never expected to be compiled." }, { "commit": "cb891fa2d402ee338b72d2c0962cc5cd49eea4f1", "date": "2022-05-31 13:02:51 -0400", "subject": "Run remaining Github Actions CI on Ubuntu 20.04.", "body": "The s390x/ppc64le tests are already running on 20.04." }, { "commit": "2643050be0dc4b7363c434f58373919bd6fdac68", "date": "2022-05-31 12:36:21 -0400", "subject": "Skip internal options in the configuration reference.", "body": "Most internal options were being skipped, but not in the case where an option was marked internal for a specific command.\n\nThe command-line help was not affected by this issue." }, { "commit": "f0aae6ceab9a95c6d63b8955abd2592ddbbce90a", "date": "2022-05-31 12:26:57 -0400", "subject": "Remote repo options should not be valid for the backup command.", "body": "Some of the remote repo options were gated by repo-local, but the rest relied on repo-host-cmd.\n\nRemove backup from the repo-host-cmd option since none of the dependent options are valid for backup." }, { "commit": "a902c7808d8bf651389e2479d4622b0fd49ab09d", "date": "2022-05-31 12:24:21 -0400", "subject": "Make all repo-* options visible for stanza create/update/delete.", "body": "31c7824a allowed these commands to run remotely but neglected to remove some internal flags, which prevented all the repo-* options from being visible in the documentation." }, { "commit": "60d70fa66fc3f709914f40dd088bac1b467001f2", "date": "2022-05-25 18:18:37 -0400", "subject": "Disable incremental manifest save when resume=n.", "body": "The manifest is saved on a regular basis during a backup so a failed backup can be resumed. For backups that the user has configured/invoked as not resumable, skip the incremental save of the manifest." 
}, { "commit": "8c2b3a044fa175031366fb4f676b5c1a0a319ec0", "date": "2022-05-25 15:27:53 -0400", "subject": "Use StringId for type in cryptoHash() and cryptoHmacOne().", "body": "This brings hash types in line with cipher types (i.e. a StringId enum) and allows removal of some Strings." }, { "commit": "38ad2838d553febae1b7fe0244d942dfaa9cf548", "date": "2022-05-25 14:20:19 -0400", "subject": "Set backup percent complete to zero before copy start.", "body": "Waiting to write percent complete until the first file completed resulted in a period of time where the backup was running without status available to the user.\r\n\r\nRemedy this by initializing percent complete to zero when the backup is ready to start copying files." }, { "commit": "7b1935c71005a5a928e34caf7b8b48e5d871cc66", "date": "2022-05-25 10:23:43 -0400", "subject": "Remove unused errors.", "body": "Most of these were probably never ported from Perl to C and others became obsolete over time.\n\nFix one error that was the wrong type.\n\nAlso fix/improve some comments." }, { "commit": "f6f2f2e2a34c01e20bbf6c26e5e8a232a6bd232d", "date": "2022-05-25 09:42:49 -0400", "subject": "Add missing static keywords.", "body": "Add static keyword to local variables where missing." }, { "commit": "7ec51e7e6279ea576ad3b4b5c2ea1fc389d08ab5", "date": "2022-05-24 16:39:35 -0400", "subject": "Truncate files during delta restore when they are larger than expected.", "body": "Previously the behavior was to download the file from the repository when it was not exactly the same size in PGDATA. However, it may just be that the file was extended and the contents are the same up to the file size recorded in the manifest. This could also be very valuable for files that are always append only, like logs.\r\n\r\nChange info.size to file->size in one place. Both are technically correct but file->size makes more sense.\r\n\r\nUse the new fileName variable in a few existing places.\r\n\r\nAlso adjust some existing comments to make them clearer." }, { "commit": "c98baab6b56967e222b825e22e5808e17c02d025", "date": "2022-05-19 18:24:09 -0400", "subject": "New CI container builds for PostgreSQL 15 beta1 and minor releases.", "body": "Remove VM_OS_REPO since it is no longer required.\n\nRebalance PostgreSQL versions for more efficient test times.\n\nAlways print version of PostgreSQL when testing. This helps verify that new minor releases are being used." }, { "commit": "69adb990dced3bf1d76a3e2482aad38016165d6c", "date": "2022-05-19 12:25:58 -0400", "subject": "Use storagePathP() instead of cfgOptionStr() to get base repo path.", "body": "cfgOptionStr() may not have the correct value if the repo is remote.\n\nUse storagePathP() instead since it can ask the remote for the correct value when required." }, { "commit": "c7a66ac1afb9a55b049dc18493b602b7bf62bda4", "date": "2022-05-18 10:52:01 -0400", "subject": "Improve memory usage of mem contexts.", "body": "Each mem context can track child contexts, allocations, and a callback. Before this change memory was allocated for tracking all three even if they were not used for a particular context. This made mem contexts unsuitable for String and Variant objects since they are plentiful and need to be as small as possible.\r\n\r\nThis change allows mem contexts to be configured to track any combination of child contexts, allocations, and a callback. 
In addition, the mem context can be configured to track a single child context and/or allocation, which saves memory and is a common use case.\r\n\r\nAnother benefit is that Variants can own objects (e.g. KeyValue) that they encapsulate. All of this makes memory accounting simpler because mem contexts have names while allocations do not. No more memory is used than before since Variants and Strings still had to store the memory context they were originally allocated in so they could be easily freed.\r\n\r\nUpdate the String and Variant objects to use this new functionality. The custom strFree() and varFree() functions are no longer required and can now be a wrapper around objFree().\r\n\r\nLastly, this will allow strMove() and varMove() to be implemented and used in cases where strDup() and varDup() are being used to move a String or Variant to a new context. Since this will be a bit noisy it is saved for a future commit." }, { "commit": "83af3f1b7ae78b150849939766e2e555366b95d5", "date": "2022-05-18 08:48:48 -0400", "subject": "Add additional detail to warnings when delta checksum is auto-enabled.", "body": "Hopefully this will help with debugging when it is not clear why delta checksum is being enabled." }, { "commit": "5dfd00bb6ce0d6d2ead0ba6c899c18983f9a7ac9", "date": "2022-05-18 08:18:34 -0400", "subject": "Fix RHEL container build for documentation.", "body": "For some reason /lib/systemd/system/sysinit.target.wants no longer exists in the rockylinux:8 container.\n\nCreate this directory explicitly in case it does not exist." }, { "commit": "5360f2ec0af08ac1755d48f234347c240ff09f4a", "date": "2022-05-16 10:50:07 -0400", "subject": "Fix comment indentation." }, { "commit": "b598f49dedbfb2bdd19d43dae9fa9ba654f2287c", "date": "2022-05-16 09:17:00 -0400", "subject": "Update parse.auto.c with labels from 4dcc9df2.", "body": "Committed separately so it can be ignored in history/blame." }, { "commit": "4dcc9df222d2ecef6721c9ecd52b05e24024cc3d", "date": "2022-05-16 09:14:46 -0400", "subject": "Add labels in parse.auto.c to make diffs easier to read.", "body": "Because there is a lot of repetition in this file, changes can look very jumbled with existing data in a diff. Also, it can be hard to tell what is being modified if the diff does not show enough lines before and after.\n\nThis change adds labels to the end of the line to localize the diff and make it easier to see what has been changed. Also, remove some linefeeds and make separators more consistent.\n\nThe change to parse.auto.c will be committed separately so it can be ignored in history/blame." }, { "commit": "f5023a769d4580258a336cf26f2b293aa2341b29", "date": "2022-05-16 08:54:19 -0400", "subject": "Update config.guess to latest version." }, { "commit": "243eef1e520bb72ae2b28a627dda323305e38862", "date": "2022-05-16 08:51:37 -0400", "subject": "Begin v2.40 development."
}, { "commit": "901e829f6dd05fdc15d899bc7f1108883002b1d5", "date": "2022-05-16 08:46:24 -0400", "subject": "v2.39: Verify and File Bundling" }, { "commit": "19dd015d589ed3efd31a2a238249a150fb3748c1", "date": "2022-05-13 09:41:53 -0400", "subject": "Fix issues in improved path validation for repo-* commands.", "body": "If the user requested the exact repo path then strSub() would be passed an invalid start value leading to an assertion:\r\n\r\n$ pgbackrest --stanza=test repo-ls /var/lib/pgbackrest\r\nASSERT: [025]: start <= this->pub.size (on dev builds)\r\nASSERT: [025]: string size must be <= 1073741824 bytes (on prod builds)\r\n\r\nFix this by checking if the requested path exactly equals the repo path and returning an empty relative path in this case.\r\n\r\nAnother issue was that invalid subpaths were not detected if they started with the repo path. For example, /var/lib/pgbackrestsub would not generate an error if the repo path was /var/lib/pgbackrest. Fix this by explicitly checking for a / between the repo path and the subpath. This also requires special handling when the repo path is /.\r\n\r\nThis is not a live bug since the issues were found in an unreleased feature introduced in 5ae84d5." }, { "commit": "024500782e9fde91cf236db70a584beb52586d38", "date": "2022-05-12 17:02:08 -0400", "subject": "Reduce encrypted WAL segments sizes in command tests.", "body": "The encrypted archive-push and repo tests were running very slowly on 32-bit with Valgrind enabled. This appears to be an issue with a newer version of Valgrind, but it has been going on long enough that bisecting does not seem to be worthwhile.\n\nReduce the size of the encrypted test segments where possible to improve overall test performance." }, { "commit": "7d9b2e267c3f68463000cfcfec6018cc12203f23", "date": "2022-05-12 11:57:12 -0400", "subject": "Move 32-bit CI testing from Debian 9 to Ubuntu 18.04.", "body": "Debian 9 will be EOL before our next release, so move 32-bit testing to Ubuntu 18.04, which is older than Debian 10." }, { "commit": "24f138b0672ed55aeef1673b816b803487bca932", "date": "2022-05-11 16:20:41 -0400", "subject": "Remove excessive const usage in build module.", "body": "This was an experiment that attempted to create immutable structs (at least without casting). It turned out to be a bit burdensome and required unsafe-looking casting in some cases." }, { "commit": "3d8ee552fe85aae6f2f00cd2a9436fb6b34513e1", "date": "2022-05-11 15:59:37 -0400", "subject": "Move some PostgreSQL integration tests to rh7.", "body": "This makes the u20 test run faster. Since u20 is currently the longest running test, the time to complete all tests is also improved." }, { "commit": "a913113fda5886ffd2ff136cdd52a3f469eca026", "date": "2022-05-11 15:23:41 -0400", "subject": "Add option type descriptions.", "body": "This cuts down on repetition of the size descriptions and adds basic descriptions for the other option types." }, { "commit": "5fbea6da811ee93dcd966b8f7d84d6da2e65c6c5", "date": "2022-05-11 10:39:31 -0400", "subject": "Add br tag for documentation.", "body": "This tag allows for a simple linefeed in a p tag instead of being forced to start a new paragraph." }, { "commit": "8ee85bc605da4aff5069bd1b965bbb585d9fe851", "date": "2022-05-11 08:46:23 -0400", "subject": "Fix \"that that\" typos." }, { "commit": "c4f7edef2b6a9b541e94793c1dabbff7db854a9b", "date": "2022-05-11 08:42:46 -0400", "subject": "Fix typos in help."
}, { "commit": "50d409a812be577f2c296f3d6096cf97fab049ff", "date": "2022-05-10 14:17:05 -0400", "subject": "Add FAQ about backup types and restore speed.", "body": "Based on several questions/misunderstandings, provide clarification about the backup type only affecting the backup action, and not the restore." }, { "commit": "de816a0f5793747a774b98af627776112cc7cb2d", "date": "2022-05-10 13:18:26 -0400", "subject": "Remove integration expect log testing.", "body": "Integration expect log testing was originally used as a rough-and-ready way to make sure that certain code paths were being executed before the unit tests existed. Now that we have 100% unit test coverage (with expect log testing) the value of the integration expect tests seems minimal at best.\r\n\r\nBut they do cause numerous issues:\r\n\r\n- Maintenance of the expect code and replacements that are required to keep logs reproducible.\r\n- Even a trivial change can cause massive churn in the expect logs, e.g. d9088b2. These changes should be minutely audited but since the expect logs have little value now it is seldom worth the effort.\r\n- The OS version used to do expect testing (RHEL7) can only be used to test one version of PostgreSQL. This makes it hard to balance the PostgreSQL version testing between OS versions.\r\n- When a commit affects expect logs it is not clear (especially for new developers) how to regenerate them and our contributing guide is silent on the issue.\r\n\r\nThe goal is to migrate the integration tests to C and expect testing is not part of that plan. It seems best to get rid of them now." }, { "commit": "3a403944729e03c66de83e6750c38cab97743333", "date": "2022-05-10 11:18:12 -0400", "subject": "Remove obsolete test in common/memContext.", "body": "Once upon a time the allocation array was allocated up front so this test was required for the top context, which did not allocate up front.\n\nNow allocations are done on demand so this case is covered for every context that does not allocate memory." }, { "commit": "6b98b3534e15a8d277547eb2932c085bc7515eec", "date": "2022-05-10 06:52:56 -0400", "subject": "Fix typo." }, { "commit": "cc5b0614894e1b5ef78f1ceba3bc8e4760a40a24", "date": "2022-05-09 18:07:11 -0400", "subject": "Document required base branch for pull requests.", "body": "Be explicit when submitting a PR about which branch to use as the base." }, { "commit": "b4c1ca7b807a906d42d540307da3603052c4a674", "date": "2022-05-09 14:19:05 -0400", "subject": "Split 32-bit CI tests.", "body": "This helps rebalance some of the tests that are running long, i.e. d9 and u20.\n\nIt would be better to move more PostgreSQL versions to d9, but the base VM does not contain more versions. New minor versions will be out later in the week so that seems a better time to be rebuilding containers." }, { "commit": "39dddbb6bc7d296fe25a7210468b2f51ea6b1816", "date": "2022-05-09 12:48:19 -0400", "subject": "Add limited CI for ppc64le/s390x using emulation.", "body": "The emulation is so slow that running all the unit tests would be too expensive, but this at least shows that the build works and some of the more complex tests run. In particular, it is good to test on one big-endian architecture to be sure that checksums are correct.\n\nUpdate checksums in the tests where they had gotten out of date since the last time we were testing on s390x. Also use a different test in command/archivePushTest to show the name of the file when a checksum does not match to aid in debugging.\n\nThe command/archive-push test was updated but not included because there is also a permissions issue, which looks to be the same as what we see on MacOS/FreeBSD. Hopefully we'll be able to fix all of those at the same time." }, { "commit": "eefa0b161a6a02b7843e18455443a4bce41cd4b9", "date": "2022-05-09 11:59:08 -0400", "subject": "Simplify messaging around supported versions in the documentation.", "body": "The version ranges given in the user guides caused confusion. For example, because the user guide for RHEL specified PostgreSQL 9.6-11, users questioned whether pgBackRest worked for PostgreSQL 12 on RHEL.\r\n\r\nRemove these ranges and add more explanatory text to the introduction to try and make it clearer how the user guides work and which versions are covered (basically all of them)." }, { "commit": "ef4c4ab8525f81c9ed22c7e82ed12096e81703c5", "date": "2022-05-09 10:39:43 -0400", "subject": "Use variable instead of function to track FINALLY() state.", "body": "The function worked fine, but Coverity was unable to determine that the finally block was run, which led to false positives about unfreed memory.\r\n\r\nUsing a boolean in the block makes it clear to Coverity that the finally block will always be run no matter what else happens.\r\n\r\nWe'll depend on the compiler to optimize away the boolean if it is not used in a finally block. The cost of the boolean is fairly low in comparison to everything else being done in these macros, so it does not seem worth having a separate block even if the compiler is not able to eliminate the boolean.\r\n\r\nThis reverts most of 9a271e9 that fixed a bug caused by c5b5b58, which was also attempting to help Coverity understand FINALLY() blocks." }, { "commit": "e8c40a24df14fcf281de672c19d0a55dc7cb50f6", "date": "2022-05-09 09:56:19 -0400", "subject": "Remove unnecessary TRY() block in common/regExp module.", "body": "This code was written before MEM_CONTEXT_TEMP*() was available, which is a better solution." }, { "commit": "4d8c36715ddd67c11d1ab166dcd0b4850631d83d", "date": "2022-05-06 19:44:46 -0400", "subject": "Remove legacy Travis-CI configuration.", "body": "Travis-CI is now strictly a paid service. Multiple attempts to use their \"free\" service have failed due to lack of community credit and general issues with their plugin.\n\nRemove the configuration so it does not appear we are testing on Travis-CI." }, { "commit": "46b7b7287446d0bb5ed0963fd01c51fb2b08893f", "date": "2022-05-06 18:23:36 -0400", "subject": "Add hint when unable to find the primary cluster.", "body": "If all available clusters are in recovery, pgBackRest will not be able to find a primary for the backup." }, { "commit": "53bfdbc01ece396c802022fea2216d3b9d7beaa1", "date": "2022-05-06 16:02:44 -0400", "subject": "Remove useless test in config/parse unit test.", "body": "Since the packSize field is 7 bits, it could never fail the check for > 127.\n\nThe compiler will catch any packs that are larger than 7 bits and then the pack size will need to be adjusted. For now just adjust the comment to reflect what the test does and give a clearer indication of what to do when a pack grows too large." }, { "commit": "77311a9af7870e79d7a3c9ecadc268c2364458c8", "date": "2022-05-06 15:38:03 -0400", "subject": "Fix indentation.", "body": "gcc11 complains about this indentation being misleading."
}, { "commit": "efbcd975c4c5b4c29c937154e2b1e221aa3ea156", "date": "2022-05-06 13:56:58 -0400", "subject": "Update Docker test image for Debian 9.", "body": "A change invalidated the current image which has been causing the d9 test to run longer." }, { "commit": "68a410779ad92a91f6e8bfe504a9acd7fecd06f2", "date": "2022-05-06 12:32:49 -0400", "subject": "Add zNewFmt().", "body": "This replaces strZ(strNewFmt()), making the code simpler and reducing indentation." }, { "commit": "475e7c692d7e54b9417de74cb566c2ccc7158997", "date": "2022-05-06 12:11:04 -0400", "subject": "Clean up dividers in the documentation.", "body": "Dividers were used in some files, but not others, and some had section names (which are hard to maintain) and others did not.\n\nTry to make this more consistent by putting a divider in front of every section, variable block, and wherever else seems appropriate." }, { "commit": "356bc27bf200762148afb7b495b5a54efeea0159", "date": "2022-05-06 11:41:28 -0400", "subject": "Remove key dividers in help.xml.", "body": "The idea was to make this file easier to browse and edit, but in fact it is much easier to just search for the command/option needed.\n\nThe dividers were never applied consistently and at some point we decided to get rid of the comments because they were hard to keep updated. The result was a mix of styles which did nobody any favors." }, { "commit": "65d22e43257d46ff6523ffbdb3af59e30deeaea5", "date": "2022-05-06 11:11:36 -0400", "subject": "Add verify output and verbose options.", "body": "These options allow the user to control how the verify results will be output to the console and log." }, { "commit": "f405fc6ae281eebf4d61abb39963a9830fe2fa2f", "date": "2022-05-06 10:21:20 -0400", "subject": "Backup file bundling documentation.", "body": "Make the feature user visible and add documentation to the user guide." }, { "commit": "4cc0d46d606382e6b00493e73ee401ecfe5b9f3e", "date": "2022-05-06 09:34:39 -0400", "subject": "Fix comment wrapping." }, { "commit": "e70c71049ec1ff3bf1f3e6a1987c82d9b95bce82", "date": "2022-05-06 07:49:23 -0400", "subject": "Use uint8_t for optionResolveOrder.", "body": "This saves a bit of space and should not affect processing speed.\n\nOn MacOS (clang) this unexpectedly reduces the size of the binary by 16kiB but on Linux (gcc) there are no savings at all." }, { "commit": "808f7bf11c367a5be2130ea573c9447d8ea2dda8", "date": "2022-05-05 20:14:13 -0400", "subject": "Replace strNewFmt() with TEST_ERROR_FMT() in command/archive-push module.", "body": "This test was likely written before TEST_ERROR_FMT() existed." }, { "commit": "efe0a39a75ed40f5568b1f5f566fee84d76fe094", "date": "2022-05-05 20:01:02 -0400", "subject": "Use TEST_ERROR_FMT() rather than strNewFmt() in common/lock module.", "body": "These tests were likely written before TEST_ERROR_FMT() existed." }, { "commit": "5089a26633c965c4d9293d3de527a02b42d9b88d", "date": "2022-05-05 18:35:00 -0400", "subject": "Convert strNewFmt() to THROW_FMT() in config/parse module.", "body": "It's not clear why strNewFmt() was used here, but there is no need for it." }, { "commit": "876f3bbd1cd46af7f7d3dc2e9d454b77d51fff80", "date": "2022-05-05 18:15:05 -0400", "subject": "Remove COLON_STR and separator parameter from cfgParseCommandRoleName().", "body": "The separator parameter in cfgParseCommandRoleName() was useless since it was always set to : and COLON_STR did not provide any clarity in its single other usage."
}, { "commit": "7ae5478d9840d8b64a1211466e49d2835853ee55", "date": "2022-05-05 12:09:21 -0400", "subject": "Remove most _Z constants.", "body": "Most of the time these were not making the code any clearer.\n\nFor cases where they were used to construct Strings and Buffers, replace with constants.\n\nAlso cleanup unused Buffers and Strings." }, { "commit": "a6b1adb5fd533b4b9aca29646c8acf647d872959", "date": "2022-05-05 11:15:14 -0400", "subject": "Remove extraneous linefeed when writing a lock file.", "body": "Linefeeds are no longer part of the lock file format." }, { "commit": "5f8c9cd66a1b39a47acaddfbc3cf443ae8057519", "date": "2022-05-05 10:19:11 -0400", "subject": "Add ClockError for unexpected clock skew and timezone changes.", "body": "A distinct result code should help debugging of clock skew and timezone issues." }, { "commit": "b6bfd9f99d74b1f5e631cf6462b68342589d2b80", "date": "2022-05-05 09:20:49 -0400", "subject": "Strip extensions from history manifest before showing in error message.", "body": "In cases where clock skew or timezone issues are preventing backup label generation the user could see an error like this:\r\n\r\nnew backup label '20220504-152308F' is not later than latest backup label '20220504-222042F_20220504-222141I.manifest.gz'\r\n\r\nThis will happen if the most recent label is drawn from the history. It is cleaner (and probably less confusing) to strip off the extensions so the user sees:\r\n\r\nnew backup label '20220504-152308F' is not later than latest backup label '20220504-222042F_20220504-222141I'" }, { "commit": "ef672c74adf871f17f6e14f5f0d6ab70e8fa3d52", "date": "2022-05-04 14:53:05 -0400", "subject": "Prevent memContextFree() from freeing memory needed by callbacks.", "body": "The order of callbacks and frees meant that memory needed during a callback (for logging in all known cases) might end up being freed before a callback needed it.\r\n\r\nRequiring callbacks and logging to check the validity of their allocations is pretty risky and it is not clear that all possible cases have been accounted for.\r\n\r\nInstead recursively execute all the callbacks first and then come back and recursively free the context. This is safer and it removes the need to check if a context is freeing so a simple active flag (in debug builds) will do. The caller no longer needs this information at all so remove memContextFreeing() and objMemContextFreeing()." }, { "commit": "d9088b2e2b16a89d29b967187a547976f96061fc", "date": "2022-05-04 12:52:05 -0400", "subject": "Show backup percent complete in info output.", "body": "In the JSON output the percent complete is stored as an integer of the percent complete * 100. So, before display it should be converted to double and divided by 100, or split using integer mod and div.\r\n\r\nNote that percent complete will only be displayed on the host where the backup was executed. Remote hosts will show a backup/expire running with no percent complete." }, { "commit": "20782c88bc8d0842d547bd5f4fecdb292e69d1f7", "date": "2022-05-04 11:55:59 -0400", "subject": "PostgreSQL 15 support.", "body": "PostgreSQL 15 drops support for exclusive backup and renames the start/stop backup commands.\r\n\r\nThis is based on the pgdg-testing repo since beta1 has not been released yet, but it seems unlikely that breaking changes will be made at this point. beta1 should be tagged just before our next release so we'll retest before the release."
}, { "commit": "8e849ae85d6ad0ecbd1f34649be4a664712fd40c", "date": "2022-05-04 10:28:39 -0400", "subject": "Add PRs to thread locking on Github actions.", "body": "It also makes sense to lock old PRs. They can be manually unlocked if they are needed for some reason.\n\nAlso add output logging to make it easier to determine if thread locking is completing." }, { "commit": "09b387fccddb658ae93acc6ec8dc490155c4fdea", "date": "2022-05-04 09:24:35 -0400", "subject": "Move issue locking to Github actions.", "body": "The old plugin has been defunct for some time so there are currently a lot of unlocked issues.\n\nRunning this once per week seems sufficient for now. Worst case it can be run manually if it gets behind." }, { "commit": "692fe496bdb5fa6dcffeb9f85b6188ceb1df707a", "date": "2022-05-04 08:22:45 -0400", "subject": "Remove dependency on pg_database.datlastsysoid.", "body": "This column has been removed in PostgreSQL 15. Rather than add a lot of special handling, it seems better just to update all versions to not depend on this column.\r\n\r\nAdd centralized functions to identify the type of database (i.e. system or user) by name and use FirstNormalObjectId when a name is not available.\r\n\r\nThe new query in the db module will still return the prior result for PostgreSQL <= 15, which will be stored in the manifest. This is important to preserve behavior when downgrading pgBackRest. There are no concerns here for PostgreSQL 15 since older versions of pgBackRest won't be able to restore backups for PostgreSQL 15 anyway." }, { "commit": "302e0c09217a2c5c11145e84976b54677f7a544e", "date": "2022-05-03 16:53:29 -0400", "subject": "Remove extra linefeed." }, { "commit": "9a271e925c6239fbd3577f9827c34940e7e43ae4", "date": "2022-05-03 14:34:05 -0400", "subject": "Fix error thrown from FINALLY() causing an infinite loop.", "body": "Any error thrown resets execution to the last setjmp(), which means that parts of the try block need to make sure they don't get run again. FINALLY() was not doing this so if it threw an error it would end up back in the FINALLY() block, where the error would likely be thrown again, causing an infinite loop.\r\n\r\nFix this by tracking the state of FINALLY() and only running it once. This requires cleaning the error stack like CATCH*() and clearing the error like TRY_END() depending on the order of execution." }, { "commit": "b89c568b5fefd1ea0e8319402bab0f4fba6bfa2e", "date": "2022-05-03 10:50:48 -0400", "subject": "Fix obsolete variable naming." }, { "commit": "962990869454b15db058f4037dbc0b1b611fc95e", "date": "2022-05-03 10:13:32 -0400", "subject": "Error on all lock failures except another process holding the lock.", "body": "The archive-get/archive-push commands would not error for, e.g., permissions errors, when attempting to get a lock before launching the async process. Since the async process was not launched there would be no error status file and the user would get a generic failure message. Also, there would be no async log.\r\n\r\nRefactor lockAcquireFile() to throw an error when failOnNoLock = false unless the file is locked by another process. This seems to be the original intent of this parameter and there may have been a mistake when porting from Perl. In any case it looks wrong enough to be considered a bug."
}, { "commit": "eb435becb3e4d3dd8ea01b0e8899654a268d3867", "date": "2022-05-02 15:17:34 -0400", "subject": "Exclude mem context name from production builds.", "body": "The mem context name is used to produce clearer debug errors but it has no purpose in production builds.\n\nAlso remove memContextName() and access the struct directly since the name is only used within the common/memContext module.\n\nNote that a few errors that were thrown in production builds (and required the name) are now only thrown in debug builds. In practice we have not seen these errors in production builds due to extensive coverage so it does not seem worth modifying the error to work without the context name.\n\nThis saves some memory, which is worthwhile, but the goal is to refactor Strings and Variants to have their own mem contexts and this change will prevent them from using more memory than they are now, along with other changes that will be coming later." }, { "commit": "0055fa40fee2636fed88699f52e9e67cd2803f71", "date": "2022-05-02 09:45:57 -0400", "subject": "Add user:group to lock permission error.", "body": "This will help debug permissions errors when the lock file cannot be created." }, { "commit": "03c71aa606bc0dc89303b3907e7fcbd83ace79d6", "date": "2022-05-02 08:49:13 -0400", "subject": "Add hint to check the log on archive-get/archive-push async error.", "body": "If this error is thrown rather than a specific error returned from the async process, it means the async process is unable to write the status files for some reason and the only way to get the error is out of the async log.\n\nThis hint includes the exact async log path and name to make finding errors easier." }, { "commit": "126fc99c77998cc65094d92e955347100aa42531", "date": "2022-04-28 14:10:53 -0400", "subject": "Fix leaks in common/json, common/keyValue, and common/variantList.", "body": "This doesn't solve the problem of the variant code making far too many copies, but it at least plugs the leaks.\n\njsonReadVarRecurse() could leak KeyValue and VariantList.\n\nkvDup() leaked object allocations into the calling context.\n\nkvDefault() gets a more efficient return structure.\n\nkvGetList() leaked a Variant into the calling context.\n\nvarLstNewStrLst() leaked object allocations into the calling context. Update varLstDup() to reflect changes made in varLstNewStrLst()." }, { "commit": "96166539cf7df010f9077cd624f24fd77e5a4feb", "date": "2022-04-28 13:33:03 -0400", "subject": "Fix leaked String in cfgParseSize()." }, { "commit": "4872a3f121e71f79a29dd63acad34b7c3537c775", "date": "2022-04-28 12:33:39 -0400", "subject": "Improvements to test harness memory debugging.", "body": "Only set -DDEBUG_MEM for the modules currently being tested rather than globally.\n\nAlso run tests in a temp mem context. Running in the top context can confuse memory accounting when a new context is created in the top context." }, { "commit": "90f939b36f8963272cdfe8d0ed5a50606990ff5e", "date": "2022-04-28 12:31:59 -0400", "subject": "Fix leaks in common/io unit test.", "body": "These leaks make it harder to detect leaks in the core code, so fix them." }, { "commit": "8047e97e31fb133551ad69c7fd170a8a96d188d6", "date": "2022-04-28 12:17:33 -0400", "subject": "Fix leaked String and Variant in harnessPqScriptRun()." }, { "commit": "ceb303f9e2f47d9818919dfd0127e558f2d83495", "date": "2022-04-28 11:46:55 -0400", "subject": "Fix comment typo." 
}, { "commit": "c463993b4cc96a92c61af298bc95d88129068902", "date": "2022-04-28 11:41:16 -0400", "subject": "Fix leaks in the storage/s3 module.", "body": "storageS3Helper() leaked a few Strings which ended up in a long-lived context.\n\nstorageS3AuthAuto() and storageS3AuthWebId() were cleaned up by their callers but since they are not called often a temp mem context seems better.\n\nstorageS3Request() leaked an HttpRequest.\n\nstorageS3Info() leaked an HttpResponse.\n\nstorageS3PathRemoveInternal() leaked a variety of objects. Fix by freeing some of them and adding a temp mem context.\n\nstorageS3Remove() leaked an HttpResponse object.\n\nstorageWriteS3Part() leaked an HttpResponse object." }, { "commit": "4750bc94dd8279b166233463ff32f2f51fb67e18", "date": "2022-04-28 11:20:31 -0400", "subject": "Fix leaks in the storage/remote module.", "body": "storageRemoteFilterGroup() leaked a number of objects. Use a temp mem context to prevent that.\n\nstorageRemoteProtocolInfoListCallback() leaked a PackWrite.\n\nstorageWriteRemoteFreeResource() leaked a PackWrite." }, { "commit": "c123a6af9f5aee4baea1d9ab202e03df6de319e3", "date": "2022-04-28 11:19:01 -0400", "subject": "Fix leaks in the storage/posix module.", "body": "storagePosixPathCreate() leaked a String.\n\nstoragePosixPathRemoveCallback() leaked a String." }, { "commit": "d89bc6f2d241ae864cb9dcae2f5c6fc3a0e655ee", "date": "2022-04-28 10:53:11 -0400", "subject": "Fix leaks in the storage/gcs module.", "body": "storageGcsAuthToken() memory was being cleaned up by the calling context, but seems better to keep this tidy and add a temp mem context.\n\nstorageGcsRequest() leaked an HttpRequest.\n\nstorageGcsInfo() leaked a number of objects. Use a temp mem context to prevent that.\n\nstorageGcsPathRemoveCallback() leaked an HttpResponse.\n\nstorageGcsRemove() leaked an HttpResponse.\n\nstorageWriteGcsVerify() leaked a number of objects. Use a temp mem context to prevent that.\n\nstorageWriteGcsBlock() leaked an HttpResponse." }, { "commit": "083c93eaa3d54d4594048b37542276fc6f6665fb", "date": "2022-04-28 10:11:15 -0400", "subject": "Reuse Strings in iniLoad().", "body": "Reuse the section/key/value Strings by truncating them instead of creating a new one every time.\n\nAlso add an error for empty sections. This function is only used for loading info files (not config files), which should never contain an empty section." }, { "commit": "bc46d4e37b8c324d30543147eaeb8058184d3543", "date": "2022-04-28 09:50:23 -0400", "subject": "Add cvtZSubNTo*() functions.", "body": "These functions allow conversion from substrings without needing to create a String or a temporary buffer.\n\nhttpDateToTime() no longer requires a temp mem context. Also improve handling of month search to avoid an allocation.\n\nhttpUriDecode() no longer requires a temp mem context.\n\njsonReadStr() no longer requires a temp mem context.\n\npgLsnFromWalSegment() no longer requires a temp mem context.\n\npgVersionFromStr() no longer requires a temp mem context. Also do a bit of refactoring.\n\nstorageGcsCvtTime() no longer leaks six Strings per call.\n\nstorageS3CvtTime() no longer leaks six Strings per call." }, { "commit": "6e18235be8b56eaedd940d7382e09e52ba22ffe9", "date": "2022-04-28 06:52:07 -0400", "subject": "Fix leak in varLstNewStrLst().", "body": "The duplicated list elements were leaked into the calling context." }, { "commit": "aeb7568a0ffeba72f254e46d2aa5d7bcca7c9a19", "date": "2022-04-28 06:48:02 -0400", "subject": "Remove unused header."
}, { "commit": "5285b9aa7bdda8c160adde2b7d64e217fd2be123", "date": "2022-04-27 20:18:18 -0400", "subject": "Fix incorrect test param." }, { "commit": "eb65a5674dfa84d6b516490ef8796d44934bb0ef", "date": "2022-04-27 11:40:29 -0400", "subject": "Use OBJ_NEW*() macros in SocketServer object.", "body": "This was missed in ccc255d3 when the TLS server was introduced, probably because work on that commit preceded when the macros were introduced in 475b57c8. It would have been easy to miss in a merge." }, { "commit": "6f2654a5eb0e2cb63a4ff3af187f991817189dca", "date": "2022-04-26 12:53:55 -0400", "subject": "Fix leaks in the storage/azure module.", "body": "storageAzureHelper() leaked a few Strings which ended up in a long-lived context.\n\nstorageAzureNew() failed to make a copy of the endpoint. This worked because storageAzureHelper() leaked the endpoint into the long-lived parent context.\n\nstorageAzureRequest() leaked an HttpRequest.\n\nstorageAzureInfo() leaked an HttpResponse.\n\nstorageAzurePathRemoveCallback() leaked an HttpResponse.\n\nstorageAzureRemove() leaked an HttpResponse." }, { "commit": "be120c746c403b878b6bc7df83b4ffbf6e513f72", "date": "2022-04-26 12:27:51 -0400", "subject": "Refactor and fix leak in storageRepoPathExpression().", "body": "storageRepoPathExpression() could leak a StringList. Also refactor to remove unneeded assignments and create the String one time." }, { "commit": "cca6df872a7861de18f7e69e1df5183acd504681", "date": "2022-04-26 12:09:44 -0400", "subject": "Refactor functions in postgres/interface module and fix leak.", "body": "pgLsnFromWalSegment() leaked two Strings.\n\nRefactor pgLsnRangeToWalSegmentList() to create the StringList in the calling context rather than moving it later." }, { "commit": "a56fa0eb45871dff8b6362d9298290a52c7e6cd9", "date": "2022-04-26 11:59:21 -0400", "subject": "Fix leaks in protocol module.", "body": "These leaks were not a big deal individually and there are generally few protocol objects created, but the leaks ended up in mem contexts that persist for most of execution. It makes sense to keep long-lived contexts as tidy as possible." }, { "commit": "78e912a9320770122808384ae202b89fa6236e89", "date": "2022-04-26 11:20:51 -0400", "subject": "Fix leaks in info module.", "body": "*LoadFileCallback() all leaked between retries.\n\ninfoPgArchiveId() leaked a String." }, { "commit": "36b0a9fa58f4f248fdabf170235f9f624d828d56", "date": "2022-04-26 11:07:05 -0400", "subject": "Fix leaks in db module.", "body": "dbFreeResource() leaked ProtocolCommand.\n\ndbTimeMSec() leaked PackRead." }, { "commit": "78b90e5ad80f87e0c1167701ac40504dc888d0dd", "date": "2022-04-26 11:03:06 -0400", "subject": "Fix leak in configOptionRemote().", "body": "A Pack and String were leaked." }, { "commit": "9a6df398393c116c73dbbf459073271d84c45bec", "date": "2022-04-26 11:00:20 -0400", "subject": "Fix spacing." }, { "commit": "8efb4e1e7f8d806f4688c54052f2325c21f470a9", "date": "2022-04-26 10:58:12 -0400", "subject": "Fix leak in cfgCommandJobRetry().", "body": "A Variant was leaked for each retry record.\n\nAlso remove an extra linefeed." }, { "commit": "a8fed52ecb653229298682280b179fdf92438cdb", "date": "2022-04-26 10:53:13 -0400", "subject": "Fix leak in pckReadStrLst().", "body": "A String would be leaked on each loop iteration." }, { "commit": "d7e45f12a54ea186e66824b9127478eaa1355d32", "date": "2022-04-26 10:49:04 -0400", "subject": "Refactor httpRequestNew().", "body": "Move httpRequestProcess() outside of the object mem context. 
In case it ever returns a value we don't want that to end up in the object context." }, { "commit": "cd1cf337ff8011f01657ab1baca31927d5d2343d", "date": "2022-04-26 10:45:30 -0400", "subject": "Fix leak in httpHeaderPutRange().", "body": "The range string was leaked." }, { "commit": "07e280199723eef35563ec71ff0c20687d642d9b", "date": "2022-04-26 10:39:28 -0400", "subject": "Fix leak in ioFilterGroupResultAllSet().", "body": "The PackRead containing the filter results was leaked." }, { "commit": "3310decf8e4b09293e3f6b7de7e0212323ad0c81", "date": "2022-04-26 10:36:27 -0400", "subject": "Add missing FUNCTION_TEST*() macros." }, { "commit": "7eed9730aa8af91999e542394d05ef53c1cc7e36", "date": "2022-04-26 10:34:10 -0400", "subject": "Fix leak in cipherBlockNew().", "body": "The string used to look up the cipher type was leaked." }, { "commit": "6eed4125e6297bc6e80c9c11f839ba65979293ee", "date": "2022-04-26 10:29:48 -0400", "subject": "Improved memory management in the common/exit module.", "body": "Refactor so that error detail is only logged in one place. This reduces calls to exitErrorDetail() and LOG_INTERNAL_FMT().\n\nFix minor leaks in exitErrorDetail() and exitSafe()." }, { "commit": "fa6c68cb029e675d9f4b3e4fa48862b6644ecc78", "date": "2022-04-26 10:18:24 -0400", "subject": "Mem allocation cleanup in command/verify module.", "body": "Move the temp mem context out of verifyJobCallback() into verifyBackup() and verifyArchive(). This makes it clearer that verifyJobCallback() allocates no memory and reduces mem usage when both verifyBackup() and verifyArchive() are called.\n\nUpdate verifyErrorMsg() to return zero-terminated strings to save on allocations. The output of this function is used when formatting strings so this is also simpler. Do a similar thing in verifyRender().\n\nAlso fix a minor leak in verifyInfoFile()." }, { "commit": "3f7c8bc9230acaf2dfb0a013303559b54a54c26f", "date": "2022-04-26 10:15:47 -0400", "subject": "Fix object allocations in incorrect mem context in execOpen().", "body": "Object variables were being allocated in the calling context rather than the object context.\n\nThis is not a live bug because Exec objects are currently created and opened in a long-lived context." }, { "commit": "4dbe76a5bb6e5aae779b2538ac9ce54782f46b8f", "date": "2022-04-26 07:33:57 -0400", "subject": "Fix comment formatting." }, { "commit": "41f9d69edcf671fbda21cb6d87dbca08e48e9976", "date": "2022-04-25 15:38:49 -0400", "subject": "Combine functions in the command/stanza module into one function.", "body": "It is not clear why these were split out, but it probably had something to do with testing before storageList() could return NULL for an empty directory.\n\nAlso remove the tests that depended on a boolean return, which are no longer needed for coverage." }, { "commit": "55a828f999f6fc8a975ea5c2ea9710e2109a5c0a", "date": "2022-04-25 15:03:37 -0400", "subject": "Add temporary mem contexts and fix a leak in the command/restore module.", "body": "restoreRecoveryConf() and restoreRecoveryWriteConf() do enough work to deserve their own memory contexts.\n\nrestoreFilePgPath() was leaking a String every time it was called, which could be a lot.\n\nAlso fix a spacing issue."
}, { "commit": "4e7414d48fb3cca45ca66b3eb853a08b6daf2833", "date": "2022-04-25 14:49:08 -0400", "subject": "Add temporary mem contexts in the command/help module.", "body": "These were not really leaks since memory was being freed by the calling function, but these functions do enough work to deserve their own memory contexts." }, { "commit": "fd295f002b720dc1acc5d9523bd442f372394d15", "date": "2022-04-25 14:19:10 -0400", "subject": "Add temporary mem context to removeExpiredBackup().", "body": "This was not really a leak since memory was being freed by the calling function, but this function does enough work to deserve its own memory context.\n\nAlso fixed a doubled semicolon." }, { "commit": "9314be36b161da8128cc4149372f41b89087e844", "date": "2022-04-25 14:14:29 -0400", "subject": "Improve result declaration in backupRegExp().", "body": "No need to set this to NULL when the initial value can be used instead." }, { "commit": "5b5dbe0e7e4d3b3f412ea869487c3018c7a8d67c", "date": "2022-04-25 13:29:43 -0400", "subject": "Fix memory leaks in archivePushDrop().", "body": "A string was leaked for every file processed.\n\nSince the list can be quite large it also makes sense to reset the context occasionally." }, { "commit": "c3b08f71ce03e2b4103f12e1d7c5c05701f02ef3", "date": "2022-04-25 13:26:09 -0400", "subject": "Fix formatting in common/string module." }, { "commit": "40ef64f2be60fb947cf68aae2bb7d510ab36c8ec", "date": "2022-04-25 12:59:46 -0400", "subject": "Fix memory leaks in archivePushFile().", "body": "The errorList is only used when throwing an error and the joined list is not needed after the error is thrown, so put both in the temp mem context." }, { "commit": "774db650868c86c791f2791d02bee129a7669be5", "date": "2022-04-25 12:55:51 -0400", "subject": "Remove useless context switches in archiveGetFile()/archivePushFile().", "body": "These context switches do nothing since the list is already in the prior context." }, { "commit": "582c3dab4cff6883150c9c5de7f7b41d12982da9", "date": "2022-04-25 12:32:33 -0400", "subject": "Add strLstAddSub*() and strLstAddSubZ*() functions.", "body": "These help with readability and remove a cause of leaks." }, { "commit": "ff45f463cf4d4521df5fd69b48bd553d862f0dc1", "date": "2022-04-25 11:58:30 -0400", "subject": "Use strLstAddZ() instead of strLstAdd() where possible.", "body": "Using STRDEF() to convert the zero-terminated string to a String has no performance advantage but generates more code." }, { "commit": "7900660d3a12573db8a68bc7df46d7552dae54d1", "date": "2022-04-25 11:47:43 -0400", "subject": "Add strLstNewFmt().", "body": "Simplifies adding a formatted string to a list and removes a common cause of leaks." }, { "commit": "3475514b61fec52a5bd9e7fa73d38d5be612a9ad", "date": "2022-04-25 10:57:36 -0400", "subject": "Fix memory leak in archiveAsyncErrorClear().", "body": "A string was leaked on each call." }, { "commit": "23d645b5e7f1069296f1a51165412049e1dccbdc", "date": "2022-04-25 10:52:13 -0400", "subject": "Fix outdated comment." }, { "commit": "699f15dd2b227ad13ba313fa549c32938bd6cf2b", "date": "2022-04-25 09:24:00 -0400", "subject": "Remove THIS_MEM_CONTEXT() macro.", "body": "objMemContext(this) performs the same task and is easier to read.\n\nMost instances of this macro were removed by 6e7be3c0." 
}, { "commit": "6e7be3c0525782cb58e0eb5046f1385e4eb8b1df", "date": "2022-04-25 09:12:25 -0400", "subject": "Add MEM_CONTEXT_OBJ_*() macros.", "body": "These provide a standard way to switch to an object mem context.\n\nUpdate the two different patterns that were used before to the new macros." }, { "commit": "45c3f4d53c7e5198d4d49109398326fecd282bec", "date": "2022-04-25 09:06:26 -0400", "subject": "Improve JSON handling.", "body": "Previously read/writing JSON required parsing/render via a variant, which add many more memory allocations and loops.\r\n\r\nInstead allow JSON to be read/written serially to improve performance and simplify the code. This also allows us to get rid of many String and Variant constant which are no longer required.\r\n\r\nThe goal is to be able to read/write very large (e.g. gigabyte manifest) JSON structures, which would not be practical with the current code.\r\n\r\nNote that external JSON (GCS, S3, etc) is still handled using variants. Converting these will require more consideration about key ordering since it cannot be guaranteed as in our own formats." }, { "commit": "58f24568f5bf578e8ad0f5e4866bf2ede8b45d46", "date": "2022-04-25 08:25:48 -0400", "subject": "Add ASSERT_PARAM() macro.", "body": "Allows adding a parameter to a function that is used only for assertions." }, { "commit": "1e2b545ba47d66760cf4c6df65c6325bb6404bb2", "date": "2022-04-24 19:19:46 -0400", "subject": "Require type for FUNCTION_TEST_RETURN*() macros.", "body": "This allows code to run after the return type has been generated in the case where it is an expression.\n\nNo new functionality here yet, but this will be used by a future commit that audits memory usage." }, { "commit": "91bbe810592c8b57079230d0bf15720d74c705fa", "date": "2022-04-22 09:14:12 -0400", "subject": "Add SIZE_OF_STRUCT_MEMBER() macro.", "body": "Gets the size of a struct member, which requires a bit of additional syntax." }, { "commit": "a2eee156b5a85237d9b9ba7896b237ec4f216da0", "date": "2022-04-21 18:23:17 -0400", "subject": "Fix instances where STRDEF() was used instead of STR().", "body": "In practice this didn't cause problems because the string buffer was still valid and strSize() was not being called." }, { "commit": "627921c72ab53a88519902c17450cedfa9dbda7a", "date": "2022-04-21 08:35:31 -0400", "subject": "Add STR_SIZE() macro.", "body": "This allows efficiently creating strings where the length is already known and simplifies the STR() and STRDEF() macros." }, { "commit": "1ce613a24d4de1c219d2b7d844cf7e88649887fc", "date": "2022-04-21 08:34:03 -0400", "subject": "Use STRDEF() instead of STR() in command/help module.", "body": "STRDEF() is more efficient since this is a constant string." }, { "commit": "4daddebaca6b4b1861778bc2b09d265302c05ed1", "date": "2022-04-21 08:07:22 -0400", "subject": "Add ASSERT_DECLARE() macro.", "body": "Declare variables that will be used by later assertions with the goal of making them easier to read and maintain.\n\nThis is particularly useful for variables that are used more than once and require a lot of syntax to extract." }, { "commit": "e18b70bf555de24ba38d7c51f9d0ffbe8a02a355", "date": "2022-04-21 07:45:59 -0400", "subject": "Allow *RETURN*() macros to accept struct initializers.", "body": "Struct initializers look like multiple parameters in a macro so use __VA_ARGS__ to reconstruct them." 
}, { "commit": "ea4d73f375558c237fce696f20c19d1efef00bc1", "date": "2022-04-20 19:56:26 -0400", "subject": "Fix ordering of backup-lsn-stop field in command/restore unit test.", "body": "All fields should be alphabetical. Currently the read code is tolerant of this, but that will not always be the case.\n\nFields are always written alphabetically so this is just a test issue introduced by d8d41321." }, { "commit": "cb7a5f1ef3bbfe0a26e3cd718d9635280f2e9b83", "date": "2022-04-20 19:49:23 -0400", "subject": "Add JSON error when value does not parse in Ini object.", "body": "If the JSON value fails to parse it is helpful to have the error message, at least for debugging." }, { "commit": "da6b4abc58674902196fe549586c16094dc37e38", "date": "2022-04-20 19:41:28 -0400", "subject": "Handle missing archive start/stop in info/info backup unit test.", "body": "This is not a very realistic case since archive start/stop are always written, but it appears in many other unit tests so it should also be tested here." }, { "commit": "d897bf1ec2c74e46c56c31c6ba2ca910394ce0e5", "date": "2022-04-20 19:36:33 -0400", "subject": "Add size to info/manifest unit test.", "body": "This prevents the check from being order dependent." }, { "commit": "d6f466be2bd1b68d45f109f0332783b784838ab7", "date": "2022-04-20 19:31:35 -0400", "subject": "Remove unnecessary mem contexts in the Manifest object.", "body": "The correct context is set by the various *Add() functions so these are not needed and cause leaks, though the leaks will only be noticeable in cases where there are a lot of page checksum errors." }, { "commit": "da9f2618528e9bd4b5538ff81cc359172a36af7b", "date": "2022-04-20 14:09:49 -0400", "subject": "Add FUNCTION_TEST_NO_RETURN() macro.", "body": "This is required for the (currently) single place where a function with test FUNCTION_TEST*() macros does not return.\n\nThis allows return to be added to the FUNCTION_TEST_RETURN_VOID() macro, which means return no longer needs to be added when returning from a function early." }, { "commit": "c304fafd456f67d11b0e9e7a8f399732716cf216", "date": "2022-04-20 08:36:53 -0400", "subject": "Refactor PgClient to return results in Pack format.", "body": "Packs support stronger typing than JSON and are more efficient. For the small result sets that we deal with efficiency is probably not very important, but this removes another place where we are using JSON instead of Pack.\r\n\r\nPush checking for result struct (e.g. single row) down into PgClient since it has easy access to this information rather than needing to parse the result set to find out.\r\n\r\nRefactor all code downstream that depends on PgClient results." }, { "commit": "e699402f99f70819bd922eb6150fbe1b837eca0d", "date": "2022-04-19 18:07:11 -0400", "subject": "Remove extra linefeed." }, { "commit": "b7fccaf9949f15ea1a00abd434e765c9c4d3e2f1", "date": "2022-04-18 14:08:53 -0400", "subject": "Refactor remote storage protocol to use Packs instead of JSON.", "body": "Packs are more efficient and strongly typed so they make more sense for the protocol." }, { "commit": "cfd6c7ceb490d8c2766fd91a4719bdf9f6e4f110", "date": "2022-04-18 12:14:22 -0400", "subject": "Use specific integer types in postgres/client and db unit tests.", "body": "This will work better once we are able to transmit the results with stronger typing.\n\nAlso remove int2 which was not being used." 
}, { "commit": "9751ddc4f80928de4b42bc56c54731afa4ff16aa", "date": "2022-04-18 11:53:31 -0400", "subject": "Update postgres/client unit test to conform to current patterns.", "body": "This includes adding test titles and using constants for query and error values that repeat." }, { "commit": "bc5f6fac34fefd0595eee87f250dc9abef06d191", "date": "2022-04-18 10:47:44 -0400", "subject": "Update postgres/client unit test for changes in libpq.", "body": "There have been some behavioral changes in libpq which require changes to the test.\n\nAlso update the instructions since it is now a bit easier to run against a real cluster." }, { "commit": "214ee9eb0e8398b1871a53441f4b926eb194d765", "date": "2022-04-17 09:41:22 -0400", "subject": "Fix URL for apt.p.o archives.", "body": "A new archive repo was created in March of 2020: https://www.df7cb.de/blog/2020/apt-archive.postgresql.org.html" }, { "commit": "d103dd6238dffb5ddc3d5366c674278a67426f25", "date": "2022-04-14 20:34:42 -0400", "subject": "Return stats as a JSON string rather than a KeyValue object.", "body": "There is no need to process the stats so a KeyValue is overkill.\n\nAlso remove the performance tests that check the stat totals since this is covered in the unit tests." }, { "commit": "e1ce731f8ae5d1aea009bc369fd157cddb44e059", "date": "2022-04-14 19:37:03 -0400", "subject": "Add test for protocol greeting when a field is missing.", "body": "A missing field and a NULL field are not exactly the same so it seems best to test both.\n\nBecause of the way KeyValue objects work the error is the same, but that will not always be true." }, { "commit": "8a29d56f3cbf37166f3722ac7ffe18841e9e57f3", "date": "2022-04-14 19:18:00 -0400", "subject": "Update comment in pack module to cover a more common use case.", "body": "The KeyValue object is actively being removed so this is no longer the best example.\n\nInstead use an example that should outlive the KeyValue object." }, { "commit": "aeecd07ad87fafb34087f0df9d7c7413f749194c", "date": "2022-04-14 18:29:54 -0400", "subject": "Fix reported error line number when ini key length is zero.", "body": "The line number was one less than it should have been, which could cause some confusion.\n\nSince this only affected ini files with JSON values, which are always written programmatically, there is almost zero chance this has ever been a problem in the field." }, { "commit": "47f8e11889ab8c05e1e548c07e6a338b69745c86", "date": "2022-04-14 08:13:39 -0400", "subject": "Update FreeBSD and MacOS images for Cirrus CI.", "body": "This fixes fe1ac210. Apparently FreeBSD 12.2 went EOL and the image was immediately broken.\n\nAlso add FreeBSD 13.0 and update MacOS to Monterey." }, { "commit": "bcbac8a067e84a48e0849c5336f21044fec89df5", "date": "2022-04-11 17:54:18 -0400", "subject": "Add static keywords.", "body": "This is more efficient than pushing these variables onto the stack." }, { "commit": "fa40bcdc5cf9e6fc6f46236df258d3fa536ccdc7", "date": "2022-04-11 14:08:16 -0400", "subject": "Throw error when unable to read lock process.", "body": "Previously the process id was skipped if it did not exist. Instead, throw an error and handle the errors in downstream code.\r\n\r\nThis was probably ignored at some point to provide backward-compatibility, but that is no longer required, if it ever was." }, { "commit": "fe1ac210bba9439d9a4baa4adbadd1413c61e2f5", "date": "2022-04-11 13:17:54 -0400", "subject": "Disable FreeBSD builds on Cirrus CI.", "body": "This build has started breaking with the following error:\n\ncd .. 
&& perl ${CIRRUS_WORKING_DIR}/test/test.pl --no-gen --make-cmd=gmake --vm=none --vm-max=2 --no-coverage --no-valgrind --module=command --test=backup\n2022-04-11 17:11:53.034 P00 INFO: test begin on amd64 - log level info\n2022-04-11 17:11:53.107 P00 INFO: configure build\nld-elf.so.1: /usr/local/lib/perl5/5.32/mach/CORE/libperl.so.5.32: Undefined symbol \"strerror_l@FBSD_1.6\"\n\nDisable the build to unstick the pipeline until this can be fixed." }, { "commit": "15021a0e976c1a8eadebf642dfff293df0fed54a", "date": "2022-04-09 18:29:57 -0400", "subject": "Fix whitespace." }, { "commit": "79b204166394295e40e7ceddc359368c5770db26", "date": "2022-04-08 15:55:41 -0400", "subject": "Add lockRead*() functions for reading locks from another process.", "body": "Sometimes we need to read a lock from another process. This was done two different ways and in the case of cmdStop() was definitely hacky.\r\n\r\nCentralize the logic to make it easier to read the locks for another process. This will also make it easier to add new lock data." }, { "commit": "aad7171940afdb221a03825bee6eb2f1827289f1", "date": "2022-04-08 15:00:20 -0400", "subject": "Suppress existing WAL warning when archive-mode-check is disabled.", "body": "When archive-mode-check is disabled and archive-push is running from multiple hosts, it is very likely that the file will already exist with the same checksum, so disable the warning.\r\n\r\nHowever, if the checksums do not match, an error will still be thrown." }, { "commit": "4f543a4d67e0c18c8eccdd864a32003d45df7426", "date": "2022-04-08 11:07:26 -0400", "subject": "Handle NULL path in TEST_STORAGE_LIST when remove is specified.", "body": "Using the path variable directly resulted in a path with (null) in it, which caused the remove to fail.\n\nThe pathFull variable already exists for this purpose so use it." }, { "commit": "571dceefec5c64da2b2d79dd7e471fe0262b916a", "date": "2022-04-07 19:00:15 -0400", "subject": "Add LENGTH_OF() macro.", "body": "Determining the length of arrays that could be calculated at compile time was a bit piecemeal, with special macros used sometimes and with the math done directly other times.\n\nThis macro makes the task easier, uses less space, and automatically adjusts when the type changes." }, { "commit": "8be11d32e43bc889c4889b28f88fe20a0c4cfd60", "date": "2022-04-07 11:44:45 -0400", "subject": "Replace strCatFmt() with strCat()/strCatZ() where appropriate.", "body": "Most of these looked like copy/paste from a prior required strCatFmt() call.\n\nThere is no issue here since strCatFmt() works the same in these cases, but using strCat()/strCatZ() is more efficient." }, { "commit": "cff147a7d257e270df8fee184fdc3684347be330", "date": "2022-04-06 14:45:51 -0400", "subject": "Add default for boolean options with unresolved dependencies.", "body": "If a boolean option had an unresolved dependency then the value would be NULL, which meant the dependency would need to be checked in the code to avoid an error. For example, cfgOptionBool(cfgOptOnline) needed to be checked before it was safe to call cfgOptionBool(cfgOptArchiveCheck).\r\n\r\nAllow a default for boolean options when they are unresolved to simplify the code. This makes using the options easier and less prone to error. Not all boolean options get a dependency default in this commit, but more may be added in the future." }, { "commit": "acc9f3b72a481f64f5362d862959f300eb2aa63b", "date": "2022-04-06 10:04:08 -0400", "subject": "Move cfgParseOptionalFilterDepend() and add comment block." 
}, { "commit": "5dba0d6e9b9129c3cb7f3a4bc46da57544f36e73", "date": "2022-04-05 18:42:19 -0400", "subject": "Set option-archive-copy flag in backup.manifest to false when offline.", "body": "In offline mode the pg_wal directory is copied, but that is not the same as archive-copy, which copies the exact set of WAL required from the archive.\n\nThis flag is purely for informational purposes so there is no live bug here, but the prior behavior was certainly misleading." }, { "commit": "54b4187527287bf3dd8ae158329f0feb44eebed8", "date": "2022-04-05 13:14:42 -0400", "subject": "Show Docker output when building containers if --log-level=detail.", "body": "This helps with debugging and monitoring container builds." }, { "commit": "d8d4132118edd7f93a5b221478c85c2d213c39dd", "date": "2022-04-05 11:59:12 -0400", "subject": "Auto-select backup for restore command --type=lsn.", "body": "For PITR with --type=lsn, attempt to auto-select the appropriate backup set based on the --target LSN provided. Pick the most recent backup where backup-lsn-stop is less than or equal to the provided LSN." }, { "commit": "08d9e269c6f03e7797584e90c53eb0743b787f60", "date": "2022-04-05 09:14:56 -0400", "subject": "Refactor target type checking for clarity in restore module.", "body": "This avoids using targetTime != 0 as an indicator that a time target was selected." }, { "commit": "f5fdab2989140630a62a4ef0d4c7fc35d789f967", "date": "2022-03-31 17:09:50 +0200", "subject": "Fix typo in db-timeout help." }, { "commit": "c222ce1a5f6f813188b7e87a3838d6dce656e0ff", "date": "2022-03-25 08:52:41 -0600", "subject": "Fix tls-server-auth example and add clarifications." }, { "commit": "d4954fcbf49790475d26c90e951ac76660cef1e9", "date": "2022-03-25 07:52:16 -0600", "subject": "Fix comment typo." }, { "commit": "f60ec5055a633308422797902e9487085c862850", "date": "2022-03-24 18:43:43 -0600", "subject": "Cleanup output to stderr in unit tests.", "body": "The unit tests were ignoring stderr but nothing being output there was important. Now a test will fail if there is anything on stderr.\n\nThis makes it easier to work with -fsanitize, which outputs to stderr." }, { "commit": "50ee4b19fec0b6bc888e4ff6603166365ce88233", "date": "2022-03-24 17:55:38 -0600", "subject": "Align checksum page error list in manifest file packs.", "body": "This was left unaligned on purpose to save space but the sanitizer did not like it. Since this field is seldom used go ahead and align it to make the sanitizer happy.\n\nAlso add some macros to make working with alignment easier.\n\nFound with -fsanitize=undefined." }, { "commit": "3dd796045160c4d4dbf70ec2800ad483867629f1", "date": "2022-03-24 15:59:41 -0600", "subject": "Fix incorrect reference to stanza-update in the user guide.", "body": "This should be stanza-upgrade. Also fix in the git history cache since the comment was copied from the user guide." }, { "commit": "14016a86e7d37030ae934a004cad70888b539b33", "date": "2022-03-24 13:13:35 -0600", "subject": "Check that sha1 checksum is not empty in manifestFileUpdate().", "body": "The manifest test module was setting a blank value here and causing a stack overflow because memcpy() is used instead of strcpy().\n\nThis was really just a test issue but add an assert just in case the same were to happen in production code.\n\nAlso update a bogus checksum in the integration tests to the correct length to avoid running afoul of the assert.\n\nFound with -fsanitize=address." 
}, { "commit": "4e5ac11517022f445f4ec078cc30bd2ce3aa6d15", "date": "2022-03-24 12:56:26 -0600", "subject": "Fix incorrect struct type in list initialization.", "body": "This looks like a copy-paste error.\n\nThe code is only run during development so this is not a live issue.\n\nFound with -fsanitize=address." }, { "commit": "75b26319aea4493d8b9624a61795f8128d50c3b5", "date": "2022-03-24 12:26:09 -0600", "subject": "Use strNewZ() in cases where STRDEF() assignment goes out of scope.", "body": "If a variable assigned with STRDEF() is referenced out of scope of the STRDEF() assignment then the value is undefined.\n\nLuckily most of the instances are in tests but there is one in the core code. It is not clear if this is a live bug or not but it certainly needs to be fixed.\n\nFound with -fsanitize=address." }, { "commit": "edf6c70baa97ad9c9045c01df4f587880a78cdc6", "date": "2022-03-24 11:00:51 -0600", "subject": "Prevent signed integer overflow in cfgParseSize().", "body": "If the value and multiplier were large enough then the return value could overflow unpredictably.\n\nCheck the value to make sure it will not overflow with the current multiplier.\n\nIt would be better to present an \"out of range\" error to the user rather than \"is not valid\" but it doesn't seem worth the effort since the error is extremely unlikely.\n\nFound with -fsanitize=undefined." }, { "commit": "ccbe2a1f70b3ec0a328c7f5d0f5053ef3de1b543", "date": "2022-03-24 09:32:18 -0600", "subject": "Do not pass NULL to memcpy() in Buffer/String objects.", "body": "glibc and others seem tolerant of this but the behavior is undefined.\n\nFound with -fsanitize=undefined." }, { "commit": "98792b1b0cd084ff90343e65b64526dfede396e7", "date": "2022-03-24 09:22:05 -0600", "subject": "Do not pass NULL to bsearch()/qsort() in List object.", "body": "glibc and others seem tolerant of this but the behavior is undefined.\n\nFound with -fsanitize=undefined." }, { "commit": "333ef84606cb1597e09a324c81759a2a222e3bf4", "date": "2022-03-23 21:47:56 -0600", "subject": "Use bufUsed() instead of struct member in Buffer object.", "body": "bufUsed() is an inline function so it is just as efficient and should also be easier to read and maintain." }, { "commit": "424008d293661dfb1a167cfec2697def48057447", "date": "2022-03-23 10:41:36 -0600", "subject": "Allow files that become zero-length after the backup manifest is built.", "body": "It is possible that a file will be be truncated to zero-length after the backup manifest has been built. We could build logic into backupFile() to handle this case but it is hard to test well because of the race condition so tests would need to written directly against backupFile() and backupJobResult(). It hardly seems worth all that effort for a condition that occurs rarely, if ever.\n\nInstead just remove the manifest check and add tests to restore to make sure it handles bundled zero-length files correctly. Logging will show that the file was bundled so if it happens a lot (which seems very unlikely) then we can think about an alternate implementation." }, { "commit": "fe9fd2ff2d778b86c0df159aabe7644f18f6479f", "date": "2022-03-22 09:02:33 -0600", "subject": "Disable repo-hardlink option when repo-bundle option is enabled.", "body": "Hardlinking will not work with bundles because files are not stored individually." 
}, { "commit": "7afaac0a3d0e43e5598d1abf548559dbe6329363", "date": "2022-03-22 08:35:34 -0600", "subject": "Allow repo-hardlink option to be changed after full backup.", "body": "This rule was added because there were not sufficient tests to demonstrate that the repo-hardlink option could be changed in a backup set.\r\n\r\nRemove the restriction and add/update tests to show that it works.\r\n\r\nThis is necessary now because bundling requires that hardlinking be disabled. Rather than add code complexity, it seems better just to address this limitation." }, { "commit": "5ae84d5e474e95dcc00c174aa5111815a5a70ba5", "date": "2022-03-22 07:50:26 -0600", "subject": "Improve path validation for repo-* commands.", "body": "Check for invalid path in repo-* commands. Perform path validation and throw an error when appropriate. Path may not contain '//'. Strip trailing '/' from path. Absolute path must fall under repo path." }, { "commit": "21cef09dfd03c313531dc66ba9556164ae2835ab", "date": "2022-03-16 11:02:29 -0600", "subject": "Add AWS IMDSv2 support.", "body": "IMDSv2 provides additional security to prevent instance metadata from being read by an attacker.\r\n\r\nAll AWS instances should provide IMDSv2 but still fail back to IMDSv1 if the IMDSv2 token request fails. This is in case there are any services outside AWS that are emulating IMDSv1 but have not implemented IMDSv2." }, { "commit": "2c96327e654ab634ee4077a39d6d20a7eae1065f", "date": "2022-03-15 17:55:48 -0600", "subject": "Remove extraneous double spaces in code and comments." }, { "commit": "3f66f42ef9605a83c356e4c6eb3ef47bcb12c30d", "date": "2022-03-14 17:49:52 -0600", "subject": "Rename bundle-* options to repo-bundle-*.", "body": "It seems best for these to be repo options so they can be configured per repo, rather than globally.\r\n\r\nAll clarify usage for repo-bundle-size and repo-bundle-limit." }, { "commit": "7c9208ba856a71833dc7d8f1cf8cb2b4bb4b7414", "date": "2022-03-11 10:10:02 -0600", "subject": "Improve error message for invalid repo-azure-key.", "body": "Check that repo-azure-key is valid base64 when repo-azure-key-type = shared." }, { "commit": "0054677147121967889ead7feab0959fb19e39a6", "date": "2022-03-09 15:34:15 -0600", "subject": "Add bundle logging to backup command.", "body": "This was added to the restore command so add it to the backup command as well." }, { "commit": "dca6da86bfe84a0cda004df1140d125db1630c90", "date": "2022-03-09 15:03:28 -0600", "subject": "Optimize restore command for file bundling.", "body": "Since files are stored sequentially in a bundle, it is often possible to restore multiple files with a single read. Previously, each restored file required a separate read. Reducing the number of reads is particularly beneficial for object stores, but performance should benefit on any file system.\r\n\r\nCurrently if there is a gap then a new read is required. In the future we might set a limit for how large a gap we'll skip without starting a new read." }, { "commit": "f7ab002aa74dd2ca277123f7500018ae68a04c74", "date": "2022-03-08 12:18:23 -0600", "subject": "Improve stop command to honor stanza option.", "body": "Improve the stop command, when force and stanza options are specified, to terminate only processes holding lock files for the given stanza. Prior to these changes, termination of all processes holding lock files regardless of stanza occurred." 
}, { "commit": "514137040e60e8fff0e7f42f43b493113abae5ff", "date": "2022-03-08 08:23:31 -0600", "subject": "Add limit parameter to ioCopyP().", "body": "Allows the number of bytes copied to be limited." }, { "commit": "166039c0da1875581a08d41ab8a44adf95fa6db1", "date": "2022-03-08 08:09:58 -0600", "subject": "Fix example for repo-gcs-key-type option in configuration reference.", "body": "This looks like a copy-paste error from another option." }, { "commit": "91b718bd4f89157c7401e3904bb171387b2e8a3e", "date": "2022-03-06 14:58:43 -0600", "subject": "Fix comment typo." }, { "commit": "330e19900ee3415d920680cf6e6856bf5e8e9a69", "date": "2022-03-06 13:01:24 -0600", "subject": "Increase precision of percent complete logging for backup and restore.", "body": "For very large backups only getting an update per percent may not be often enough.\r\n\r\nAdd hundredths to the percent complete logging to provide more timely information." }, { "commit": "be731f825432c738571f4c055f1b9d42d3fa18cf", "date": "2022-03-06 12:03:57 -0600", "subject": "Fix incorrect comment.", "body": "This appears to have been blindly copied from the archive-get command." }, { "commit": "8f23b46b4be9a0ce55e0c2d13be0edfd5bdc5349", "date": "2022-03-06 11:57:20 -0600", "subject": "Replace percentage and size with a constant in restore test logs.", "body": "Checking percentage and size in every test can cause quite a bit of churn when changes are made.\n\nFollow the example of the backup tests and replace percentage and size after the few tests to reduce churn." }, { "commit": "4d2fef1c37d357965f0ffb9af0d7489f3be1371b", "date": "2022-03-06 11:48:22 -0600", "subject": "Remove redundant restoreFile() test and improve coverage.", "body": "These tests were written before the restore command was fully migrated to C so many of them have become redundant.\n\nIn the cases were they still provide coverage, add tests to synthetic restores to replace them. In general, these higher level tests provide better coverage than poking at the restoreFile() function directly." }, { "commit": "009d33aa8ebbcd00a85392e1424e8f078952e2f5", "date": "2022-03-06 11:17:01 -0600", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "c242b966e0352505ee8389fcebcfbde48b2a55d4", "date": "2022-03-06 11:14:01 -0600", "subject": "Begin v2.39 development." }, { "commit": "5249b89a2e94fdbb45461faaf2e1b0304b986ac7", "date": "2022-03-06 10:30:59 -0600", "subject": "v2.38: Minor Bug Fixes and Improvements" }, { "commit": "59a5373cf85e4ad0057b592bfe855495384dfcbd", "date": "2022-03-02 11:38:52 -0600", "subject": "Handle TLS servers that do not close connections gracefully.", "body": "Some TLS server implementations will simply close the socket rather than correctly closing the TLS connection. This causes problems when connection: close is specified with no content-length or chunked encoding and we are forced to read to EOF. It is hard to know if this is a real EOF or a network error.\r\n\r\nIn cases where we can parse the content and (hopefully) ensure it is correct, allow the closed socket to serve as EOF. This is not ideal, but the change in 8e1807c means that currently working servers with this issue will stop working after 2.35 is installed, which seems too risky." }, { "commit": "f1bdf3e04b07dc9a2eaa1fdbe5278cc18371e868", "date": "2022-03-02 10:59:06 -0600", "subject": "Add aliveness check to systemd service configuration.", "body": "If the pgbackrest service service fails to start then the user will get an error. 
However, retries will continue in the background." }, { "commit": "a66ec8d549b8f1f7b38682ad90281f1fcbb2748a", "date": "2022-03-02 10:22:15 -0600", "subject": "Revert PGDG yum repo workaround for aarch64.", "body": "da0f3a855 used a workaround to get the documentation building on aarch64 but recent changes to the PGDG yum repo have broken this workaround. Installing the regular way still doesn't work, either.\n\nReverting for now to get the CI pipeline working again." }, { "commit": "fb5051fde74bcfde85fa3b9a452a1df693e79db9", "date": "2022-02-26 13:50:30 -0600", "subject": "Use vagrant user in the Docker container.", "body": "This is a bit of legacy from the current Vagrant environment used to do the release, but since it is not as easy to change the user in Vagrant, just make the Docker environment conform.\n\nThis allows documentation to be built in a Vagrant environment (or any environment with the same user name) and to be deployed in a Docker environment." }, { "commit": "f716e98ad0e9eda94da973211e5f54e88c2c1aed", "date": "2022-02-26 11:31:52 -0600", "subject": "Suppress errors when there is stderr output for Docker builds.", "body": "Docker outputs build info to stderr even when the build is successful. This seems to be especially true on Mac M1.\n\nContainerTest.pm already does this suppression so add it to the other places where containers are built." }, { "commit": "53de3e3aeb71f8209f56167941df35bbfcb7cdc9", "date": "2022-02-26 11:17:05 -0600", "subject": "Move repo options in TLS documentation to the global section.", "body": "These options were mistakenly added to the stanza section, which works in certain cases, but is not best practice." }, { "commit": "98d525dba43ba259ff7989746b8e5be3382c189d", "date": "2022-02-25 14:03:09 -0600", "subject": "Add FAQ explaining WAL archive suffix." }, { "commit": "b33cabe08c33e29eb8aef65dd46b2c1f3cf2d186", "date": "2022-02-25 10:51:40 -0600", "subject": "Allow case-insensitive matching of HTTP connection header values.", "body": "The specification allows values for the connection header to be case-insensitive. See https://www.rfc-editor.org/rfc/rfc7230#section-6.1." }, { "commit": "632071232344a1160ef932d6e0d65eb3149ed226", "date": "2022-02-23 13:53:02 -0600", "subject": "Automatically strip trailing slashes for repo-ls paths.", "body": "Trailing slashes in at least some of the repository storage types were preventing repo-ls from displaying any content (presumably due to storage-specific behavior).\r\n\r\nSince the path with the slash should be equivalent to the path without the slash, just remove it if provided by the user." }, { "commit": "53f1b2520434221c01caf92e052403a28c455c1b", "date": "2022-02-23 13:17:14 -0600", "subject": "Improve validation of zero pages.", "body": "Checking that pd_upper == 0 is not enough since this field may be corrupted. Still use pd_upper as a quick check, but when it is zero proceed to check the rest of the page to ensure it is also all zeroes." }, { "commit": "9eec98c61302121134d2067326dbd2cd0f2f0b9c", "date": "2022-02-23 12:05:53 -0600", "subject": "Retry on page checksum validation failure during backup.", "body": "Rather than attempting to filter page checksum failures by LSN, just retry when there is a page checksum failure. If the page has not changed since the last read report it as an error. 
If the page has changed, then PostgreSQL must be modifying the page so we can ignore the error because a full page write (and possibly updates) will be in the WAL.\r\n\r\nAlso remove tests made redundant by the test merge in b4897077." }, { "commit": "5c332a0b100d5a5180b3d95bd90d4d7b521b42db", "date": "2022-02-23 11:23:32 -0600", "subject": "Fix release note attribution in cacfdd94 and f798458e." }, { "commit": "f798458e1d18fc8a866b49a4cfa0cb73536cbd41", "date": "2022-02-23 10:29:17 -0600", "subject": "Disconnect help.auto.c from build-help in Makefile.", "body": "When there was an issue with the system library path during building, the build-help rule would fail while executing ./build-help with the effect that main.c wouldn't build.\r\n\r\nBreak out help.auto.c generation from the build-help stage to allow it to be re-executed when the library path has been corrected." }, { "commit": "67bdf07e6969f8160347e895454bf729c9165884", "date": "2022-02-23 10:26:39 -0600", "subject": "Add XML to invalid XML error message.", "body": "There have been cases where pgBackRest has failed on invalid XML but it is not possible to determine what was wrong with the XML.\n\nThis will only work for XML up to about 8KiB (which is the error message limit) but it should work in most cases." }, { "commit": "10038db9c9f2b4753c3821e0b246fa8626527184", "date": "2022-02-23 09:14:27 -0600", "subject": "Add archive-missing-retry option.", "body": "Retry a WAL segment that was previously reported as missing by the archive-get command. This prevents notifications in the spool path from a prior restore from being used and possibly causing a recovery failure if consistency has not been reached.\r\n\r\nDisabling this option allows PostgreSQL to more reliably recognize when the end of the WAL in the archive has been reached, which permits it to switch over to streaming from the primary. With retries enabled, a steady stream of WAL being archived will cause PostgreSQL to continue getting WAL from the archive rather than switch to streaming.\r\n\r\nWhen disabling this option it is important to ensure that the spool path for the stanza is empty. The restore command does this automatically if the spool path is configured at restore time. Otherwise, it is up to the user to ensure the spool path is empty." }, { "commit": "cacfdd94d74307285b1b590ec0977d7db3142d0d", "date": "2022-02-23 08:01:38 -0600", "subject": "Fix typos in help and release notes." }, { "commit": "e2eb3ebacce18e9c0c2f74457a646142a0eace3d", "date": "2022-02-20 17:45:41 -0600", "subject": "Most recent news for postgresql.org." }, { "commit": "e6e1122dbcf5e667d683295f7e7e45de4bbf56bd", "date": "2022-02-20 16:45:07 -0600", "subject": "Pass file by reference in manifestFileAdd().", "body": "Coverity complained that this pass by value was inefficient:\n\nCID 376402: Performance inefficiencies (PASS_BY_VALUE)\nPassing parameter file of type \"ManifestFile\" (size 136 bytes) by value.\n\nThis was completely intentional since it gives us a copy of the struct that we can change without bothering the caller. However, updating fields is fine and may benefit the caller at some future date, and in any case does no harm now.\n\nAnd as usual it is easier not to fight with Coverity." }, { "commit": "b4897077937ee4571ba719276a44d5db0a75510e", "date": "2022-02-18 17:50:05 -0600", "subject": "Move command/backup-common tests into the command/backup module.", "body": "As much as possible it is better to get coverage with more realistic tests. 
Merging these modules will allow the page checksum code to be covered with real backups." }, { "commit": "efc09db7b9ece6e7b7f92538d56d6ab7b9798f8f", "date": "2022-02-17 07:25:12 -0600", "subject": "Limit files that can be bundled.", "body": "Limit which files can be added to bundles, which allows resume to work reasonably well. On resume, the bundles are removed and any remaining file is eligible to be resumed.\r\n\r\nAlso reduce the bundle-size default to 20MiB. This is pretty arbitrary, but a smaller default seems better." }, { "commit": "34d649579eb3bd1530aa99f0ed1879e7d3125424", "date": "2022-02-14 13:24:14 -0600", "subject": "Bundle files in the repository during backup.", "body": "Bundle (combine) smaller files during backup to reduce the number of files written to the repository (enable with --bundle). Reducing the number of files is a benefit on all file systems, but especially so on object stores such as S3 that have a high file creation cost. Another benefit is that zero-length files are only stored as metadata in the manifest.\r\n\r\nFiles are batched up to bundle-size and then compressed/encrypted individually and stored sequentially in the bundle. The bundle id and offset of each file is stored in the manifest so files can be retrieved randomly without needing to read the entire bundle. Files are ordered by timestamp descending when being assigned to bundles to reduce the amount of random access that needs to be done. The idea is that bundles with older files can be read in their entirety on restore and only bundles with newer files will get fragmented.\r\n\r\nBundles are a custom format with metadata stored in the manifest. Tar was considered but it is too limited a format, the major issue being that the size of the file must be known in advance and that is very contrary to how pgBackRest works, especially once we introduce page-level incremental backups.\r\n\r\nBundles are stored numbered in the bundle directory. Some files may still end up in pg_data if they are added after the backup is complete. backup_label is an example.\r\n\r\nCurrently, only the backup command works in batches. The restore and verify commands use the offsets to pull individual files out of the bundle. It seems better to finalize how this is going to work before optimizing the other commands. Even as is, this is a major step forward, and all commands function with bundling.\r\n\r\nOne caveat: resume is currently not supported when bundle is enabled." }, { "commit": "8046f0630715015d5adcced570c54e9a183c0e73", "date": "2022-02-14 11:07:02 -0600", "subject": "Do not retry fatal errors.", "body": "There is some evidence that retrying fatal errors, especially out of memory errors, may cause lockups. It makes sense to report fatal errors as quickly as possible and bypass retries. This may or may not fix the lockup issue but it is worth doing either way.\r\n\r\nFor now, the only fatal errors will be AssertError and MemoryError." }, { "commit": "8d0cce66f83abc1ff02a985ec3f0d7e285c09d72", "date": "2022-02-13 15:19:31 -0600", "subject": "Use normal error for protocol module error retry test.", "body": "Asserts will not be retried in a future commit, so adjust this test now to use non-assert errors." }, { "commit": "8573a2df14d0bfd5578333b68147cc12c93c6f96", "date": "2022-02-13 15:11:59 -0600", "subject": "Improve protocol module error test for protocolClientFree().", "body": "Using an assert here was never ideal and won't work once we start handling fatal errors differently." 
}, { "commit": "551e5bc6f67c91cf1e11da0cefe24f9650353adc", "date": "2022-02-11 08:11:39 -0600", "subject": "Retry errors in S3 batch file delete.", "body": "If the entire batch failed it would be retried, but individual file errors were not retried. This could cause pgBackRest to terminate during expiration or when removing an unresumable backup.\r\n\r\nRather than retry the entire batch, delete the errored files individually to take advantage of the HTTP retry rather than adding a new retry loop. These errors seem rare enough that it should not be a performance issue." }, { "commit": "b26097f8d8a7e2af6e9a21576994cbb47110c6f4", "date": "2022-02-09 10:18:39 -0600", "subject": "Add backup type filter to info command.", "body": "Support --type option in the info command to display only a specific backup type." }, { "commit": "cb630ffe3b5c819b5e97ac11d5974000bd44d632", "date": "2022-02-09 09:32:23 -0600", "subject": "Remove logic that tried to determine additional file system compression.", "body": "In theory, the additional stat() call after a file has been copied to the repo can determine if additional compression has been applied by the file system. However, it has been a very long time since we tested this in practice. There are currently no unit tests that accurately test this feature since it requires a compressed file system like ZFS to work, which never seemed worth the extra cost.\r\n\r\nIt can also add a lot of time to backups if there are a large quantity of small files.\r\n\r\nIn addition, it stands as a blocker for combining files for small file support since it is no longer possible to get per-file sizes from the viewpoint of the file system. There are several ways this could be reworked but none of them are easy while at the same time maintaining current info functionality.\r\n\r\nIt doesn't seem worth keeping an untested feature that will only work in some special cases (if it still works) when it is blocking development." }, { "commit": "755bfc4d4015a68fa06890d1d62515b8bbbc3a64", "date": "2022-02-07 08:14:28 -0600", "subject": "Fix invalid file descriptor being passed to close().", "body": "Coverity pointed out that a negative number could be passed to close(), which means the lock file would not get closed until the process ended. Proper execution does not require the file to be closed, but it is better to correctly free resources that are no longer needed." }, { "commit": "5cbb4a3e6b13598b90cae46fad28c31604f9f094", "date": "2022-02-04 15:55:24 -0600", "subject": "Remove unused backup-standby option from stanza commands." }, { "commit": "7dd657b7dd163ded9fbf7caef37eea7e08bc5a37", "date": "2022-02-04 12:48:50 -0600", "subject": "Simplify filename construction in command/verify module.", "body": "Remove some duplicated code by tracking the backup label and constructing the filename only when needed." }, { "commit": "b1da4e84e8b1fda0299931dbcb9b057ce0d3ce64", "date": "2022-02-02 14:39:39 -0600", "subject": "Revert Minio to prior release.", "body": "The most recent release of Minio has broken CI builds but there is no logging to indicate what is wrong.\n\nFor now, just use the prior release to get CI builds working again. This kind if breakage is not uncommon for Minio but they usually resolve it in the next release." }, { "commit": "9b2f10dbb49a8b869cef2c419d9957ed4c550016", "date": "2022-01-31 16:48:28 -0600", "subject": "Refactor lock code.", "body": "Update lock code to use standard common/io functions and module patterns. 
This module was developed before the common/io module existed and our patterns had stabilized." }, { "commit": "22734eb376b3183924cb7794f9111c1eeb5d92d6", "date": "2022-01-31 10:03:56 -0600", "subject": "Add ioBufferReadNewOpen() and ioBufferWriteNewOpen().", "body": "These are convenience functions to make the code a bit more compact where possible." }, { "commit": "e62e6664d3654dd63ae2eda64f981a6936b822d3", "date": "2022-01-26 13:26:22 -0600", "subject": "Remove release note about the default port being provisional.", "body": "The default port has now been approved by IANA." }, { "commit": "cf5b3a302f0835be993a7f795038d4d676cd6f01", "date": "2022-01-26 13:22:31 -0600", "subject": "Fix language in rh7 test container for aarch64.", "body": "The /etc/profile.d/lang.sh script was causing issues but it does not exist on amd64, so it seems the easiest thing was to remove it.\n\nFix how 32-bit VMs are determined now that another 64-bit architecture has been added.\n\nAnd remove some obsolete VM hashes." }, { "commit": "da0f3a85535df3389c5023afc637124acc6405d3", "date": "2022-01-26 10:15:14 -0600", "subject": "Download correct key on aarch64 in the user guide.", "body": "The default key that gets installed is for amd64. This is only required for RHEL8 it seems, so something is definitely amiss." }, { "commit": "e4df5b7d3846134832b9315e22786f679d63a46b", "date": "2022-01-24 15:21:07 -0600", "subject": "Simplify manifest file defaults.", "body": "Previously manifest load required two passes through the file list, one to load the data and one to set the defaults. This required each file to be packed twice.\r\n\r\nInstead simply note that the file value is default and then set the file defaults when they are loaded from the manifest. This is made possible by the different internal/external representations for files so the same method cannot be applied to paths and links.\r\n\r\nThis change seems to resolve the performance issues noted in 61ce586 but there is no obvious reason why." }, { "commit": "044138fbb19f02ec433e52c42684b1ecc616c624", "date": "2022-01-23 15:07:13 -0500", "subject": "Centralize common PostgreSQL options in the user guide.", "body": "Centralize these options so they are consistent across clusters.\n\nAlso, there were some options that the user doesn't really need to see, e.g. log_line_prefix. These can be set in advance so they don't need to be part of the documentation." }, { "commit": "13623d6ee50794e87ceac37c61e72ba3a92ad8b6", "date": "2022-01-23 13:53:52 -0500", "subject": "Create create_test_table() function earlier in user guide.", "body": "This function (which creates lots of tables) is generally useful for testing (not just stress testing) so create it as soon as the cluster is created.\n\nAlso add the data parameter which will insert a single row into the table so the file on disk is not zero bytes." }, { "commit": "ca13f11b4a3858587c7055b0b2b4a8b396dd8451", "date": "2022-01-22 17:20:25 -0500", "subject": "Optimization for jsonFromStrInternal().", "body": "This is an extremely hot code path when saving the manifest so every little bit helps." }, { "commit": "61ce58692f3092b72833b6e9ee87037f45b3b702", "date": "2022-01-21 17:05:07 -0500", "subject": "Pack manifest file structs to save memory.", "body": "Manifests with a very large number of files can use a considerable amount of memory. 
There are a lot of zeroes in the data so it can be stored more efficiently by using base-128 varint encoding for the integers and storing the strings in the same allocation.\r\n\r\nThe downside is that the data needs to be unpacked in order to be used, but in most cases this seems fast enough (about 10% slower than before) except for saving the manifest, which is 10% slower up to 10 million files and then gets about 5x slower by 100 million (two minutes on my M1 Mac). Profiling does not show this slowdown so I wonder if this is related to the change in memory layout. Curiously, the function that increased most was jsonFromStrInternal(), which was not modified. That gives more weight to the idea that there is some kind of memory issue going on here and one hopes that servers would be less affected. Either way, the largest use cases we have seen are for about 6 million files so if we can improve that case I believe we will be better off.\r\n\r\nFurther analysis showed that most of the time was taken up writing the size and timestamp fields, which makes almost no sense. The same amount of time was used if they were hard-coded to 0, which points to some odd memory issue on the M1 architecture.\r\n\r\nThis change has been planned for a while, but the particular impetus at this time is that small file support requires additional fields that would increase manifest memory usage by about 20%, even if the feature is not used.\r\n\r\nNote that the Pack code has been updated to use the new varint encoder, but the decoder remains separate because it needs to fetch one byte at a time." }, { "commit": "575ae77c0dee12b932469d479b2e45dbaf389737", "date": "2022-01-21 15:57:22 -0500", "subject": "Convert varNewUInt64() to VARUINT64() where possible in manifest.", "body": "VARUINT64() does not require memory to be allocated from the mem context so should be used when possible." }, { "commit": "4a73a0286387f3f2185f3f193f9c20ed0d7d926d", "date": "2022-01-21 15:22:48 -0500", "subject": "Simplify manifest defaults.", "body": "Manifest defaults for user, group, and mode were previously generated by scanning the data to find the most common values. This was very accurate but slow and complicated. It could also lead to surprising changes in the manifest when a default value suddenly changed.\r\n\r\nInstead, use the $PGDATA path to generate defaults. In the vast majority of cases the same user/group should own all the path/files and the default file mode is easily derived from the path mode. There may be some edge cases where this generates larger manifests, but in general it reduces time and complexity when saving the manifest.\r\n\r\nRemove the MCV code since it is no longer used." }, { "commit": "b0db4b8ff00db913cb69308017a56155d6070830", "date": "2022-01-21 08:52:51 -0500", "subject": "Simplify base path mode in mock/all integration tests.", "body": "Change the mode back to 0700 earlier to reduce churn in the expect logs.\n\nThis will be especially important in a future commit that gets the defaults exclusively from the base path." }, { "commit": "8c062e1af824c3dc80ff94b3df8d433f95e88d0f", "date": "2022-01-20 14:01:10 -0500", "subject": "Remove primary flag from manifest.", "body": "This flag was only being used by the backup command after manifestNewBuild() and had no other uses. 
There was a time when it was important for integration testing but the unit tests now fulfill this role.\r\n\r\nSince backup is the only code concerned with the primary flag, move the code into the backup module.\r\n\r\nWe don't have any cross-version testing but this change was tested manually with the most recent version of pgBackRest to make sure it was tolerant of the missing primary info. When an older version of pgBackRest loads a newer manifest the primary flag will always be set to false, which is fine since it is not used." }, { "commit": "16559d9e421c66596d21b047c8afe1b2c96ccd82", "date": "2022-01-20 08:41:05 -0500", "subject": "Use the PG_FILE_POSTMTRPID constant where appropriate.", "body": "Do the same in Perl with the MANIFEST_FILE_POSTMTRPID constant." }, { "commit": "4ca0590a51a665535fbba903b8f55e10557b606a", "date": "2022-01-20 08:06:42 -0500", "subject": "Refactor backupProcessQueue() to use BackupJobData struct.", "body": "BackupJobData has several members that backupProcessQueue() needs so it is more efficient to use them rather than passing them separately or getting them from the configuration." }, { "commit": "e21ba7c92bc15e9314eefa80cef8f17cf2f0e224", "date": "2022-01-18 17:40:53 -0500", "subject": "Remove extra spaces." }, { "commit": "f84909f85f7dfb202ac812c67665cc590a5acad4", "date": "2022-01-18 17:26:30 -0500", "subject": "Fix issue with Posix read offset handling after an error.", "body": "Coverity pointed out that -1 could be passed to lseek() (added in a79034ae) after a file failed to open because it is missing. Overall it seems simpler to enclose the success code in an else block to prevent any repeats of this mistake in the future.\n\nThis was not an active bug because there are currently no cases where we do read offsets in a file that is allowed to be missing.\n\nAlso remove the result flag since it is easier to just check that the file descriptor is valid." }, { "commit": "91721e391a4d83a405a0d1be31b854312f9f157d", "date": "2022-01-17 15:39:31 -0500", "subject": "Fix typo." }, { "commit": "b791f1c82f53fa16866837d4544b58243c682f7f", "date": "2022-01-15 14:33:38 -0500", "subject": "Implement restore ownership without updating manifest internals.", "body": "Updating the manifest this way was not a great idea because it broke abstraction for the object. This meant certain changes to the interface and internals were not possible because the code was modifying internal manifest data.\n\nInstead track the user replacements entirely in the restore module.\n\nThis also has the benefit of eliminating a pass over the manifest path/file/link lists." }, { "commit": "3097acd73a21b630aa5352be57a82b031930f96b", "date": "2022-01-13 08:46:14 -0500", "subject": "Add support for AWS S3 server-side encryption using KMS.", "body": "AWS S3 integrates with AWS Key Management Service (AWS KMS) to provide server side encryption of S3 objects. This integration protects objects under encryption keys that never leave AWS KMS unencrypted." }, { "commit": "92ea3e05fb43e5272a19fb1e3208412b75008c24", "date": "2022-01-11 19:15:09 -0500", "subject": "cfgOptionGroupName() replacement missed in aced5d47." 
}, { "commit": "a79034ae2f6a8ec66b84c62ab6cd4042d2c1a249", "date": "2022-01-11 14:42:53 -0500", "subject": "Add read range to all storage drivers.", "body": "The range feature allows reading out an arbitrary chunk of a file and will be important for efficient small file support.\r\n\r\nNow that all drivers are required to support ranges remove the storageFeatureLimitRead feature flag that was implemented only by the Posix driver." }, { "commit": "2fd100bf12760a4bb1996fb464ae54b5c379988e", "date": "2022-01-10 17:40:18 -0500", "subject": "Rename ConfigOptionGroupData.indexDisplay to indexName.", "body": "This is more consistent with the function name and purpose." }, { "commit": "2cddbbdee01391ef0885f9262f3acc488382eee2", "date": "2022-01-10 17:20:48 -0500", "subject": "Remove obsolete cfgOptionHostPort()/cfgOptionIdxHostPort().", "body": "These functions were made obsolete by the refactor in 6a124584." }, { "commit": "7c627c12b735345cc3d733ae66a52b1fe0030e47", "date": "2022-01-10 17:00:58 -0500", "subject": "Refactor option and option group config structs.", "body": "This allows individual structs to be assigned to variables to make the code easier to read and perhaps a bit more efficient." }, { "commit": "aeecb500f54561f7d52fa1b637f7f6e1f7a668de", "date": "2022-01-10 14:47:29 -0500", "subject": "Improve implementation of cfgOptionIdxName().", "body": "Cache option names after they are generated rather than regenerating them each time." }, { "commit": "aced5d47ed8e57ca7f36f92fe068f80b66f81649", "date": "2022-01-10 09:10:06 -0500", "subject": "Replace cfgOptionGroupIdxToKey() with cfgOptionGroupName().", "body": "Do the replacement anywhere cfgOptionGroupIdxToKey() is being used to construct a group name in a message. cfgOptionGroupName() is better for this case since it also includes the name of the group so that it does not need to be repeated in each message." }, { "commit": "9657f1b32519272edaaefd379355137afd554e82", "date": "2022-01-09 19:53:05 -0500", "subject": "Dynamically allocate index to key index map.", "body": "Now the config/config module has no notion of max option indexes. The config/parse still needs the max but this will be removed at a later date." }, { "commit": "e4b48eb430a564184ab739908df55813dd970aa4", "date": "2022-01-09 19:43:44 -0500", "subject": "Fix inconsistent group display names in messages.", "body": "In other instances there are no dashes, e.g. repo1 or pg1. Make these messages match." }, { "commit": "5f78a5fc18120c67cb8a68f83a4fb9bcf96982e2", "date": "2022-01-09 13:19:43 -0500", "subject": "Add ioCopy().", "body": "Functionality to copy from IoRead to IoWrite is frequently used so centralize it. This also simplifies coverage testing in places where a loop was required before." }, { "commit": "47954774c6cc70a6fc9a6c8836d82a87e341ea11", "date": "2022-01-09 10:11:00 -0500", "subject": "Combine encrypted backupFile() tests with unencrypted tests.", "body": "This makes it easier to comment out all the tests while developing without getting unused variable errors." }, { "commit": "d866dd5c297dc046581134a108032cf9694cae0e", "date": "2022-01-07 14:09:58 -0500", "subject": "Add backup LSNs to info command output.", "body": "The backup LSNs are useful for performing LSN-based PITR. LSNs will not be displayed in the general text output (without --set) because they are probably not useful enough to deserve their own line." 
}, { "commit": "bb4b30ddd3f80c246c565eb4769efc82b9509648", "date": "2022-01-06 15:34:04 -0500", "subject": "Remove support for PostgreSQL 8.3/8.4.", "body": "There is no evidence that users need 8.3/8.4 anymore but it does cost us in terms of development and testing, especially now that we have a number of new backup/restore features planned.\r\n\r\nIt seems to make sense to remove this support now. If there are users who need to use/migrate from these versions they can use an older version of pgBackRest." }, { "commit": "ef62ef23799f081582295f1f3e8a92691b628f53", "date": "2022-01-06 14:24:32 -0500", "subject": "Fix comment missed in 6bd280f7." }, { "commit": "fdbeb8e7d64754d35dd30e4f871834756c5d77ca", "date": "2022-01-06 14:22:56 -0500", "subject": "Fix typo in error message." }, { "commit": "a82f0179cdd5742e5c5b1e0bdfd23166753fdb38", "date": "2022-01-04 16:11:27 -0500", "subject": "Note that replications slots are not restored.", "body": "Update documentation and help to note that replication slots are not restored and reference the PostgreSQL documentation to explain why." }, { "commit": "226cfbdcde55ca384e74d7d1348c447eecd079fa", "date": "2022-01-04 15:48:00 -0500", "subject": "Fix typo." }, { "commit": "591a72c59ec403341d29eaafe3030f4651732192", "date": "2022-01-03 08:57:05 -0500", "subject": "Update config.guess and config.sub to latest versions." }, { "commit": "75e4580919ed139f00db7c4c3141dbefc3da131b", "date": "2022-01-03 08:52:25 -0500", "subject": "Begin v2.38 development." }, { "commit": "f18f2d9991da29533ad8e9859074c4f39185420f", "date": "2022-01-03 08:43:55 -0500", "subject": "v2.37: TLS Server" }, { "commit": "62fbee72ad319f92c9410ac8dbab2f81fe945a36", "date": "2022-01-01 10:50:16 -0500", "subject": "Update LICENSE.txt and PostgreSQL copyright for 2022." }, { "commit": "d6ebf6e2d67331a838f53beda1c186c527b56a8e", "date": "2021-12-30 18:54:36 -0500", "subject": "Remove dead test code." }, { "commit": "fccb7f7dd45c6c373d0cfa74b90d69ca483aa3af", "date": "2021-12-28 17:39:22 -0500", "subject": "Add release note regarding IANA approval of the default TLS port." }, { "commit": "6a12458440168f13cb05d70f36ea54b1860e390c", "date": "2021-12-16 10:30:59 -0500", "subject": "Parse protocol/port in S3/Azure endpoints.", "body": "Utilize httpUrlNewParseP() to parse endpoint and port from the URL in the S3 and Azure helpers to avoid issues where protocol was not expected to be part of the URL." }, { "commit": "f06101de77a980c7e4115762f2fc301280aa4127", "date": "2021-12-16 09:47:04 -0500", "subject": "Add TLS server documentation.", "body": "Add documentation and make the feature visible." }, { "commit": "615bdff4030a31bfedfe7df04676e3948ec9c2c0", "date": "2021-12-14 14:53:41 -0500", "subject": "Fix socket leak on connection retries.", "body": "This leak was caused by the file descriptor variable getting clobbered after a long jump. Mark it as volatile to fix.\r\n\r\nTesting this is a bit complex because the issue only happens in optimized builds, if at all. Put the test into the performance suite, which is always optimized, until a better idea presents itself." }, { "commit": "a73fe4eb966f9685f6e4179c397a10c1e7f15f19", "date": "2021-12-10 15:53:40 -0500", "subject": "Fix restore delta link mapping when path/file already exists.", "body": "If a path/file was remapped to a link using either --link-map or --link-all there would be no affect if the path/file already existed. 
If a link existed it would be properly updated and converting a link to a path/file also worked.\r\n\r\nThe issue happened during delta cleanup, which failed to check if the existing path/file had been remapped to a link.\r\n\r\nAdd checks for newly mapped path/file links and remove the old path/file as required." }, { "commit": "19a7ec69debfe6587fcc1163451896590c96bf21", "date": "2021-12-10 15:04:55 -0500", "subject": "Close expect log file when unit test completes.", "body": "This did not cause any issues, but it is better to explicitly close open files." }, { "commit": "c38e2d31709804eb4b9125a15ad84c8fc813f366", "date": "2021-12-08 15:00:19 -0500", "subject": "Add verb to HTTP error output.", "body": "This makes it easier to debug HTTP errors." }, { "commit": "be4ac3923cb77873da298a30aca5d847b3c635af", "date": "2021-12-08 13:57:26 -0500", "subject": "Error when restore is unable to find a backup to match the time target.", "body": "This was previously a warning but the warning is easy to miss so a lot of time may be lost restoring and recovering a backup that will not hit the target.\r\n\r\nSince this is technically a breaking change, add an \"important note\" about the change to the release." }, { "commit": "672330593789f07aaad90bbafcd2597cbc602686", "date": "2021-12-08 12:29:20 -0500", "subject": "Add warning when checkpoint_timeout exceeds db-timeout.", "body": "In the backup command, add a warning if start-fast is disabled and the PostgreSQL checkpoint_timeout is greater than db-timeout.\r\n\r\nIn such cases, we might time out before the checkpoint occurs and the backup really starts." }, { "commit": "bd2ba802db11c505ec69943fa81b2b379073fbf4", "date": "2021-12-08 10:16:41 -0500", "subject": "Check that clusters are alive and correctly configured during a backup.", "body": "Fail the backup if a cluster stops or the standby is promoted. Previously, shutting down the primary would cause an error but it was not detected until the end of the backup. Now the error will happen sooner and a promotion on the standby will also cause an error." }, { "commit": "7b3ea883c7c010aafbeb14d150d073a113b703e4", "date": "2021-12-07 18:18:43 -0500", "subject": "Add SIGTERM and SIGHUP handling to TLS server.", "body": "SIGHUP allows the configuration to be reloaded. Note that the configuration will not be updated in child processes that have already started.\r\n\r\nSIGTERM terminates the server process gracefully and sends SIGTERM to all child processes. This also gives the tests an easy way to stop the server." }, { "commit": "49145d72bac16498cdbf5eeb3cd6128ea0be0667", "date": "2021-12-07 09:21:07 -0500", "subject": "Add timeline and checkpoint checks to backup.", "body": "Add the following checks:\r\n\r\n* Checkpoint is updated in pg_control after pg_start_backup(). This helps ensure that PostgreSQL and pgBackRest have a consistent view of the storage and that PGDATA paths match.\r\n* Timeline of backup start WAL file matches pg_control. Hard to see how this one could get hit, but we have the power...\r\n* Standby is on the same timeline as the primary. If not, this standby is not following the primary.\r\n* Last standby checkpoint is not greater than the backup checkpoint. If so, this standby is not following the primary.\r\n\r\nThis also requires some additional plumbing to read/write timeline/checkpoint from pg_control and parse timelines from WAL filenames.
There were some changes in the backup tests caused by the fact that pg_control now has different contents for each backup.\r\n\r\nThe check to ensure that the required checkpoint was reached on the standby should also be updated to use pg_control (it currently uses pg_control_checkpoint()), but that requires non-trivial changes to the test harness and will need to wait." }, { "commit": "9c76056dd0d1d2b07a89646b087c5c8d36ab97f5", "date": "2021-11-30 16:21:15 -0500", "subject": "Add error type and message to CHECK() macro.", "body": "A CHECK() worked exactly like ASSERT() except that it was compiled into production code. However, over time many checks have been added that should not throw AssertError, which should be reserved for probable coding errors.\n\nAllow the error code to be specified so other error types can be thrown. Also add a human-readable message since many of these could be seen by users even when there is no coding error.\n\nUpdate coverage exceptions for CHECK() to match ASSERT() since all conditions will never be covered." }, { "commit": "0895cfcdf7d3f15b8029f73ed62c6094d30724b3", "date": "2021-11-30 13:23:11 -0500", "subject": "Add HRN_PG_CONTROL_PUT() and HRN_PG_CONTROL_TIME().", "body": "These macros simplify management of pg_control test files.\n\nCentralize time updates for pg_control in the command/backup module. This caused some time updates in the logs.\n\nFinally, move the postgres module after the storage module so it can use storage macros." }, { "commit": "01ac6b6cac86ea857e54a3b1c45077df1e128a75", "date": "2021-11-30 08:28:36 -0500", "subject": "Autogenerate test system identifiers.", "body": "hrnPgControlToBuffer() and hrnPgWalToBuffer() now generate the system id based on the version of Postgres. If a value less than 100 is specified for systemId then it will be added to the default system id so there can be multiple ids for a single version of PostgreSQL.\n\nAdd constants to represent version system ids in tests. These will eventually be auto-generated.\n\nThis changes some checksums and we no longer have big-endian test systems, so X those checksums out so it is obvious they are no longer valid." }, { "commit": "3f7409019df112ec50efb6c3db6f7780c9a63c87", "date": "2021-11-24 16:09:45 -0500", "subject": "Ensure ASSERT() macro is always available in test modules.", "body": "Tests that run without DEBUG for performance did not have ASSERT() and were using CHECK() instead.\n\nInstead ensure that the ASSERT() macro is always available in tests." }, { "commit": "dcb4f09d8315e92c0877b589f3fa9b7f0fa65f93", "date": "2021-11-23 09:37:12 -0500", "subject": "Revert changes to backupFilePut() made in 1e77fc3d.", "body": "These changes were made obsolete by a3d7a23a." }, { "commit": "7e35245dc3416238a84a43abbecdf976170dea91", "date": "2021-11-23 08:07:31 -0500", "subject": "Use ASSERT() or TEST_RESULT*() instead of CHECK() in test modules." }, { "commit": "a3d7a23a9d90611a3d31947598fbea240b250710", "date": "2021-11-22 12:52:37 -0500", "subject": "Use infoBackupDataByLabel() to log backup size.", "body": "Eliminate summing and passing of copied files sizes for logging backup size.\r\n\r\nInstead, utilize infoBackupDataByLabel() to pull the backup size for the log message."
}, { "commit": "1a0560d363d28737befb8c222647783d4fc2ca29", "date": "2021-11-19 12:22:09 -0500", "subject": "Allow y/n arguments for boolean command-line options.", "body": "This allows boolean boolean command-line options to work like their config file equivalents.\r\n\r\nAt least for now this behavior will remain undocumented since all examples in the documentation will continue to use the standard syntax. The idea is that it will \"just work\" when options are copied out of config files rather than generating an error." }, { "commit": "2d963ce9471808172f879916c3f3accc35f14d56", "date": "2021-11-18 17:23:11 -0500", "subject": "Rename server-start command to server." }, { "commit": "1f14f45dfb0d1677a695719381cbd5a8a3c6c986", "date": "2021-11-18 16:18:10 -0500", "subject": "Check archive immediately after backup start.", "body": "Previously the archive was only checked at the end of the backup to ensure all WAL required to make the backup consistent was present. The problem was that if archiving was not functioning then the backup had to complete before the user found out, which could be a while if the database was large enough.\r\n\r\nAdd an archive check immediately after backup start so failures are reported earlier.\r\n\r\nThe trick is to determine which WAL to check. If the repo is new there may not be any WAL in it and pg_start_backup() will not switch the WAL segment if it is empty. These are both likely scenarios when setting up and/or testing pgBackRest.\r\n\r\nIf the WAL segment is switched by pg_start_backup(), then check the archive for the segment that was detected prior to backup start. This should be common on normal running clusters with regular activity. Note that this might not be the segment immediately prior to the backup start segment if WAL volume is high.\r\n\r\nIf pg_start_backup() did not switch the WAL then we can force a switch on PostgreSQL >= 9.3 by creating a restore point. In that case the WAL to check will be the backup start WAL. This is most likely to happen on idle systems, during testing, or immediately after a repo switch.\r\n\r\nAn advantage of this approach other than earlier notification is that the backup directory will not be created so no resume will be attempted on the next backup.\r\n\r\nNote that some additional churn was created in backup.c because the load of archive.info needs to be done earlier." }, { "commit": "dea752477ab8e812cdbd717eb2091baf3f5d0906", "date": "2021-11-17 16:39:04 -0500", "subject": "Remove obsolete statement about future multi-repository support." }, { "commit": "0949b4d35fdd04c55927eb6a107d881376dbe73c", "date": "2021-11-16 18:26:21 -0500", "subject": "Add linefeed and remove space." }, { "commit": "809f0bbc638cdd95540e2257383147919f82e8f9", "date": "2021-11-16 11:34:53 -0500", "subject": "Add infoBackupLabelExists().", "body": "This is easier to read than using infoBackupDataByLabel() != NULL.\n\nIt also allows an assertion to be added to infoBackupDataByLabel() to ensure that a NULL return value is not used unsafely." }, { "commit": "1e77fc3d75490b7a1b6a0b31be9298c995ec672f", "date": "2021-11-16 10:21:32 -0500", "subject": "Include backup_label and tablespace_map file sizes in log output.", "body": "In cases where they are returned by postgres, include backup_label and tablespace_map file sizes in the backup size value output in the log." 
}, { "commit": "6b5322cdad7163d91b43d37d9d8eeaa39ac7f214", "date": "2021-11-16 09:27:15 -0500", "subject": "Add findutils package to RHEL 8 documentation container.", "body": "This package was dropped from the most recent Rocky Linux 8 image." }, { "commit": "df89eff429e9b8fbc68d9e9895badf9719fd31d2", "date": "2021-11-15 16:53:41 -0500", "subject": "Fix typos and improve documentation for the tablespace-map-all option." }, { "commit": "fcae9d35038d454c674921c65beb02b195981480", "date": "2021-11-15 16:42:46 -0500", "subject": "Fix parameter test logging in parseOptionIdxValue()." }, { "commit": "b3a5f7a8e27768c445458e47dad626609814fbb7", "date": "2021-11-15 14:32:22 -0500", "subject": "Add tablespace_map file to command/backup test module.", "body": "The code worked fine but better to have explicit tests for this file." }, { "commit": "e62ba8e85eaf469052960c4fd71ffaf26c1a1baa", "date": "2021-11-12 17:15:45 -0500", "subject": "Add path to pgbench used for stress test in user guide.", "body": "This allows the stress test to run on RHEL." }, { "commit": "43cfa9cef776360e592882c0b787704dbeb36cb3", "date": "2021-11-10 12:14:41 -0500", "subject": "Revive archive performance test.", "body": "This test was lost due to a syntax issue in a58635ac.\n\nUpdate the test to use system() to better mimic what postgres does and add logging so pgBackRest timing can be determined." }, { "commit": "dd96c29f963609fad38dac3349d7fa41e40722bb", "date": "2021-11-10 07:53:46 -0500", "subject": "Refactor postgres/client module with inline getters/setters.", "body": "Extend the pattern introduced in 79a2d02 to the postgres/client module." }, { "commit": "afe77e76e0adf948138d797e227a6f4c7d47c2eb", "date": "2021-11-10 07:31:02 -0500", "subject": "Update contributor for 6e635764." }, { "commit": "6e635764a66278d5a8c2b4d30b23063bc3923067", "date": "2021-11-09 13:24:56 -0500", "subject": "Match backup log size with size reported by info command.", "body": "Properly log the size of files copied during the backup, matching the backup size returned from the info command.\r\n\r\nIn the reference issue, the incremental backup after switchover logs the size of all files evaluated rather than only the size of the files copied in the backup." }, { "commit": "d05d6b87142347cb4891304833db389dcf7f9a81", "date": "2021-11-08 09:39:58 -0500", "subject": "Do not delete manifests individually during stanza delete.", "body": "This appears to have been an attempt to not delete files that we don't recognize, but it only works in narrow cases and could leave the user is a position of not being able to complete the stanza delete without manual intervention. It seems better just to proceed with the delete, especially since the info files have already been removed.\n\nIn addition, deleting the manifests individually could be slow on object stores if there were a very large number of backups." }, { "commit": "bb03b3f41942d0b781931092a76877ad309001ef", "date": "2021-11-04 09:44:31 -0400", "subject": "Refactor switch statements in strIdBitFromZN().", "body": "Coverity does not like fall-throughs either to or from the default case so refactor to avoid that." }, { "commit": "676b9d95dd2467d4bddd402b5cd2b4f445c71944", "date": "2021-11-04 08:19:18 -0400", "subject": "Optional parameters for tlsClientNew().", "body": "There are a number of optional parameters with the same type so this makes them easier to track and reduces churn when new ones are added." 
}, { "commit": "038abaa71d816cc87b382bd81d3df62ddec9455a", "date": "2021-11-03 15:23:08 -0400", "subject": "Display size option default and allowed values with appropriate units.", "body": "Size option default and allowed values were displayed in bytes, which was confusing for the user.\r\n\r\nThis also lays the groundwork for adding units to time options.\r\n\r\nMove option parsing functions into a common module so they can be used from the build module." }, { "commit": "1b93a772369bbb3a936099e0d9d5cc79bad1e0f6", "date": "2021-11-03 12:14:17 -0400", "subject": "Use void * instead of List * to avoid Coverity false positives.", "body": "Coverity complains that this should be \"List\" but that is clearly not correct." }, { "commit": "2a576477b316238473525e56bc8fc8ea5790455f", "date": "2021-11-03 11:36:34 -0400", "subject": "Add --cmd option.", "body": "Allows users to provide an executable to be used when pgbackrest generates command strings that expect to invoke pgbackrest. These generated commands are written to files by pgbackrest, e.g. recovery.conf." }, { "commit": "c5b5b5880619d0994ab4a8feb3f60ab52170b61b", "date": "2021-11-03 10:36:31 -0400", "subject": "Simplify error handler.", "body": "The error handler used a loop to process try, catch, and finally blocks. This worked fine but static analysis tools like Coverity did not understand that the finally block would always run and so there were false positives about double-free, unfreed resource, etc.\r\n\r\nThis implementation removes the loop, which simplifies everything, and makes it clear that the finally block will always run. This cuts down on Coverity false positives.\r\n\r\nThis implementation also catches lack of coverage on empty catch blocks so a few test fixes were committed separately in d74fe7a.\r\n\r\nA small refactor in backup.c is required because gcc 10.3.1 on Fedora 33 complains that the reason variable may be used uninitialized. It's not clear why this is the case, but reducing the scope of the TRY block fixes the issue." }, { "commit": "cff961ede7e41fa8035ffe7451a22eb5ea0e46c1", "date": "2021-11-03 07:38:06 -0400", "subject": "Centralize logic to build value lists during config rendering.", "body": "This reduces duplication and makes it easier to add new types." }, { "commit": "7f6c513be925c77bc6a177408efcf79f624ffc94", "date": "2021-11-03 07:27:26 -0400", "subject": "Add StringId as an option type.", "body": "Rather the converting String to StringIds at runtime, store defaults in StringId format in parse.auto.c and convert user input to StringId during parsing." }, { "commit": "b13844086d419dc3070bcce4e918b2353bf4887c", "date": "2021-11-01 17:35:19 -0400", "subject": "Use cfgOptionStrId() instead of cfgOptionStr() where appropriate.", "body": "The compress-type, repo-type and log-level-* options have allow lists, which means it is more efficient to treat them as StringIds.\r\n\r\nFor compress-type and log-level-* also update the functions that convert them to enums." }, { "commit": "b237d0cd592bbc6c6ee9280fb7aed264bf79eb9d", "date": "2021-11-01 10:43:08 -0400", "subject": "Remove placeholder bz2 helper data.", "body": "This placeholder data should have been removed when bz2 support was added in a021c9fe053." }, { "commit": "f4e281399a81835821547ea5c78ed7a189914d3d", "date": "2021-11-01 10:27:57 -0400", "subject": "Remove unused protocol log level.", "body": "This log level was used in the Perl code but was never ported to C." 
}, { "commit": "bc352fa6a8cff7cc08b6c7f3cdfac664d2b0805f", "date": "2021-11-01 10:08:56 -0400", "subject": "Simplify strIdFrom*() functions.", "body": "The strIdFrom*() forced the caller to pick an encoding, which led to a number of TRY...CATCH blocks in the code. In practice the caller does not care which encoding is used as long as the string is valid for some encoding.\r\n\r\nUpdate the strIdFrom*() function to try all possible encodings and only throw an error when the string is not valid for any of them." }, { "commit": "a92d7938197d1035e362390ce467ae827cbae051", "date": "2021-11-01 09:11:43 -0400", "subject": "Update automake version.", "body": "There were no changes to install.sh in this version." }, { "commit": "904b897f5e89542784af64b364a49205e7a6e040", "date": "2021-11-01 09:03:42 -0400", "subject": "Begin v2.37 development." }, { "commit": "42fd6ce4e09ee92614cfbfb6766d9c3a6ba9cc1a", "date": "2021-11-01 08:59:14 -0400", "subject": "v2.36: Minor Bug Fixes and Improvements" }, { "commit": "6abb06248c2829f2c27a7a553d373b0fdf70cfc3", "date": "2021-10-29 11:45:50 -0400", "subject": "Make analytics optional for HTML documentation.", "body": "Analytics should only be added to the current HTML documentation on the website, so exclude them by default." }, { "commit": "13366573261bf2562bc93ef77014f7d16b815e5b", "date": "2021-10-29 10:35:56 -0400", "subject": "Restore some linefeed rendering behavior from before def7d513.", "body": "The new rendering behavior is correct in normal cases, but for the pre-rendered HTML blocks in the command and configuration references it causes a lot of churn. This would be OK if the new HTML was diff-able, but it is not.\n\nGo back to the old behavior of using br tags for this case to reduce churn until a more permanent solution is found." }, { "commit": "c32e000ab92e9e9e5495ddec7c4e347c35801570", "date": "2021-10-28 15:15:49 -0400", "subject": "Use Rocky Linux for documentation builds instead of CentOS.", "body": "Since CentOS 8 will be EOL at the end of the year it makes sense to do this now. The centos:8 image is still used in documentation.xml because changes there require manual testing, which will need to be done at a later date. The changes are not user-facing, however, and can be done at any time.\n\nAlso update CentOS references to RHEL since that is what we are emulating for testing purposes." }, { "commit": "30c589ace7a459f3b3d09b702e314efd412e71d6", "date": "2021-10-28 13:28:49 -0400", "subject": "Fix typo in contributing guide.", "body": "Not sure how this got broken but it was probably an errant search and replace." }, { "commit": "2f1a2877373c7be68d553c7f781299edaf8ff196", "date": "2021-10-28 11:49:00 -0400", "subject": "Add missing assert." }, { "commit": "adc09ffc3bccb24c83a471c8af1f9bf68f2cf9c8", "date": "2021-10-28 08:10:43 -0400", "subject": "Minor fix for lower-casing of option summaries.", "body": "This works with existing cases and fixes \"I/O\"." }, { "commit": "fa564ee1969229b5cf60d2479d8ace85325f4db3", "date": "2021-10-27 11:08:32 -0400", "subject": "Improve documentation for cmd-ssh, repo-host-cmd, pg-host-cmd options.", "body": "Use \"command\" instead of \"exe\" and make the descriptions more consistent." }, { "commit": "e1f6c066b3da11fd21b1155c90370c3fa2da06b7", "date": "2021-10-27 10:52:39 -0400", "subject": "Improve documentation for buffer-size option." }, { "commit": "1f7c7b7dda1c736fab2673084498fc7c220b742a", "date": "2021-10-26 16:56:44 -0400", "subject": "Fix test descriptions in common/typeVariantTest." 
}, { "commit": "d74fe7a222c1e1ae0f02addbeb712f8946d3d731", "date": "2021-10-26 13:53:44 -0400", "subject": "Add coverage for empty CATCH() blocks.", "body": "Currently empty CATCH() blocks are always marked as covered because of the loop structure of error handling.\n\nA prototype implementation of error handling without looping has shown that these CATCH() blocks are not covered without new tests. Whether or not that prototype gets committed it is worth adding the tests." }, { "commit": "e2eea974c144f77448aa9d5fbb55c933b70ea5ad", "date": "2021-10-26 12:09:41 -0400", "subject": "Add assertion for Coverity.", "body": "Coverity thinks this value might be NULL but that should not be possible because of the TRY...CATCH block." }, { "commit": "4f10441574761c9cd4e31cdef750742e004ae669", "date": "2021-10-26 08:25:21 -0400", "subject": "Add missing paragraph tags in coding standards." }, { "commit": "7fb99c59c88fe11c679d6ba7835f995a969462c0", "date": "2021-10-26 07:46:48 -0400", "subject": "Use externed instead of extern'd in comments.", "body": "This is mostly to revert some comment changes in b11ab9f7 that will break the ppc64le patch, but at the same time keep the spelling consistent in all comments and documentation.\n\nAlso revert some space changes for the same reason." }, { "commit": "653ffcf8d98ebfe94ae44ed54b4a295428c57850", "date": "2021-10-25 15:42:28 -0400", "subject": "Adjustments for new breaking change in Azurite.", "body": "Azurite released another breaking change (see fbd018cd, 096829b3, c38d6926, and Azurite issue 1039) so make adjustments as needed to documentation and tests.\n\nAlso remove some dead code that hid the repo-storage-host option and was made obsolete by all these changes." }, { "commit": "13d4559708819787ad05be6f37ec0badb0eccae5", "date": "2021-10-25 15:31:39 -0400", "subject": "Check return value of getsockopt().", "body": "Checking the return value is not terribly important here, but if setsockopt() fails it is likely that bind() will fail as well. May as well get it over with and this makes Coverity happy." }, { "commit": "1152f7a7d64e69eed1d9e74b48a308f6c742c28a", "date": "2021-10-25 12:56:33 -0400", "subject": "Fix mismatched parameters in tlsClientNew() call.", "body": "3879bc69 added this call and the parameters were not quite right but in way that the compiler decided they were OK. It was mostly working but TLS verification was disabled if caPath was NULL, which is not OK." }, { "commit": "a1a2284c881ba6c3b9b1c316b31e0583c006f1af", "date": "2021-10-25 09:01:22 -0400", "subject": "Fix typos in error messages." }, { "commit": "3879bc69b888daa04d2ca98a2d1219cf22519ddc", "date": "2021-10-22 18:31:55 -0400", "subject": "Add WebIdentity authentication for AWS S3.", "body": "This allows credentials to be automatically acquired in an EKS environment." }, { "commit": "51785739f44b624091246c48af6defe97c30d7a7", "date": "2021-10-22 18:02:20 -0400", "subject": "Store config values as a union instead of a variant.", "body": "The variants were needed to easily serialize configurations for the Perl code.\r\n\r\nUnions are more efficient and will allow us to add new types that are not supported by variants, e.g. StringId." }, { "commit": "2cea005f740d640290a9948595f5933833e30e7d", "date": "2021-10-22 17:19:16 -0400", "subject": "Fix segfault on invalid GCS key file." }, { "commit": "cb36fec102855bf268ec5234bbb5261be98bdc61", "date": "2021-10-21 17:48:00 -0400", "subject": "Add analytics to the HTML documentation." 
}, { "commit": "a63e732987bc1f6f26514568dabc6c0b23df07ab", "date": "2021-10-21 17:25:32 -0400", "subject": "Fix indentation." }, { "commit": "78e1bd333068c4a857054490698115fa2c698e0b", "date": "2021-10-21 17:10:00 -0400", "subject": "Move v1 documentation links out of the introduction.", "body": "There should be few if any users running v1 now so these links do not need to be so prominent." }, { "commit": "861df2a73cafbd49049dccdc55d5214b00dd3cec", "date": "2021-10-21 17:02:46 -0400", "subject": "Add GitHub repository link to index.html and README.md." }, { "commit": "1cb8ae15de5b4276682bdd9825ca97012cd43855", "date": "2021-10-21 13:51:59 -0400", "subject": "Fix incorrect host name in user guide.", "body": "The text indicates to populate the pg-primary IP address into the pg_hba.conf file to allow replication connections. It should indicate to populate the pg-standby IP address" }, { "commit": "b11ab9f799aa6fc32dd03e96e8a0428d5c83d9ae", "date": "2021-10-21 13:31:22 -0400", "subject": "Fix typos." }, { "commit": "8ad6b7330e1ee6bcbc0f06ec0562a433e7888f44", "date": "2021-10-21 09:20:40 -0400", "subject": "Fix outdated comment.", "body": "This check was moved from within the path checks at some point but the comment did not get updated." }, { "commit": "fbd018cd56482efff425beb4026fe22482115138", "date": "2021-10-20 08:22:37 -0400", "subject": "Allow S3/Azure Docker images to be specified in user guide.", "body": "It is not uncommon for the S3/Azure emulators we use to introduce breaking changes without warning. If that happens the documentation can still be built by specifying a working version of the image. In general, it is better to let the version float so we know when things break.\n\nAzurite has yet another breaking change coming up (see 096829b3, c38d6926, and Azurite issue 1039) so set azure-image at the current version until the breaking change has been released." }, { "commit": "5dfdd6dd5b7b43dc3a223b9552aed4052d0db3aa", "date": "2021-10-19 12:45:20 -0400", "subject": "Add -Werror -Wfatal-errors -g flags to configure --enable-test.", "body": "These flags are used for all tests but it was not possible to add them to configure before the change in 046d6643. This is especially important for adhoc tests to ensure the flags are not forgotten.\n\nRemove the flags from test make commands where they were being applied.\n\nThere is no change for production builds." }, { "commit": "046d6643373859c5e848a97e06389ed2aa553723", "date": "2021-10-19 12:14:09 -0400", "subject": "Set most compiler flags as late as possible in configure.", "body": "Some flags, e.g. -Wfatal-errors, will cause tests in configure to behave incorrectly so we have not been able to add them to --enable-test.\n\nAdd the compiler flags as late as possible so configure checks are not affected. This will allow us to add flags that we need for testing without having to explicitly pass them to make." }, { "commit": "e443e3c6c05c9d65a67dac0c8430b59239fbc1b8", "date": "2021-10-19 09:06:06 -0400", "subject": "Add br tags for HTML documentation rendering missed in def7d513." }, { "commit": "4c2d89eb66e11017e2e73ad4171e4493c28acdad", "date": "2021-10-18 16:43:19 -0400", "subject": "Fix typos." }, { "commit": "6cc8e45df68c299990c0ad1f40c53b9282cb46db", "date": "2021-10-18 14:45:36 -0400", "subject": "Add missing paragraph tag in user guide." 
}, { "commit": "ccc255d3e05d8ce2b6ac251d1498f71b04098a86", "date": "2021-10-18 14:32:41 -0400", "subject": "Add TLS Server.", "body": "The TLS server is an alternative to using SSH for protocol connections to remote hosts.\n\nThis command is currently experimental and intended only for trial and testing. As such, the new commands and options will not show up in the command-line help unless directly requested." }, { "commit": "09fb9393f14b47effebaecc449a97ad07ef4c752", "date": "2021-10-18 14:02:05 -0400", "subject": "Write command configuration overrides explicitly.", "body": "If not written explicitly then it is impossible to distinguish the override from a NULL, which indicates no override." }, { "commit": "90f7f11a9f71152185219bbb57bf1de001e3a91b", "date": "2021-10-18 12:22:48 -0400", "subject": "Add missing static keywords in test modules." }, { "commit": "4570c7e27528400373ece8dc7bd348baf3ff064e", "date": "2021-10-18 11:32:53 -0400", "subject": "Allow error buffer to be resized for testing.", "body": "Some tests can generate very large error messages for diffs and they often get cut off before the end.\n\nAlso fix a test so it does not create too large a buffer on the stack." }, { "commit": "838ee3bd08c739e3dcf611e9bddfaa6c8acbb2aa", "date": "2021-10-18 11:05:53 -0400", "subject": "Increase some storage test timeouts.", "body": "32-bit Debian 9 is sometimes timing out on these tests so increase the timeouts to make the tests more reliable." }, { "commit": "6b9e19d423d99d3063c4bff3d3533b1e5081e4cb", "date": "2021-10-16 12:35:47 -0400", "subject": "Convert configuration optional rules to pack format.", "body": "The previous format was custom for configuration parsing and was not as expressive as the pack format. An immediate benefit is that commands with the same optional rules are merged.\n\nDefaults are now represented correctly (not multiplied), which simplifies the option default functions used by help." }, { "commit": "360cff94e4e9e1ab5a690a1f5c38eb278158a892", "date": "2021-10-16 12:33:31 -0400", "subject": "Update 32-bit test container to Debian 9.", "body": "Also rebalance PostgreSQL version integration tests." }, { "commit": "0e84c19a9fde0480b30078f5d3b419267b2f7673", "date": "2021-10-15 17:50:54 -0400", "subject": "Remove allow range from pg-socket-path option.", "body": "The allow range was never processed because the string type does not allow ranges, but it is wasteful to have it in the parse rules.\n\nIt would be good if auto-generation errored on useless allow ranges, but that will need wait since it does not impact production." }, { "commit": "144469b9772bad14466fcafc65edef58c5366755", "date": "2021-10-15 15:50:55 -0400", "subject": "Add const buffer functions to Pack type.", "body": "These allow packs to be created without allocating a buffer in the case that the buffer already exists or the data is in a global constant.\n\nAlso fix a rendering issue in hrnPackReadToStr()." }, { "commit": "66bfd1327e56f0f2de99fc6009431f3ee06ad6b8", "date": "2021-10-13 19:48:41 -0400", "subject": "Rename SSH connection control parameters in integration tests." }, { "commit": "447b24309d02938d04e036ec7814e75982210eb4", "date": "2021-10-13 19:43:40 -0400", "subject": "Update RHEL package URL." }, { "commit": "01b20724daf4c5cb25d6f636fb90456759773d22", "date": "2021-10-13 19:36:59 -0400", "subject": "Rename PostgreSQL pid file constants and tests." 
}, { "commit": "570162040864b8c56b236aa66d8f5c8d610b754b", "date": "2021-10-13 19:02:58 -0400", "subject": "Rename manifest file primary flag in tests." }, { "commit": "a44f9e373b47354a09bec0eaf2f3bf9e261c6941", "date": "2021-10-13 13:21:04 -0400", "subject": "Update Vagrantfile to Ubuntu 20.04." }, { "commit": "b16e827d69364408ea687a8e4b8894f7e889792e", "date": "2021-10-13 13:20:11 -0400", "subject": "Do not show output of PostgreSQL upgrade status check in user guide.", "body": "On some platforms the output may contain UTF-8 characters that the latex code is not prepared to handle.\n\nShowing the command is much more important than showing the output, so no big loss." }, { "commit": "5e84645ac030544b572036c08d055885d96d8905", "date": "2021-10-13 12:16:47 -0400", "subject": "Update comments referring to the PostgreSQL primary." }, { "commit": "430efff98a5b8dcf7c048f383abc12d9c0e5bbf0", "date": "2021-10-13 12:01:53 -0400", "subject": "Update documentation/links to main branch." }, { "commit": "1212668d5eff51756b0719b5296f7640e8096605", "date": "2021-10-13 11:43:14 -0400", "subject": "Update contributing.xml with rendering changes from def7d513.", "body": "Also update help.xml path missed in f4e1babf." }, { "commit": "90c73183ea5de6a63a23a9047ae2debb3f59b940", "date": "2021-10-13 09:37:03 -0400", "subject": "Add libc6-dbg required by updated valgrind to Vagrantfile/Dockerfile." }, { "commit": "c2d4552b7328489d703dc03defa769b1ccb8f739", "date": "2021-10-13 08:51:58 -0400", "subject": "Add debug options to code generation make in test.pl." }, { "commit": "bd91ebca759d2d6cfc2f7aa660366f5f7f09994a", "date": "2021-10-12 16:16:05 -0400", "subject": "Remove command overrides for output options.", "body": "The overrides are not needed since both commands require the same default and allow list." }, { "commit": "e8e346bc8738815b07bd80a37ace862fdab3dc1d", "date": "2021-10-12 08:53:12 -0400", "subject": "Remove command overrides for restore-only options.", "body": "The overrides are not needed since these options are only valid for one command." }, { "commit": "576b04763477877d4f2a61ad692703e20471901b", "date": "2021-10-11 16:25:36 -0400", "subject": "Invert required in set option to simplify generated rules." }, { "commit": "980b777a4a0cc07200ccb06cc55b89181101b266", "date": "2021-10-09 12:39:54 -0400", "subject": "Fix indentation." }, { "commit": "cc7f2eea900d0a7a429ffdc2cd45ead88c0298a9", "date": "2021-10-09 12:37:25 -0400", "subject": "Add assert in pckReadNext() to prevent reading into a field." }, { "commit": "610bfd736ef091a8298e5602d41aba86f10189bb", "date": "2021-10-09 12:34:45 -0400", "subject": "Increase tolerance for 0ms sleep in common/time test." }, { "commit": "7ab8dcbe6e007831e8a7f9f26e08f083c1026388", "date": "2021-10-09 12:15:19 -0400", "subject": "Read tag size in pckReadTagNext().", "body": "Rather than reading the size everywhere it is needed, get it when the tag is read, if it exists.\n\nThis simplifies logic around consuming the data when not needed. There are more use cases for this coming up." }, { "commit": "ed68792e765411a994d8ac79e4d047bbafc25582", "date": "2021-10-07 19:57:28 -0400", "subject": "Rename strNewN() to strNewZN().", "body": "Make the function name consistent with other functions that accept zero-terminated strings, e.g. strNewZ() and strCatZN()." 
}, { "commit": "b7e17d80ea02d70e56327828310c578af51795b5", "date": "2021-10-07 19:43:28 -0400", "subject": "More efficient memory allocation for Strings and String Variants.", "body": "The vast majority of Strings are never modified so for most cases allocate memory for the string with the object. This results in one allocation in most cases instead of two. Use strNew() if strCat*() functions are needed.\n\nUpdate varNewStr() in the same way since String Variants can never be modified. This results in one allocation in all cases instead of three. Also update varNewStrZ() to use STR() instead of strNewZ() to save two more allocations." }, { "commit": "208641ac7fd22f676a55e7305b3e69df574f36f8", "date": "2021-10-07 18:50:56 -0400", "subject": "Use constant string for user/group in performance/type test.", "body": "It is not safe to return strings created with STRDEF() from a function." }, { "commit": "74d3131830646c9f71ffa9847729cae40e3aa866", "date": "2021-10-07 14:58:11 -0400", "subject": "More efficient generation of diff/incr backup label." }, { "commit": "498902e885c9d7a44648ccc01b20454094d5b742", "date": "2021-10-07 12:18:24 -0400", "subject": "Allow \"global\" as a stanza prefix.", "body": "A stanza name like global_stanza was not allowed because the code was not selective enough about how a global section should be formatted.\r\n\r\nUpdate the config parser to correctly recognize global sections." }, { "commit": "338102861fd0ea4d2773b010dee34a39a96ad702", "date": "2021-10-07 11:01:48 -0400", "subject": "Improve instructions for rebuilding pgbackrest during stress testing." }, { "commit": "fb3f6928c9aef499938e195fbb612c1940a2dc19", "date": "2021-10-06 19:27:04 -0400", "subject": "Add configurable storage helpers to create repository storage.", "body": "Remove the hardcoded storage helpers from storageRepoGet() except for the the built-in Posix helper and the special remote helper.\n\nThe goal is to make storage driver development a bit easier by isolating as much of the code as possible into the driver module. This also makes coverage reporting much simpler for additional drivers since they do not need to provide coverage for storage/helper.\n\nConsolidate the CIFS tests into the Posix tests since CIFS is just a special case of the Posix.\n\nTest all storage features in the Posix test so that other storage driver tests do not need to provide coverage for storage/storage.\n\nRemove some dead code in the storage/s3 test." }, { "commit": "cfd823355af2ac99f30c5e1393a121c6dbf622b7", "date": "2021-10-06 12:38:56 -0400", "subject": "Refactor S3 storage driver for additional auth methods.", "body": "Currently only two auth methods are supported and a lot of refactoring is required to add a third one.\n\nDo the refactoring now to reduce noise in the commit that adds the third auth method." }, { "commit": "68c5f3eaf18fc9bd10fde15a323890bcdcbf4534", "date": "2021-10-05 17:59:05 -0400", "subject": "Allow link-map option to create new links.", "body": "Currently link-map only allows links that exist in the backup manifest to be remapped to a new destination.\r\n\r\nAllow link-map to create a new link as long as a valid path/file from the backup is referenced." }, { "commit": "f2aeb30fc706c04d6200cefeeb2645229a31ff69", "date": "2021-10-05 14:06:59 -0400", "subject": "Add state to ProtocolClient.", "body": "This is currently only useful for debugging, but in the future the state may be used for resetting the protocol when something goes wrong." 
}, { "commit": "2c65fed80f47124283d4f8be92f987dc55237f48", "date": "2021-10-05 12:29:16 -0400", "subject": "Add missing asserts and move temp mem context block." }, { "commit": "6af827cbb1e78cd4c5d649ed4cb24c49a7204b8f", "date": "2021-10-05 09:00:16 -0400", "subject": "Report original error and retries on local job failure.", "body": "The local process will retry jobs (e.g. backup file) but after a certain number of failures gives up. Previously, the last error was reported but generally the first error is far more valuable. The last error is likely to be a cascade failure such as the protocol being out of sync.\r\n\r\nReport the first error (and stack trace) and append the retry errors to the first error without stack trace information." }, { "commit": "34f78734325743b4e34cb39224852e6debf49750", "date": "2021-10-04 13:45:53 -0400", "subject": "Report backup file validation errors in backup.info.", "body": "Currently errors found during the backup are only available in text output when specifying --set.\r\n\r\nAdd a flag to backup.info that is available in both the text and json output when --set is not specified. This at least provides the basic info that an error was found in the cluster during the backup, though details are still only available as described above." }, { "commit": "57c62315465972f6b85558020198134e34cf2ee0", "date": "2021-10-02 17:27:33 -0400", "subject": "Add arm64 testing on Cirrus CI.", "body": "These tests run in a container without permissions to mount tempfs, so add an option to ci.pl to not create tempfs. Also add some packages not in the base image." }, { "commit": "f1ed8f0e5112d1a74d86168e67632be55eddb416", "date": "2021-10-02 16:29:31 -0400", "subject": "Sort WAL segment names when reporting duplicates.", "body": "Make the output consistent even when files are listed in a different order. This is purely for testing purposes, but there is no harm in consistent output.\n\nFound on arm64." }, { "commit": "71047a9d6d1eea71b3fbd430983541a54049cc69", "date": "2021-10-02 16:17:33 -0400", "subject": "Use strncpy() to limit characters copied to optionName.", "body": "Valgrind complained about uninitialized values on arm64 when comparing the reset prefix, probably because \"reset\" ended up being larger than the option name: Conditional jump or move depends on uninitialised value(s) at cfgParseOption (parse.c:568).\n\nCoverity complained because it could not verify the size of the string to be copied into optionName, probably because it does not understand the purpose of strSize(): You might overrun the 65-character fixed-size string \"optionName\" by copying the return value of \"strZ\" without checking the length.\n\nUse strncpy() even though we have already checked the size and make sure the string is terminated. Keep the size check because searching for truncated option names is not a good idea.\n\nThis is not a production bug since the code has not been released yet." }, { "commit": "b792a14cd7dbdfb61362700ffc5fc01997db890c", "date": "2021-10-01 18:23:03 -0400", "subject": "Use temp mem context when calling command handlers.", "body": "It is safer and more efficient to free memory after each handler completes.\n\nThe db command handlers use the server context so update them to use the top context." }, { "commit": "ae40ed6ec9cf77e577518b128518a5763767f589", "date": "2021-10-01 17:15:36 -0400", "subject": "Add jobRetry parameter to HRN_CFG_LOAD().", "body": "Allow the default of 0 to be overridden to test retry behavior for commands." 
}, { "commit": "136d309dd4bc1ada9f3d775f036b62292fda390b", "date": "2021-10-01 15:29:31 -0400", "subject": "Allow stack trace to be specified for errorInternalThrow().", "body": "This allows the stack trace to be set when an error is received by the protocol, rather than appending it to the message. Now these errors will look no different than any other error and the stack trace will be reported in the same way.\n\nOne immediate benefit is that test.pl --vm-out --log-level-test=debug will work for tests that check expect log results. Previously, the test would error at the first check because the stack trace included in the message would not match the expected log output." }, { "commit": "62f6fbe2a9ecd9bc48611ecfaf27a05b9f36a87d", "date": "2021-10-01 10:15:34 -0400", "subject": "Update file mode in info/manifest test to 0600.", "body": "0400 is not a very realistic mode. It may have become the default due to copy-pasting." }, { "commit": "0690cb25a077735780b2fe24343c946f8a4efbc6", "date": "2021-09-30 17:55:38 -0400", "subject": "Remove repository format 6 notes.", "body": "The notes have been moved to a Github project." }, { "commit": "376362475e3b3b13b70313d9002d4ff8b25b4b40", "date": "2021-09-30 16:15:45 -0400", "subject": "Move archive-header-check option to the archive reference section." }, { "commit": "cf1a57518fe3230886509f59fcec9c9a81e6513c", "date": "2021-09-30 14:29:49 -0400", "subject": "Refactor restoreManifestMap() to be driven by link-map.", "body": "This will allow new links to be added in a future commit. The current implementation is driven by the links that already exist in the manifest, which would make the new use case more complex to implement.\n\nAlso, add a more helpful error when a tablespace link is specified." }, { "commit": "d89a67776cfbb7b3047dbe297cc0c768e5c670e8", "date": "2021-09-30 13:39:29 -0400", "subject": "Refactor restoreManifestMap() tests in the command/restore unit.", "body": "Add test titles, new tests, and rearrange.\n\nAlso manifestTargetFindDefault(), which will soon be used by core code in a refactoring commit." }, { "commit": "7a53ba7c7f38bff2c9ef99c6ab58d22c59dd2290", "date": "2021-09-30 13:28:14 -0400", "subject": "Add note to comment for int64 typedef." }, { "commit": "815377cc6009c800b6a5fdc1fe98ddfceaaae824", "date": "2021-09-30 13:27:14 -0400", "subject": "Finalize catalog number for PostgreSQL 14 release." }, { "commit": "baf186bfb05ca683714a59485900892f5e7e8a1b", "date": "2021-09-29 12:03:01 -0400", "subject": "Fix comment typos." }, { "commit": "9e79f0e64b661e944ce2b3897c366feea1544ac2", "date": "2021-09-29 10:31:51 -0400", "subject": "Add recovery start time to online backup restore log.", "body": "This helps give an idea of how much recovery needs to be done to reach the end of the WAL stream and is easier to read than the backup label." }, { "commit": "9346895f5b61627b50d431f09447e08d8a50caa8", "date": "2021-09-29 09:58:47 -0400", "subject": "Rename page checksum error to error list in info text output.", "body": "\"error list\" makes it clearer that other errors may be reported. For example, if checksum-page is true in the manifest but no checksum-page-error list is provided then the error is in alignment, i.e. the file size is not a multiple of the page size, with allowances made for a valid-looking partial page at the end of the file.\r\n\r\nIt is still not possible to differentiate between alignment and page checksum errors in the output but this will be addressed in a future commit." 
}, { "commit": "b7ef12a76f219881d0b24c715592af96fe5c9b8f", "date": "2021-09-28 15:55:13 -0400", "subject": "Add hints to standby replay timeout message." }, { "commit": "096829b3b257444162417662612e80d8cb2ac6ec", "date": "2021-09-27 09:01:53 -0400", "subject": "Add repo-azure-uri-style option.", "body": "Azurite introduced a breaking change in 8f63964e to use automatically host-style URIs when the endpoint appears to be a multipart hostname.\n\nThis option allows the user to configure which style URI will be used, but changing the endpoint might cause breakage if Azurite decides to use a different style. Future changes to Azurite may also cause breakage." }, { "commit": "c8ea17c68f8fa72f2bf3b979539be3f709448493", "date": "2021-09-24 17:40:31 -0400", "subject": "Convert page checksum filter result to a pack.", "body": "The pack is both more compact and more efficient than a variant.\n\nAlso aggregate the page error info in the main process rather than in the filter to allow additional LSN filtering, to be added in a future commit." }, { "commit": "ac1f6db4a25520b1ee957b66925dde2e1ef156ab", "date": "2021-09-23 14:06:00 -0400", "subject": "Centralize and optimize tag stack management.", "body": "The push and pop code was duplicated in four places, so centralize the code into pckTagStackPop() and pckTagStackPush().\n\nAlso create a default bottom item for the stack to avoid allocating a list if there will only ever be the default container, which is very common. This avoids the extra time and memory to allocate a list." }, { "commit": "15e7ff10d3d6fe3570335a5abec5ff683c07e2e6", "date": "2021-09-23 08:31:32 -0400", "subject": "Add Pack pseudo-type.", "body": "Rather than working directly with Buffer types, define a new Pack pseudo-type that represents a Buffer containing a pack. This makes it clearer that a pack is being stored and allows stronger typing." }, { "commit": "131ac0ab5e98500569d4ff6985d31c55a9ef53b9", "date": "2021-09-22 11:18:12 -0400", "subject": "Rename pckReadNew()/pckWriteNew() to pckReadNewIo()/pckWriteNewIo().", "body": "These names more accurately describe the purpose of the constructors." }, { "commit": "0e76ccb5b7b2089f8d0300ab0086c454aebbbbbf", "date": "2021-09-22 10:48:21 -0400", "subject": "Convert filter param/result to Pack type.", "body": "The Pack type is more compact and flexible than the Variant type. The Pack type also allows binary data to be stored, which is useful for transferring the passphrase in the CipherBlock filter.\n\nThe primary purpose is to allow more (and more complex) result data to be returned efficiently from the PageChecksum filter. For now the PageChecksum filter still returns the original Variant. Converting the result data will be the subject of a future commit.\n\nAlso convert filter types to StringId." }, { "commit": "802373cb9df28384529fe5a7bd102bfe5c8f3911", "date": "2021-09-21 10:16:16 -0400", "subject": "Limit valgrind error output to the first error.", "body": "Generally the first error is the only important error. The rest simply lead to a lot of scrolling." }, { "commit": "473afce57bc7646c53bae4a6300b41b11b5b0357", "date": "2021-09-20 11:03:50 -0400", "subject": "Copy data page before verifying checksum.", "body": "Using UNCONSTIFY() is potentially dangerous since the buffer is modified while calculating the checksum, even though the page is reverted to the original state. 
Instead make a copy to ensure that the original data is never modified.\n\nThis requires the logic to be shuffled a bit since the copy cannot be made until we are sure the page is complete." }, { "commit": "0efb8adb9452cb8bc67ebaa8b6dca0d4a69c1682", "date": "2021-09-19 20:38:51 -0400", "subject": "Automatically include all PostgreSQL version interface files." }, { "commit": "95d814cf81b84d162f40717346c3ad0cb642f724", "date": "2021-09-19 20:32:27 -0400", "subject": "Specify size for helpData array." }, { "commit": "912a498b0bcc988bd5ebaaa22198956d752251bd", "date": "2021-09-11 16:07:59 -0400", "subject": "Skip comments when rendering help output.", "body": "Comments should not appear in the help. They are simply notes on implementation." }, { "commit": "c38d6926d6c9aa01b895a28e66fc0aa6965350a3", "date": "2021-09-09 08:48:45 -0400", "subject": "Revert Azurite version for testing to 3.14.0.", "body": "3.14.2 is causing breakage in the documentation. There is no obvious cause so for now just revert to the last working version." }, { "commit": "f4e1babf6b4ce7087ace8221cac7cadb51488f0e", "date": "2021-09-08 18:16:06 -0400", "subject": "Migrate command-line help generation to C.", "body": "Command-line help is now generated at build time so it does not need to be committed. This reduces churn on commits that add configuration and/or update the help.\n\nSince churn is no longer an issue, help.auto.c is bzip2 compressed to save space in the binary.\n\nThe Perl config parser (Data.pm) has been moved to doc/lib since the Perl build path is no longer required.\n\nLikewise doc/xml/reference.xml has been moved to src/build/help/help.xml since it is required at build time." }, { "commit": "def7d513cdd2d4579acf6e8c675a3d6f7da4f655", "date": "2021-09-08 17:35:45 -0400", "subject": "Eliminate linefeed formatting from documentation.", "body": "Linefeeds were originally used in the place of <p> tags to denote a paragraph. While much of the linefeed usage has been replaced over time, there were many places where it was still being used, especially in reference.xml. This made it difficult to get consistent formatting across different output types. In particular there were formatting issues in the command-line help because it is harder to audit than HTML or PDF.\n\nReplace linefeed formatting with proper <p> tags to make formatting more consistent.\n\nRemove double spaces in all text where <p> tags were added since it does not add churn.\n\nUpdate all