pax_global_header00006660000000000000000000000064136160560460014521gustar00rootroot0000000000000052 comment=a7490b56d182e7b8f8cdca083e05c01aab2d207c borgmatic-1.5.1/000077500000000000000000000000001361605604600134745ustar00rootroot00000000000000borgmatic-1.5.1/.dockerignore000066400000000000000000000000121361605604600161410ustar00rootroot00000000000000.git .tox borgmatic-1.5.1/.drone.yml000066400000000000000000000044351361605604600154120ustar00rootroot00000000000000--- kind: pipeline name: python-3-5-alpine-3-10 services: - name: postgresql image: postgres:11.6-alpine environment: POSTGRES_PASSWORD: test POSTGRES_DB: test - name: mysql image: mariadb:10.3 environment: MYSQL_ROOT_PASSWORD: test MYSQL_DATABASE: test steps: - name: build image: python:3.5-alpine3.10 pull: always commands: - scripts/run-full-tests --- kind: pipeline name: python-3-6-alpine-3-10 services: - name: postgresql image: postgres:11.6-alpine environment: POSTGRES_PASSWORD: test POSTGRES_DB: test - name: mysql image: mariadb:10.3 environment: MYSQL_ROOT_PASSWORD: test MYSQL_DATABASE: test steps: - name: build image: python:3.6-alpine3.10 pull: always commands: - scripts/run-full-tests --- kind: pipeline name: python-3-7-alpine-3-10 services: - name: postgresql image: postgres:11.6-alpine environment: POSTGRES_PASSWORD: test POSTGRES_DB: test - name: mysql image: mariadb:10.3 environment: MYSQL_ROOT_PASSWORD: test MYSQL_DATABASE: test steps: - name: build image: python:3.7-alpine3.10 pull: always commands: - scripts/run-full-tests --- kind: pipeline name: python-3-7-alpine-3-7 services: - name: postgresql image: postgres:10.11-alpine environment: POSTGRES_PASSWORD: test POSTGRES_DB: test - name: mysql image: mariadb:10.1 environment: MYSQL_ROOT_PASSWORD: test MYSQL_DATABASE: test steps: - name: build image: python:3.7-alpine3.7 pull: always commands: - scripts/run-full-tests --- kind: pipeline name: python-3-8-alpine-3-10 services: - name: postgresql image: postgres:11.6-alpine environment: 
POSTGRES_PASSWORD: test POSTGRES_DB: test - name: mysql image: mariadb:10.3 environment: MYSQL_ROOT_PASSWORD: test MYSQL_DATABASE: test steps: - name: build image: python:3.8-alpine3.10 pull: always commands: - scripts/run-full-tests --- kind: pipeline name: documentation steps: - name: build image: plugins/docker settings: username: from_secret: docker_username password: from_secret: docker_password repo: witten/borgmatic-docs dockerfile: docs/Dockerfile when: branch: - master borgmatic-1.5.1/.eleventy.js000066400000000000000000000024121361605604600157420ustar00rootroot00000000000000const pluginSyntaxHighlight = require("@11ty/eleventy-plugin-syntaxhighlight"); const inclusiveLangPlugin = require("@11ty/eleventy-plugin-inclusive-language"); module.exports = function(eleventyConfig) { eleventyConfig.addPlugin(pluginSyntaxHighlight); eleventyConfig.addPlugin(inclusiveLangPlugin); let markdownIt = require("markdown-it"); let markdownItAnchor = require("markdown-it-anchor"); let markdownItReplaceLink = require("markdown-it-replace-link"); let markdownItOptions = { html: true, breaks: false, linkify: true, replaceLink: function (link, env) { if (process.env.NODE_ENV == "production") { return link; } return link.replace('https://torsion.org/borgmatic/', 'http://localhost:8080/'); } }; let markdownItAnchorOptions = { permalink: true, permalinkClass: "direct-link" }; eleventyConfig.setLibrary( "md", markdownIt(markdownItOptions) .use(markdownItAnchor, markdownItAnchorOptions) .use(markdownItReplaceLink) ); eleventyConfig.addPassthroughCopy({"docs/static": "static"}); return { templateFormats: [ "md", "txt" ] } }; borgmatic-1.5.1/.gitea/000077500000000000000000000000001361605604600146435ustar00rootroot00000000000000borgmatic-1.5.1/.gitea/issue_template.md000066400000000000000000000014501361605604600202100ustar00rootroot00000000000000#### What I'm trying to do and why #### Steps to reproduce (if a bug) Include (sanitized) borgmatic configuration files if applicable. 
#### Actual behavior (if a bug) Include (sanitized) `--verbosity 2` output if applicable. #### Expected behavior (if a bug) #### Other notes / implementation ideas #### Environment **borgmatic version:** [version here] Use `sudo borgmatic --version` or `sudo pip show borgmatic | grep ^Version` **borgmatic installation method:** [e.g., Debian package, Docker container, etc.] **Borg version:** [version here] Use `sudo borg --version` **Python version:** [version here] Use `python3 --version` **Database version (if applicable):** [version here] Use `psql --version` or `mysql --version` on client and server. **operating system and version:** [OS here] borgmatic-1.5.1/.gitignore000066400000000000000000000001501361605604600154600ustar00rootroot00000000000000*.egg-info *.pyc *.swp .cache .coverage .pytest_cache .tox __pycache__ build/ dist/ pip-wheel-metadata/ borgmatic-1.5.1/AUTHORS000066400000000000000000000011671361605604600145510ustar00rootroot00000000000000Dan Helfman : Main developer Alexander Görtz: Python 3 compatibility Florian Lindner: Logging rewrite Henning Schroeder: Copy editing Johannes Feichtner: Support for user hooks Michele Lazzeri: Custom archive names Nick Whyte: Support prefix filtering for archive consistency checks newtonne: Read encryption password from external file Robin `ypid` Schneider: Support additional options of Borg and add validate-borgmatic-config command Scott Squires: Custom archive names Thomas LÉVEIL: Support for a keep_minutely prune option. Support for the --json option And many others! See the output of "git log". borgmatic-1.5.1/LICENSE000066400000000000000000001044621361605604600145100ustar00rootroot00000000000000GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. 
For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. 
The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. {one line to give the program's name and a brief idea of what it does.} Copyright (C) {year} {name of author} This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: {project} Copyright (C) {year} {fullname} This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 
This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . borgmatic-1.5.1/MANIFEST.in000066400000000000000000000000721361605604600152310ustar00rootroot00000000000000include borgmatic/config/schema.yaml graft sample/systemd borgmatic-1.5.1/NEWS000066400000000000000000000666031361605604600142060ustar00rootroot000000000000001.5.1 * #289: Tired of looking up the latest successful archive name in order to pass it to borgmatic actions? Me too. Now you can specify "--archive latest" to all actions that accept an archive flag. * #290: Fix the "--stats" and "--files" flags so that they yield output at verbosity 0. * Reduce the default verbosity of borgmatic logs sent to Healthchecks monitoring hook. Now, it's warnings and errors only. You can increase the verbosity via the "--monitoring-verbosity" flag. * Add security policy documentation in SECURITY.md. 1.5.0 * #245: Monitor backups with PagerDuty hook integration. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook * #255: Add per-action hooks: "before_prune", "after_prune", "before_check", and "after_check". 
* #274: Add ~/.config/borgmatic.d as another configuration directory default. * #277: Customize Healthchecks log level via borgmatic "--monitoring-verbosity" flag. * #280: Change "exclude_if_present" option to support multiple filenames that indicate a directory should be excluded from backups, rather than just a single filename. * #284: Backup to a removable drive or intermittent server via "soft failure" feature. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/ * #287: View consistency check progress via "--progress" flag for "check" action. * For "create" and "prune" actions, no longer list files or show detailed stats at any verbosities by default. You can opt back in with "--files" or "--stats" flags. * For "list" and "info" actions, show repository names even at verbosity 0. 1.4.22 * #276, #285: Disable colored output when "--json" flag is used, so as to produce valid JSON ouput. * After a backup of a database dump in directory format, properly remove the dump directory. * In "borgmatic --help", don't expand $HOME in listing of default "--config" paths. 1.4.21 * #268: Override particular configuration options from the command-line via "--override" flag. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/#configuration-overrides * #270: Only trigger "on_error" hooks and monitoring failures for "prune", "create", and "check" actions, and not for other actions. * When pruning with verbosity level 1, list pruned and kept archives. Previously, this information was only shown at verbosity level 2. 1.4.20 * Fix repository probing during "borgmatic init" to respect verbosity flag and remote_path option. * #249: Update Healthchecks/Cronitor/Cronhub monitoring integrations to fire for "check" and "prune" actions, not just "create". 
1.4.19 * #259: Optionally change the internal database dump path via "borgmatic_source_directory" option in location configuration section. * #271: Support piping "borgmatic list" output to grep by logging certain log levels to console stdout and others to stderr. * Retain colored output when piping or redirecting in an interactive terminal. * Add end-to-end tests for database dump and restore. These are run on developer machines with Docker Compose for approximate parity with continuous integration tests. 1.4.18 * Fix "--repository" flag to accept relative paths. * Fix "borgmatic umount" so it only runs Borg once instead of once per repository / configuration file. * #253: Mount whole repositories via "borgmatic mount" without any "--archive" flag. * #269: Filter listed paths via "borgmatic list --path" flag. 1.4.17 * #235: Pass extra options directly to particular Borg commands, handy for Borg options that borgmatic does not yet support natively. Use "extra_borg_options" in the storage configuration section. * #266: Attempt to repair any inconsistencies found during a consistency check via "borgmatic check --repair" flag. 1.4.16 * #256: Fix for "before_backup" hook not triggering an error when the command contains "borg" and has an exit code of 1. * #257: Fix for garbled Borg file listing when using "borgmatic create --progress" with verbosity level 1 or 2. * #260: Fix for missing Healthchecks monitoring payload or HTTP 500 due to incorrect unicode encoding. 1.4.15 * Fix for database dump removal incorrectly skipping some database dumps. * #123: Support for mounting an archive as a FUSE filesystem via "borgmatic mount" action, and unmounting via "borgmatic umount". See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/extract-a-backup/#mount-a-filesystem 1.4.14 * Show summary log errors regardless of verbosity level, and log the "summary:" header with a log level based on the contained summary logs. 
1.4.13 * Show full error logs at "--verbosity 0" so you can see command output without upping the verbosity level. 1.4.12 * #247: With "borgmatic check", consider Borg warnings as errors. * Dial back the display of inline error logs a bit, so failed command output doesn't appear multiple times in the logs (well, except for the summary). 1.4.11 * #241: When using the Healthchecks monitoring hook, include borgmatic logs in the payloads for completion and failure pings. * With --verbosity level 1 or 2, show error logs both inline when they occur and in the summary logs at the bottom. With lower verbosity levels, suppress the summary and show error logs when they occur. 1.4.10 * #246: Fix for "borgmatic restore" showing success and incorrectly extracting archive files, even when no databases are configured to restore. As this can overwrite files from the archive and lead to data loss, please upgrade to get the fix before using "borgmatic restore". * Reopen the file given by "--log-file" flag if an external program rotates the log file while borgmatic is running. 1.4.9 * #228: Database dump hooks for MySQL/MariaDB, so you can easily dump your databases before backups run. * #243: Fix repository does not exist error with "borgmatic extract" when repository is remote. 1.4.8 * Monitor backups with Cronhub hook integration. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook * Fix Healthchecks/Cronitor hooks to skip actions when the borgmatic "--dry-run" flag is used. 1.4.7 * #238: In documentation, clarify when Healthchecks/Cronitor hooks fire in relation to other hooks. * #239: Upgrade your borgmatic configuration to get new options and comments via "generate-borgmatic-config --source". See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-your-configuration 1.4.6 * Verbosity level "-1" for even quieter output: Errors only (#236). 
1.4.5 * Log to file instead of syslog via command-line "--log-file" flag (#233). 1.4.4 * #234: Support for Borg --keep-exclude-tags and --exclude-nodump options. 1.4.3 * Monitor backups with Cronitor hook integration. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook 1.4.2 * Extract files to a particular directory via "borgmatic extract --destination" flag. * Rename "borgmatic extract --restore-path" flag to "--path" to reduce confusion with the separate "borgmatic restore" action. Any uses of "--restore-path" will continue working. 1.4.1 * #229: Restore backed up PostgreSQL databases via "borgmatic restore" action. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/backup-your-databases/ * Documentation on how to develop borgmatic's documentation: https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/#documentation-development 1.4.0 * #225: Database dump hooks for PostgreSQL, so you can easily dump your databases before backups run. * #230: Rename "borgmatic list --pattern-from" flag to "--patterns-from" to match Borg. 1.3.26 * #224: Fix "borgmatic list --successful" with a slightly better heuristic for listing successful (non-checkpoint) archives. 1.3.25 * #223: Dead man's switch to detect when backups start failing silently, implemented via healthchecks.io hook integration. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook * Documentation on monitoring and alerting options for borgmatic backups: https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/ * Automatically rewrite links when developing on documentation locally. 1.3.24 * #86: Add "borgmatic list --successful" flag to only list successful (non-checkpoint) archives. * Add a suggestion form to all documentation pages, so users can submit ideas for improving the documentation. 
* Update documentation link to community Arch Linux borgmatic package. 1.3.23 * #174: More detailed error alerting via runtime context available in "on_error" hook. 1.3.22 * #144: When backups to one of several repositories fails, keep backing up to the other repositories and report errors afterwards. 1.3.21 * #192: User-defined hooks for global setup or cleanup that run before/after all actions. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/ 1.3.20 * #205: More robust sample systemd service: boot delay, network dependency, lowered CPU/IO priority, etc. * #221: Fix "borgmatic create --progress" output so that it updates on the console in real-time. 1.3.19 * #219: Fix visibility of "borgmatic prune --stats" output. 1.3.18 * #220: Fix regression of argument parsing for default actions. 1.3.17 * #217: Fix error with "borgmatic check --only" command-line flag with "extract" consistency check. 1.3.16 * #210: Support for Borg check --verify-data flag via borgmatic "data" consistency check. * #210: Override configured consistency checks via "borgmatic check --only" command-line flag. * When generating sample configuration with generate-borgmatic-config, add a space after each "#" comment indicator. 1.3.15 * #208: Fix for traceback when the "checks" option has an empty value. * #209: Bypass Borg error about a moved repository via "relocated_repo_access_is_ok" option in borgmatic storage configuration section. * #213: Reorder arguments passed to Borg to fix duplicate directories when using Borg patterns. * #214: Fix for hook erroring with exit code 1 not being interpreted as an error. 1.3.14 * #204: Do not treat Borg warnings (exit code 1) as failures. * When validating configuration files, require strings instead of allowing any scalar type. 1.3.13 * #199: Add note to documentation about using spaces instead of tabs for indentation, as YAML does not allow tabs. 
* #203: Fix compatibility with ruamel.yaml 0.16.x. * If a "prefix" option in borgmatic's configuration has an empty value (blank or ""), then disable default prefix. 1.3.12 * Only log to syslog when run from a non-interactive console (e.g. a cron job). * Remove unicode byte order mark from syslog output so it doesn't show up as a literal in rsyslog output. See discussion on #197. 1.3.11 * #193: Pass through several "borg list" and "borg info" flags like --short, --format, --sort-by, --first, --last, etc. via borgmatic command-line flags. * Add borgmatic info --repository and --archive command-line flags to display info for individual repositories or archives. * Support for Borg --noatime, --noctime, and --nobirthtime flags via corresponding options in borgmatic configuration location section. 1.3.10 * #198: Fix for Borg create error output not showing up at borgmatic verbosity level zero. 1.3.9 * #195: Switch to command-line actions as more traditional sub-commands, e.g. "borgmatic create", "borgmatic prune", etc. However, the classic dashed options like "--create" still work! 1.3.8 * #191: Disable console color via "color" option in borgmatic configuration output section. 1.3.7 * #196: Fix for unclear error message for invalid YAML merge include. * #197: Don't color syslog output. * Change default syslog verbosity to show errors only. 1.3.6 * #53: Log to syslog in addition to existing console logging. Add --syslog-verbosity flag to customize the log level. See the documentation for more information: https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/ * #178: Look for .yml configuration file extension in addition to .yaml. * #189: Set umask used when executing hooks via "umask" option in borgmatic hooks section. * Remove Python cache files before each Tox run. * Add #borgmatic Freenode IRC channel to documentation. * Add Borg/borgmatic hosting providers section to documentation. * Add files for building documentation into a Docker image for web serving. 
* Upgrade project build server from Drone 0.8 to 1.1. * Build borgmatic documentation during continuous integration. * We're nearly at 500 ★s on GitHub. We can do this! 1.3.5 * #153: Support for various Borg directory environment variables (BORG_CONFIG_DIR, BORG_CACHE_DIR, etc.) via options in borgmatic's storage configuration. * #177: Fix for regression with missing verbose log entries. 1.3.4 * Part of #125: Color borgmatic (but not Borg) output when using an interactive terminal. * #166: Run tests for all installed versions of Python. * #168: Update README with continuous integration badge. * #169: Automatically sort Python imports in code. * Document installing borgmatic with pip install --user instead of a system Python install. * Get more reproducible builds by pinning the versions of pip and tox used to run tests. * Factor out build/test configuration from tox.ini file. 1.3.3 * Add validate-borgmatic-config command, useful for validating borgmatic config generated by configuration management or even edited by hand. 1.3.2 * #160: Fix for hooks executing when using --dry-run. Now hooks are skipped during a dry run. 1.3.1 * #155: Fix for invalid JSON output when using multiple borgmatic configuration files. * #157: Fix for seemingly random filename ordering when running through a directory of configuration files. * Fix for empty JSON output when using --create --json. * Now capturing Borg output only when --json flag is used. Previously, borgmatic delayed Borg output even without the --json flag. 1.3.0 * #148: Configuration file includes and merging via "!include" tag to support reuse of common options across configuration files. 1.2.18 * #147: Support for Borg create/extract --numeric-owner flag via "numeric_owner" option in borgmatic's location section. 1.2.17 * #140: List the files within an archive via --list --archive option. 1.2.16 * #119: Include a sample borgmatic configuration file in the documentation. 
* #123: Support for Borg archive restoration via borgmatic --extract command-line flag. * Refactor documentation into multiple separate pages for clarity and findability. * Organize options within command-line help into logical groups. * Exclude tests from distribution packages. 1.2.15 * #127: Remove date echo from schema example, as it's not a substitute for real logging. * #132: Leave exclude_patterns glob expansion to Borg, since doing it in borgmatic leads to confusing behavior. * #136: Handle and format validation errors raised during argument parsing. * #138: Allow use of --stats flag when --create or --prune flags are implied. 1.2.14 * #103: When generating sample configuration with generate-borgmatic-config, document the defaults for each option. * #116: When running multiple configuration files, attempt all configuration files even if one of them errors. Log a summary of results at the end. * Add borgmatic --version command-line flag to get the current installed version number. 1.2.13 * #100: Support for --stats command-line flag independent of --verbosity. * #117: With borgmatic --init command-line flag, proceed without erroring if a repository already exists. 1.2.12 * #110: Support for Borg repository initialization via borgmatic --init command-line flag. * #111: Update Borg create --filter values so a dry run lists files to back up. * #113: Update README with link to a new/forked Docker image. * Prevent deprecated --excludes command-line option from being used. * Refactor README a bit to flow better for first-time users. * Update README with a few additional borgmatic packages (Debian and Ubuntu). 1.2.11 * #108: Support for Borg create --progress via borgmatic command-line flag. 1.2.10 * #105: Support for Borg --chunker-params create option via "chunker_params" option in borgmatic's storage section. 1.2.9 * #102: Fix for syntax error that occurred in Python 3.5 and below. * Make automated tests support running in Python 3.5. 
1.2.8 * #73: Enable consistency checks for only certain repositories via "check_repositories" option in borgmatic's consistency configuration. Handy for large repositories that take forever to check. * Include link to issue tracker within various command output. * Run continuous integration tests on a matrix of Python and Borg versions. 1.2.7 * #98: Support for Borg --keep-secondly prune option. * Use Black code formatter and Flake8 code checker as part of running automated tests. * Add an end-to-end automated test that actually integrates with Borg. * Set up continuous integration for borgmatic automated tests on projects.evoworx.org. 1.2.6 * Fix generated configuration to also include a "keep_daily" value so pruning works out of the box. 1.2.5 * #57: When generating sample configuration with generate-borgmatic-config, comment out all optional configuration so as to streamline the initial configuration process. 1.2.4 * Fix for archive checking traceback due to parameter mismatch. 1.2.3 * #64, #90, #92: Rewrite of logging system. Now verbosity flags passed to Borg are derived from borgmatic's log level. Note that the output of borgmatic might slightly change. * Part of #80: Support for Borg create --read-special via "read_special" option in borgmatic's location configuration. * #87: Support for Borg create --checkpoint-interval via "checkpoint_interval" option in borgmatic's storage configuration. * #88: Fix declared pykwalify compatibility version range in setup.py to prevent use of ancient versions of pykwalify with large version numbers. * #89: Pass --show-rc option to Borg when at highest verbosity level. * #94: Support for Borg --json option via borgmatic command-line to --create archives. 1.2.2 * #85: Fix compatibility issue between pykwalify and ruamel.yaml 0.15.52, which manifested in borgmatic as a pykwalify RuleError. 1.2.1 * Skip before/after backup hooks when only doing --prune, --check, --list, and/or --info. 
* #71: Support for XDG_CONFIG_HOME environment variable for specifying alternate user ~/.config/ path. * #74, #83: Support for Borg --json option via borgmatic command-line to --list archives or show archive --info in JSON format, ideal for programmatic consumption. * #38, #76: Upgrade ruamel.yaml compatibility version range and fix support for Python 3.7. * #77: Skip non-"*.yaml" config filenames in /etc/borgmatic.d/ so as not to parse backup files, editor swap files, etc. * #81: Document user-defined hooks run before/after backup, or on error. * Add code style guidelines to the documention. 1.2.0 * #61: Support for Borg --list option via borgmatic command-line to list all archives. * #61: Support for Borg --info option via borgmatic command-line to display summary information. * #62: Update README to mention other ways of installing borgmatic. * Support for Borg --prefix option for consistency checks via "prefix" option in borgmatic's consistency configuration. * Add introductory screencast link to documentation. * #59: Ignore "check_last" and consistency "prefix" when "archives" not in consistency checks. * #60: Add "Persistent" flag to systemd timer example. * #63: Support for Borg --nobsdflags option to skip recording bsdflags (e.g. NODUMP, IMMUTABLE) in archive. * #69: Support for Borg prune --umask option using value of existing "umask" option in borgmatic's storage configuration. * Update tox.ini to only assume Python 3.x instead of Python 3.4 specifically. * Add ~/.config/borgmatic/config.yaml to default configuration path probing. * Document how to develop on and contribute to borgmatic. 1.1.15 * Support for Borg BORG_PASSCOMMAND environment variable to read a password from an external file. * Fix for Borg create error when using borgmatic's --dry-run and --verbosity options together. Work-around for behavior introduced in Borg 1.1.3: https://github.com/borgbackup/borg/issues/3298 * #55: Fix for missing tags/releases on Gitea and GitHub project hosting. 
* #56: Support for Borg --lock-wait option for the maximum wait for a repository/cache lock. * #58: Support for using tilde in exclude_patterns to reference home directory. 1.1.14 * #49: Fix for typo in --patterns-from option. * #47: Support for Borg --dry-run option via borgmatic command-line. 1.1.13 * #54: Fix for incorrect consistency check flags passed to Borg when all three checks ("repository", "archives", and "extract") are specified in borgmatic configuration. * #48: Add "local_path" to configuration for specifying an alternative Borg executable path. * #49: Support for Borg experimental --patterns-from and --patterns options for specifying mixed includes/excludes. * Moved issue tracker from Taiga to integrated Gitea tracker at https://projects.torsion.org/witten/borgmatic/issues 1.1.12 * #46: Declare dependency on pykwalify 1.6 or above, as older versions yield "Unknown key: version" rule errors. * Support for Borg --keep-minutely prune option. 1.1.11 * #26: Add "ssh_command" to configuration for specifying a custom SSH command or options. * Fix for incorrect /etc/borgmatic.d/ configuration path probing on macOS. This problem manifested as an error on startup: "[Errno 2] No such file or directory: '/etc/borgmatic.d'". 1.1.10 * Pass several Unix signals through to child processes like Borg. This means that Borg now properly shuts down if borgmatic is terminated (e.g. due to a system suspend). * #30: Support for using tilde in repository paths to reference home directory. * #43: Support for Borg --files-cache option for setting the files cache operation mode. * #45: Support for Borg --remote-ratelimit option for limiting upload rate. * Log invoked Borg commands when at highest verbosity level. 1.1.9 * #17, #39: Support for user-defined hooks before/after backup, or on error. * #34: Improve clarity of logging spew at high verbosity levels. * #30: Support for using tilde in source directory path to reference home directory. 
* Require "prefix" in retention section when "archive_name_format" is set. This is to avoid accidental pruning of archives with a different archive name format. For similar reasons, default "prefix" to "{hostname}-" if not specified. * Convert main source repository from Mercurial to Git. * Update dead links to Borg documentation. 1.1.8 * #40: Fix to make /etc/borgmatic/config.yaml optional rather than required when using the default config paths. 1.1.7 * #29: Add "archive_name_format" to configuration for customizing archive names. * Fix for traceback when "exclude_from" value is empty in configuration file. * When pruning, make highest verbosity level list archives kept and pruned. * Clarification of Python 3 pip usage in documentation. 1.1.6 * #13, #36: Support for Borg --exclude-from, --exclude-caches, and --exclude-if-present options. 1.1.5 * #35: New "extract" consistency check that performs a dry-run extraction of the most recent archive. 1.1.4 * #18: Added command-line flags for performing a borgmatic run with only pruning, creating, or checking enabled. This supports use cases like running consistency checks from a different cron job with a different frequency, or running pruning with a different verbosity level. 1.1.3 * #15: Support for running multiple config files in /etc/borgmatic.d/ from a single borgmatic run. * Fix for generate-borgmatic-config writing config with invalid one_file_system value. 1.1.2 * #33: Fix for passing check_last as integer to subprocess when calling Borg. 1.1.1 * Part of #33: Fix for upgrade-borgmatic-config converting check_last option as a string instead of an integer. * Fix for upgrade-borgmatic-config erroring when consistency checks option is not present. 1.1.0 * Switched config file format to YAML. Run upgrade-borgmatic-config to upgrade. * Added generate-borgmatic-config command for initial config creation. * Dropped Python 2 support. Now Python 3 only. 
* #19: Fix for README mention of sample files not included in package. * #23: Sample files for triggering borgmatic from a systemd timer. * Support for backing up to multiple repositories. * To free up space, now pruning backups prior to creating a new backup. * Enabled test coverage output during tox runs. * Added logo. 1.0.3 * #22: Fix for verbosity flag not actually causing verbose output. 1.0.2 * #21: Fix for traceback when remote_path option is missing. 1.0.1 * #20: Support for Borg's --remote-path option to use an alternate Borg executable. See sample/config. 1.0.0 * Attic is no longer supported, as there hasn't been any recent development on it. Dropping Attic support will allow faster iteration on Borg-specific features. If you're still using Attic, this is a good time to switch to Borg! * Project renamed from atticmatic to borgmatic. See the borgmatic README for information on upgrading. 0.1.8 * Fix for handling of spaces in source_directories which resulted in backup up everything. * Fix for broken links to Borg documentation. * At verbosity zero, suppressing Borg check stderr spew about "Checking segments". * Support for Borg --one-file-system. * Support for Borg create --umask. * Support for file globs in source_directories. 0.1.7 * #12: Fixed parsing of punctuation in configuration file. * Better error message when configuration file is missing. 0.1.6 * #10: New configuration option for the encryption passphrase. * #11: Support for Borg's new archive compression feature. 0.1.5 * Changes to support release on PyPI. Now pip installable by name! 0.1.4 * Adding test that setup.py version matches release version. 0.1.3 * #2: Add support for "borg check --last N" to Borg backend. 0.1.2 * As a convenience to new users, allow a missing default excludes file. * New issue tracker, linked from documentation. 0.1.1 * Adding borgmatic cron example, and updating documentation to refer to it. 
0.1.0 * New "borgmatic" command to support Borg backup software, a fork of Attic. 0.0.7 * Flag for multiple levels of verbosity: some, and lots. * Improved mocking of Python builtins in unit tests. 0.0.6 * New configuration section for customizing which Attic consistency checks run, if any. 0.0.5 * Fixed regression with --verbose output being buffered. This means dropping the helpful error message introduced in 0.0.4. 0.0.4 * Now using tox to run tests against multiple versions of Python in one go. * Helpful error message about how to create a repository if one is missing. * Troubleshooting section with steps to deal with broken pipes. * Nosetests config file (setup.cfg) with defaults. 0.0.3 * After pruning, run attic's consistency checks on all archives. * Integration tests for argument parsing. * Documentation updates about repository encryption. 0.0.2 * Configuration support for additional attic prune flags: keep_within, keep_hourly, keep_yearly, and prefix. 0.0.1 * Initial release. borgmatic-1.5.1/README.md000066400000000000000000000157021361605604600147600ustar00rootroot00000000000000--- title: borgmatic permalink: index.html --- ## It's your data. Keep it that way. borgmatic logo borgmatic is simple, configuration-driven backup software for servers and workstations. Protect your files with client-side encryption. Backup your databases too. Monitor it all with integrated third-party services. Here's an example configuration file: ```yaml location: # List of source directories to backup. source_directories: - /home - /etc # Paths of local or remote repositories to backup to. repositories: - 1234@usw-s001.rsync.net:backups.borg - k8pDxu32@k8pDxu32.repo.borgbase.com:repo - /var/lib/backups/local.borg retention: # Retention policy for how many backups to keep. keep_daily: 7 keep_weekly: 4 keep_monthly: 6 consistency: # List of checks to run to validate your backups. checks: - repository - archives hooks: # Custom preparation scripts to run. 
before_backup: - prepare-for-backup.sh # Databases to dump and include in backups. postgresql_databases: - name: users # Third-party services to notify you if backups aren't happening. healthchecks: https://hc-ping.com/be067061-cf96-4412-8eae-62b0c50d6a8c ``` Want to see borgmatic in action? Check out the screencast. borgmatic is powered by [Borg Backup](https://www.borgbackup.org/). ## Integrations PostgreSQL      MySQL      MariaDB      Healthchecks      Cronitor      Cronhub      PagerDuty      rsync.net      BorgBase      ## How-to guides * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) ⬅ *Start here!* * [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/) * [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/) * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) * [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) * [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/) * [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/) * [Backup to a removable drive or an intermittent server](https://torsion.org/borgmatic/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server/) * [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/) * [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) ## Reference guides * [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/) * [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/) ## Hosting providers Need somewhere to store your encrypted offsite backups? 
The following hosting providers include specific support for Borg/borgmatic. Using these links and services helps support borgmatic development and hosting. (These are referral links, but without any tracking scripts or cookies.)
  • rsync.net: Cloud Storage provider with full support for borg and any other SSH/SFTP tool
  • BorgBase: Borg hosting service with support for monitoring, 2FA, and append-only repos
## Support and contributing ### Issues You've got issues? Or an idea for a feature enhancement? We've got an [issue tracker](https://projects.torsion.org/witten/borgmatic/issues). In order to create a new issue or comment on an issue, you'll need to [login first](https://projects.torsion.org/user/login). Note that you can login with an existing GitHub account if you prefer. If you'd like to chat with borgmatic developers or users, head on over to the `#borgmatic` IRC channel on Freenode, either via web chat or a native IRC client. Also see the [security policy](https://torsion.org/borgmatic/docs/security-policy/) for any security issues. Other questions or comments? Contact [witten@torsion.org](mailto:witten@torsion.org). ### Contributing borgmatic is hosted at with [source code available](https://projects.torsion.org/witten/borgmatic). It's also mirrored on [GitHub](https://github.com/witten/borgmatic) for convenience. If you'd like to contribute to borgmatic development, please feel free to submit a [Pull Request](https://projects.torsion.org/witten/borgmatic/pulls) or open an [issue](https://projects.torsion.org/witten/borgmatic/issues) first to discuss your idea. We also accept Pull Requests on GitHub, if that's more your thing. In general, contributions are very welcome. We don't bite! Also, please check out the [borgmatic development how-to](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) for info on cloning source code, running tests, etc. ![Build Status](https://build.torsion.org/api/badges/witten/borgmatic/status.svg?ref=refs/heads/master) borgmatic-1.5.1/SECURITY.md000066400000000000000000000012411361605604600152630ustar00rootroot00000000000000--- title: Security policy permalink: security-policy/index.html --- ## Supported versions While we want to hear about security vulnerabilities in all versions of borgmatic, security fixes will only be made to the most recently released version. 
It's not practical for our small volunteer effort to maintain multiple different release branches and put out separate security patches for each. ## Reporting a vulnerability If you find a security vulnerability, please [file a ticket](https://torsion.org/borgmatic/#issues) or [send email directly](mailto:witten@torsion.org) as appropriate. You should expect to hear back within a few days at most, and generally sooner. borgmatic-1.5.1/borgmatic/000077500000000000000000000000001361605604600154435ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/__init__.py000066400000000000000000000000001361605604600175420ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/borg/000077500000000000000000000000001361605604600163745ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/borg/__init__.py000066400000000000000000000000001361605604600204730ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/borg/check.py000066400000000000000000000115741361605604600200330ustar00rootroot00000000000000import logging from borgmatic.borg import extract from borgmatic.execute import execute_command, execute_command_without_capture DEFAULT_CHECKS = ('repository', 'archives') DEFAULT_PREFIX = '{hostname}-' logger = logging.getLogger(__name__) def _parse_checks(consistency_config, only_checks=None): ''' Given a consistency config with a "checks" list, and an optional list of override checks, transform them a tuple of named checks to run. For example, given a retention config of: {'checks': ['repository', 'archives']} This will be returned as: ('repository', 'archives') If no "checks" option is present in the config, return the DEFAULT_CHECKS. If the checks value is the string "disabled", return an empty tuple, meaning that no checks should be run. If the "data" option is present, then make sure the "archives" option is included as well. 
''' checks = [ check.lower() for check in (only_checks or consistency_config.get('checks', []) or []) ] if checks == ['disabled']: return () if 'data' in checks and 'archives' not in checks: checks.append('archives') return tuple(check for check in checks if check not in ('disabled', '')) or DEFAULT_CHECKS def _make_check_flags(checks, check_last=None, prefix=None): ''' Given a parsed sequence of checks, transform it into tuple of command-line flags. For example, given parsed checks of: ('repository',) This will be returned as: ('--repository-only',) However, if both "repository" and "archives" are in checks, then omit them from the returned flags because Borg does both checks by default. Additionally, if a check_last value is given and "archives" is in checks, then include a "--last" flag. And if a prefix value is given and "archives" is in checks, then include a "--prefix" flag. ''' if 'archives' in checks: last_flags = ('--last', str(check_last)) if check_last else () prefix_flags = ('--prefix', prefix) if prefix else () else: last_flags = () prefix_flags = () if check_last: logger.warning( 'Ignoring check_last option, as "archives" is not in consistency checks.' ) if prefix: logger.warning( 'Ignoring consistency prefix option, as "archives" is not in consistency checks.' 
) common_flags = last_flags + prefix_flags + (('--verify-data',) if 'data' in checks else ()) if set(DEFAULT_CHECKS).issubset(set(checks)): return common_flags return ( tuple('--{}-only'.format(check) for check in checks if check in DEFAULT_CHECKS) + common_flags ) def check_archives( repository, storage_config, consistency_config, local_path='borg', remote_path=None, progress=None, repair=None, only_checks=None, ): ''' Given a local or remote repository path, a storage config dict, a consistency config dict, local/remote commands to run, whether to include progress information, whether to attempt a repair, and an optional list of checks to use instead of configured checks, check the contained Borg archives for consistency. If there are no consistency checks to run, skip running them. ''' checks = _parse_checks(consistency_config, only_checks) check_last = consistency_config.get('check_last', None) lock_wait = None extra_borg_options = storage_config.get('extra_borg_options', {}).get('check', '') if set(checks).intersection(set(DEFAULT_CHECKS + ('data',))): lock_wait = storage_config.get('lock_wait', None) verbosity_flags = () if logger.isEnabledFor(logging.INFO): verbosity_flags = ('--info',) if logger.isEnabledFor(logging.DEBUG): verbosity_flags = ('--debug', '--show-rc') prefix = consistency_config.get('prefix', DEFAULT_PREFIX) full_command = ( (local_path, 'check') + (('--repair',) if repair else ()) + _make_check_flags(checks, check_last, prefix) + (('--remote-path', remote_path) if remote_path else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + verbosity_flags + (('--progress',) if progress else ()) + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + (repository,) ) # The Borg repair option trigger an interactive prompt, which won't work when output is # captured. And progress messes with the terminal directly. 
if repair or progress: execute_command_without_capture(full_command, error_on_warnings=True) else: execute_command(full_command, error_on_warnings=True) if 'extract' in checks: extract.extract_last_archive_dry_run(repository, lock_wait, local_path, remote_path) borgmatic-1.5.1/borgmatic/borg/create.py000066400000000000000000000175411361605604600202210ustar00rootroot00000000000000import glob import itertools import logging import os import tempfile from borgmatic.execute import execute_command, execute_command_without_capture logger = logging.getLogger(__name__) def _expand_directory(directory): ''' Given a directory path, expand any tilde (representing a user's home directory) and any globs therein. Return a list of one or more resulting paths. ''' expanded_directory = os.path.expanduser(directory) return glob.glob(expanded_directory) or [expanded_directory] def _expand_directories(directories): ''' Given a sequence of directory paths, expand tildes and globs in each one. Return all the resulting directories as a single flattened tuple. ''' if directories is None: return () return tuple( itertools.chain.from_iterable(_expand_directory(directory) for directory in directories) ) def _expand_home_directories(directories): ''' Given a sequence of directory paths, expand tildes in each one. Do not perform any globbing. Return the results as a tuple. ''' if directories is None: return () return tuple(os.path.expanduser(directory) for directory in directories) def _write_pattern_file(patterns=None): ''' Given a sequence of patterns, write them to a named temporary file and return it. Return None if no patterns are provided. 
''' if not patterns: return None pattern_file = tempfile.NamedTemporaryFile('w') pattern_file.write('\n'.join(patterns)) pattern_file.flush() return pattern_file def _make_pattern_flags(location_config, pattern_filename=None): ''' Given a location config dict with a potential patterns_from option, and a filename containing any additional patterns, return the corresponding Borg flags for those files as a tuple. ''' pattern_filenames = tuple(location_config.get('patterns_from') or ()) + ( (pattern_filename,) if pattern_filename else () ) return tuple( itertools.chain.from_iterable( ('--patterns-from', pattern_filename) for pattern_filename in pattern_filenames ) ) def _make_exclude_flags(location_config, exclude_filename=None): ''' Given a location config dict with various exclude options, and a filename containing any exclude patterns, return the corresponding Borg flags as a tuple. ''' exclude_filenames = tuple(location_config.get('exclude_from') or ()) + ( (exclude_filename,) if exclude_filename else () ) exclude_from_flags = tuple( itertools.chain.from_iterable( ('--exclude-from', exclude_filename) for exclude_filename in exclude_filenames ) ) caches_flag = ('--exclude-caches',) if location_config.get('exclude_caches') else () if_present_flags = tuple( itertools.chain.from_iterable( ('--exclude-if-present', if_present) for if_present in location_config.get('exclude_if_present', ()) ) ) keep_exclude_tags_flags = ( ('--keep-exclude-tags',) if location_config.get('keep_exclude_tags') else () ) exclude_nodump_flags = ('--exclude-nodump',) if location_config.get('exclude_nodump') else () return ( exclude_from_flags + caches_flag + if_present_flags + keep_exclude_tags_flags + exclude_nodump_flags ) DEFAULT_BORGMATIC_SOURCE_DIRECTORY = '~/.borgmatic' def borgmatic_source_directories(borgmatic_source_directory): ''' Return a list of borgmatic-specific source directories used for state like database backups. 
''' if not borgmatic_source_directory: borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY return ( [borgmatic_source_directory] if os.path.exists(os.path.expanduser(borgmatic_source_directory)) else [] ) def create_archive( dry_run, repository, location_config, storage_config, local_path='borg', remote_path=None, progress=False, stats=False, json=False, files=False, ): ''' Given vebosity/dry-run flags, a local or remote repository path, a location config dict, and a storage config dict, create a Borg archive and return Borg's JSON output (if any). ''' sources = _expand_directories( location_config['source_directories'] + borgmatic_source_directories(location_config.get('borgmatic_source_directory')) ) pattern_file = _write_pattern_file(location_config.get('patterns')) exclude_file = _write_pattern_file( _expand_home_directories(location_config.get('exclude_patterns')) ) checkpoint_interval = storage_config.get('checkpoint_interval', None) chunker_params = storage_config.get('chunker_params', None) compression = storage_config.get('compression', None) remote_rate_limit = storage_config.get('remote_rate_limit', None) umask = storage_config.get('umask', None) lock_wait = storage_config.get('lock_wait', None) files_cache = location_config.get('files_cache') default_archive_name_format = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' archive_name_format = storage_config.get('archive_name_format', default_archive_name_format) extra_borg_options = storage_config.get('extra_borg_options', {}).get('create', '') full_command = ( (local_path, 'create') + _make_pattern_flags(location_config, pattern_file.name if pattern_file else None) + _make_exclude_flags(location_config, exclude_file.name if exclude_file else None) + (('--checkpoint-interval', str(checkpoint_interval)) if checkpoint_interval else ()) + (('--chunker-params', chunker_params) if chunker_params else ()) + (('--compression', compression) if compression else ()) + (('--remote-ratelimit', 
str(remote_rate_limit)) if remote_rate_limit else ()) + (('--one-file-system',) if location_config.get('one_file_system') else ()) + (('--numeric-owner',) if location_config.get('numeric_owner') else ()) + (('--noatime',) if location_config.get('atime') is False else ()) + (('--noctime',) if location_config.get('ctime') is False else ()) + (('--nobirthtime',) if location_config.get('birthtime') is False else ()) + (('--read-special',) if location_config.get('read_special') else ()) + (('--nobsdflags',) if location_config.get('bsd_flags') is False else ()) + (('--files-cache', files_cache) if files_cache else ()) + (('--remote-path', remote_path) if remote_path else ()) + (('--umask', str(umask)) if umask else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + (('--list', '--filter', 'AME-') if files and not json and not progress else ()) + (('--info',) if logger.getEffectiveLevel() == logging.INFO and not json else ()) + (('--stats',) if stats and not json and not dry_run else ()) + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not json else ()) + (('--dry-run',) if dry_run else ()) + (('--progress',) if progress else ()) + (('--json',) if json else ()) + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + ( '{repository}::{archive_name_format}'.format( repository=repository, archive_name_format=archive_name_format ), ) + sources ) # The progress output isn't compatible with captured and logged output, as progress messes with # the terminal directly. 
if progress: execute_command_without_capture(full_command, error_on_warnings=False) return if json: output_log_level = None elif (stats or files) and logger.getEffectiveLevel() == logging.WARNING: output_log_level = logging.WARNING else: output_log_level = logging.INFO return execute_command(full_command, output_log_level, error_on_warnings=False) borgmatic-1.5.1/borgmatic/borg/environment.py000066400000000000000000000021511361605604600213110ustar00rootroot00000000000000import os OPTION_TO_ENVIRONMENT_VARIABLE = { 'borg_base_directory': 'BORG_BASE_DIR', 'borg_config_directory': 'BORG_CONFIG_DIR', 'borg_cache_directory': 'BORG_CACHE_DIR', 'borg_security_directory': 'BORG_SECURITY_DIR', 'borg_keys_directory': 'BORG_KEYS_DIR', 'encryption_passcommand': 'BORG_PASSCOMMAND', 'encryption_passphrase': 'BORG_PASSPHRASE', 'ssh_command': 'BORG_RSH', } DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE = { 'relocated_repo_access_is_ok': 'BORG_RELOCATED_REPO_ACCESS_IS_OK', 'unknown_unencrypted_repo_access_is_ok': 'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK', } def initialize(storage_config): for option_name, environment_variable_name in OPTION_TO_ENVIRONMENT_VARIABLE.items(): value = storage_config.get(option_name) if value: os.environ[environment_variable_name] = value for ( option_name, environment_variable_name, ) in DEFAULT_BOOL_OPTION_TO_ENVIRONMENT_VARIABLE.items(): value = storage_config.get(option_name, False) os.environ[environment_variable_name] = 'yes' if value else 'no' borgmatic-1.5.1/borgmatic/borg/extract.py000066400000000000000000000067751361605604600204370ustar00rootroot00000000000000import logging import os from borgmatic.execute import execute_command, execute_command_without_capture logger = logging.getLogger(__name__) def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg', remote_path=None): ''' Perform an extraction dry-run of the most recent archive. If there are no archives, skip the dry-run. 
''' remote_path_flags = ('--remote-path', remote_path) if remote_path else () lock_wait_flags = ('--lock-wait', str(lock_wait)) if lock_wait else () verbosity_flags = () if logger.isEnabledFor(logging.DEBUG): verbosity_flags = ('--debug', '--show-rc') elif logger.isEnabledFor(logging.INFO): verbosity_flags = ('--info',) full_list_command = ( (local_path, 'list', '--short') + remote_path_flags + lock_wait_flags + verbosity_flags + (repository,) ) list_output = execute_command(full_list_command, output_log_level=None, error_on_warnings=False) try: last_archive_name = list_output.strip().splitlines()[-1] except IndexError: return list_flag = ('--list',) if logger.isEnabledFor(logging.DEBUG) else () full_extract_command = ( (local_path, 'extract', '--dry-run') + remote_path_flags + lock_wait_flags + verbosity_flags + list_flag + ( '{repository}::{last_archive_name}'.format( repository=repository, last_archive_name=last_archive_name ), ) ) execute_command(full_extract_command, working_directory=None, error_on_warnings=True) def extract_archive( dry_run, repository, archive, paths, location_config, storage_config, local_path='borg', remote_path=None, destination_path=None, progress=False, error_on_warnings=True, ): ''' Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to restore from the archive, location/storage configuration dicts, optional local and remote Borg paths, and an optional destination path to extract to, extract the archive into the current directory. 
''' umask = storage_config.get('umask', None) lock_wait = storage_config.get('lock_wait', None) full_command = ( (local_path, 'extract') + (('--remote-path', remote_path) if remote_path else ()) + (('--numeric-owner',) if location_config.get('numeric_owner') else ()) + (('--umask', str(umask)) if umask else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + (('--dry-run',) if dry_run else ()) + (('--progress',) if progress else ()) + ('::'.join((repository if ':' in repository else os.path.abspath(repository), archive)),) + (tuple(paths) if paths else ()) ) # The progress output isn't compatible with captured and logged output, as progress messes with # the terminal directly. if progress: execute_command_without_capture( full_command, working_directory=destination_path, error_on_warnings=error_on_warnings ) return # Error on warnings by default, as Borg only gives a warning if the restore paths don't exist in # the archive! execute_command( full_command, working_directory=destination_path, error_on_warnings=error_on_warnings ) borgmatic-1.5.1/borgmatic/borg/flags.py000066400000000000000000000015161361605604600200450ustar00rootroot00000000000000import itertools def make_flags(name, value): ''' Given a flag name and its value, return it formatted as Borg-compatible flags. ''' if not value: return () flag = '--{}'.format(name.replace('_', '-')) if value is True: return (flag,) return (flag, str(value)) def make_flags_from_arguments(arguments, excludes=()): ''' Given borgmatic command-line arguments as an instance of argparse.Namespace, and optionally a list of named arguments to exclude, generate and return the corresponding Borg command-line flags as a tuple. 
''' return tuple( itertools.chain.from_iterable( make_flags(name, value=getattr(arguments, name)) for name in sorted(vars(arguments)) if name not in excludes and not name.startswith('_') ) ) borgmatic-1.5.1/borgmatic/borg/info.py000066400000000000000000000026311361605604600177030ustar00rootroot00000000000000import logging from borgmatic.borg.flags import make_flags, make_flags_from_arguments from borgmatic.execute import execute_command logger = logging.getLogger(__name__) def display_archives_info( repository, storage_config, info_arguments, local_path='borg', remote_path=None ): ''' Given a local or remote repository path, a storage config dict, and the arguments to the info action, display summary information for Borg archives in the repository or return JSON summary information. ''' lock_wait = storage_config.get('lock_wait', None) full_command = ( (local_path, 'info') + ( ('--info',) if logger.getEffectiveLevel() == logging.INFO and not info_arguments.json else () ) + ( ('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not info_arguments.json else () ) + make_flags('remote-path', remote_path) + make_flags('lock-wait', lock_wait) + make_flags_from_arguments(info_arguments, excludes=('repository', 'archive')) + ( '::'.join((repository, info_arguments.archive)) if info_arguments.archive else repository, ) ) return execute_command( full_command, output_log_level=None if info_arguments.json else logging.WARNING, error_on_warnings=False, ) borgmatic-1.5.1/borgmatic/borg/init.py000066400000000000000000000041151361605604600177120ustar00rootroot00000000000000import logging import subprocess from borgmatic.execute import execute_command, execute_command_without_capture logger = logging.getLogger(__name__) INFO_REPOSITORY_NOT_FOUND_EXIT_CODE = 2 def initialize_repository( repository, storage_config, encryption_mode, append_only=None, storage_quota=None, local_path='borg', remote_path=None, ): ''' Given a local or remote repository path, a storage 
configuration dict, a Borg encryption mode, whether the repository should be append-only, and the storage quota to use, initialize the repository. If the repository already exists, then log and skip initialization. ''' info_command = ( (local_path, 'info') + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ()) + (('--remote-path', remote_path) if remote_path else ()) + (repository,) ) logger.debug(' '.join(info_command)) try: execute_command(info_command, output_log_level=None) logger.info('Repository already exists. Skipping initialization.') return except subprocess.CalledProcessError as error: if error.returncode != INFO_REPOSITORY_NOT_FOUND_EXIT_CODE: raise extra_borg_options = storage_config.get('extra_borg_options', {}).get('init', '') init_command = ( (local_path, 'init') + (('--encryption', encryption_mode) if encryption_mode else ()) + (('--append-only',) if append_only else ()) + (('--storage-quota', storage_quota) if storage_quota else ()) + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug',) if logger.isEnabledFor(logging.DEBUG) else ()) + (('--remote-path', remote_path) if remote_path else ()) + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + (repository,) ) # Don't use execute_command() here because it doesn't support interactive prompts. execute_command_without_capture(init_command, error_on_warnings=False) borgmatic-1.5.1/borgmatic/borg/list.py000066400000000000000000000064111361605604600177230ustar00rootroot00000000000000import logging from borgmatic.borg.flags import make_flags, make_flags_from_arguments from borgmatic.execute import execute_command logger = logging.getLogger(__name__) # A hack to convince Borg to exclude archives ending in ".checkpoint". This assumes that a # non-checkpoint archive name ends in a digit (e.g. from a timestamp). 
BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]' def resolve_archive_name(repository, archive, storage_config, local_path='borg', remote_path=None): ''' Given a local or remote repository path, an archive name, a storage config dict, a local Borg path, and a remote Borg path, simply return the archive name. But if the archive name is "latest", then instead introspect the repository for the latest successful (non-checkpoint) archive, and return its name. Raise ValueError if "latest" is given but there are no archives in the repository. ''' if archive != "latest": return archive lock_wait = storage_config.get('lock_wait', None) full_command = ( (local_path, 'list') + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + make_flags('remote-path', remote_path) + make_flags('lock-wait', lock_wait) + make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB) + make_flags('last', 1) + ('--short', repository) ) output = execute_command(full_command, output_log_level=None, error_on_warnings=False) try: latest_archive = output.strip().splitlines()[-1] except IndexError: raise ValueError('No archives found in the repository') logger.debug('{}: Latest archive is {}'.format(repository, latest_archive)) return latest_archive def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None): ''' Given a local or remote repository path, a storage config dict, and the arguments to the list action, display the output of listing Borg archives in the repository or return JSON output. Or, if an archive name is given, listing the files in that archive. 
''' lock_wait = storage_config.get('lock_wait', None) if list_arguments.successful: list_arguments.glob_archives = BORG_EXCLUDE_CHECKPOINTS_GLOB full_command = ( (local_path, 'list') + ( ('--info',) if logger.getEffectiveLevel() == logging.INFO and not list_arguments.json else () ) + ( ('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) and not list_arguments.json else () ) + make_flags('remote-path', remote_path) + make_flags('lock-wait', lock_wait) + make_flags_from_arguments( list_arguments, excludes=('repository', 'archive', 'paths', 'successful') ) + ( '::'.join((repository, list_arguments.archive)) if list_arguments.archive else repository, ) + (tuple(list_arguments.paths) if list_arguments.paths else ()) ) return execute_command( full_command, output_log_level=None if list_arguments.json else logging.WARNING, error_on_warnings=False, ) borgmatic-1.5.1/borgmatic/borg/mount.py000066400000000000000000000031341361605604600201110ustar00rootroot00000000000000import logging from borgmatic.execute import execute_command, execute_command_without_capture logger = logging.getLogger(__name__) def mount_archive( repository, archive, mount_point, paths, foreground, options, storage_config, local_path='borg', remote_path=None, ): ''' Given a local or remote repository path, an optional archive name, a filesystem mount point, zero or more paths to mount from the archive, extra Borg mount options, a storage configuration dict, and optional local and remote Borg paths, mount the archive onto the mount point. 
''' umask = storage_config.get('umask', None) lock_wait = storage_config.get('lock_wait', None) full_command = ( (local_path, 'mount') + (('--remote-path', remote_path) if remote_path else ()) + (('--umask', str(umask)) if umask else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + (('--foreground',) if foreground else ()) + (('-o', options) if options else ()) + (('::'.join((repository, archive)),) if archive else (repository,)) + (mount_point,) + (tuple(paths) if paths else ()) ) # Don't capture the output when foreground mode is used so that ctrl-C can work properly. if foreground: execute_command_without_capture(full_command, error_on_warnings=False) return execute_command(full_command, error_on_warnings=False) borgmatic-1.5.1/borgmatic/borg/prune.py000066400000000000000000000045601361605604600201040ustar00rootroot00000000000000import logging from borgmatic.execute import execute_command logger = logging.getLogger(__name__) def _make_prune_flags(retention_config): ''' Given a retention config dict mapping from option name to value, tranform it into an iterable of command-line name-value flag pairs. 
For example, given a retention config of: {'keep_weekly': 4, 'keep_monthly': 6} This will be returned as an iterable of: ( ('--keep-weekly', '4'), ('--keep-monthly', '6'), ) ''' config = retention_config.copy() if 'prefix' not in config: config['prefix'] = '{hostname}-' elif not config['prefix']: config.pop('prefix') return ( ('--' + option_name.replace('_', '-'), str(value)) for option_name, value in config.items() ) def prune_archives( dry_run, repository, storage_config, retention_config, local_path='borg', remote_path=None, stats=False, files=False, ): ''' Given dry-run flag, a local or remote repository path, a storage config dict, and a retention config dict, prune Borg archives according to the retention policy specified in that configuration. ''' umask = storage_config.get('umask', None) lock_wait = storage_config.get('lock_wait', None) extra_borg_options = storage_config.get('extra_borg_options', {}).get('prune', '') full_command = ( (local_path, 'prune') + tuple(element for pair in _make_prune_flags(retention_config) for element in pair) + (('--remote-path', remote_path) if remote_path else ()) + (('--umask', str(umask)) if umask else ()) + (('--lock-wait', str(lock_wait)) if lock_wait else ()) + (('--stats',) if stats and not dry_run else ()) + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--list',) if files else ()) + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + (('--dry-run',) if dry_run else ()) + (tuple(extra_borg_options.split(' ')) if extra_borg_options else ()) + (repository,) ) if (stats or files) and logger.getEffectiveLevel() == logging.WARNING: output_log_level = logging.WARNING else: output_log_level = logging.INFO execute_command(full_command, output_log_level=output_log_level, error_on_warnings=False) borgmatic-1.5.1/borgmatic/borg/umount.py000066400000000000000000000011301361605604600202700ustar00rootroot00000000000000import logging from borgmatic.execute import execute_command 
logger = logging.getLogger(__name__) def unmount_archive(mount_point, local_path='borg'): ''' Given a mounted filesystem mount point, and an optional local Borg paths, umount the filesystem from the mount point. ''' full_command = ( (local_path, 'umount') + (('--info',) if logger.getEffectiveLevel() == logging.INFO else ()) + (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ()) + (mount_point,) ) execute_command(full_command, error_on_warnings=True) borgmatic-1.5.1/borgmatic/commands/000077500000000000000000000000001361605604600172445ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/commands/__init__.py000066400000000000000000000000001361605604600213430ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/commands/arguments.py000066400000000000000000000515271361605604600216350ustar00rootroot00000000000000import collections from argparse import ArgumentParser from borgmatic.config import collect SUBPARSER_ALIASES = { 'init': ['--init', '-I'], 'prune': ['--prune', '-p'], 'create': ['--create', '-C'], 'check': ['--check', '-k'], 'extract': ['--extract', '-x'], 'mount': ['--mount', '-m'], 'umount': ['--umount', '-u'], 'restore': ['--restore', '-r'], 'list': ['--list', '-l'], 'info': ['--info', '-i'], } def parse_subparser_arguments(unparsed_arguments, subparsers): ''' Given a sequence of arguments, and a subparsers object as returned by argparse.ArgumentParser().add_subparsers(), give each requested action's subparser a shot at parsing all arguments. This allows common arguments like "--repository" to be shared across multiple subparsers. Return the result as a dict mapping from subparser name to a parsed namespace of arguments. 
''' arguments = collections.OrderedDict() remaining_arguments = list(unparsed_arguments) alias_to_subparser_name = { alias: subparser_name for subparser_name, aliases in SUBPARSER_ALIASES.items() for alias in aliases } for subparser_name, subparser in subparsers.choices.items(): if subparser_name not in remaining_arguments: continue canonical_name = alias_to_subparser_name.get(subparser_name, subparser_name) # If a parsed value happens to be the same as the name of a subparser, remove it from the # remaining arguments. This prevents, for instance, "check --only extract" from triggering # the "extract" subparser. parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments) for value in vars(parsed).values(): if isinstance(value, str): if value in subparsers.choices: remaining_arguments.remove(value) elif isinstance(value, list): for item in value: if item in subparsers.choices: remaining_arguments.remove(item) arguments[canonical_name] = parsed # If no actions are explicitly requested, assume defaults: prune, create, and check. if not arguments and '--help' not in unparsed_arguments and '-h' not in unparsed_arguments: for subparser_name in ('prune', 'create', 'check'): subparser = subparsers.choices[subparser_name] parsed, unused_remaining = subparser.parse_known_args(unparsed_arguments) arguments[subparser_name] = parsed return arguments def parse_global_arguments(unparsed_arguments, top_level_parser, subparsers): ''' Given a sequence of arguments, a top-level parser (containing subparsers), and a subparsers object as returned by argparse.ArgumentParser().add_subparsers(), parse and return any global arguments as a parsed argparse.Namespace instance. ''' # Ask each subparser, one by one, to greedily consume arguments. Any arguments that remain # are global arguments. 
remaining_arguments = list(unparsed_arguments) present_subparser_names = set() for subparser_name, subparser in subparsers.choices.items(): if subparser_name not in remaining_arguments: continue present_subparser_names.add(subparser_name) unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments) # If no actions are explicitly requested, assume defaults: prune, create, and check. if ( not present_subparser_names and '--help' not in unparsed_arguments and '-h' not in unparsed_arguments ): for subparser_name in ('prune', 'create', 'check'): subparser = subparsers.choices[subparser_name] unused_parsed, remaining_arguments = subparser.parse_known_args(remaining_arguments) # Remove the subparser names themselves. for subparser_name in present_subparser_names: if subparser_name in remaining_arguments: remaining_arguments.remove(subparser_name) return top_level_parser.parse_args(remaining_arguments) def parse_arguments(*unparsed_arguments): ''' Given command-line arguments with which this script was invoked, parse the arguments and return them as a dict mapping from subparser name (or "global") to an argparse.Namespace instance. 
''' config_paths = collect.get_default_config_paths(expand_home=True) unexpanded_config_paths = collect.get_default_config_paths(expand_home=False) global_parser = ArgumentParser(add_help=False) global_group = global_parser.add_argument_group('global arguments') global_group.add_argument( '-c', '--config', nargs='*', dest='config_paths', default=config_paths, help='Configuration filenames or directories, defaults to: {}'.format( ' '.join(unexpanded_config_paths) ), ) global_group.add_argument( '--excludes', dest='excludes_filename', help='Deprecated in favor of exclude_patterns within configuration', ) global_group.add_argument( '-n', '--dry-run', dest='dry_run', action='store_true', help='Go through the motions, but do not actually write to any repositories', ) global_group.add_argument( '-nc', '--no-color', dest='no_color', action='store_true', help='Disable colored output' ) global_group.add_argument( '-v', '--verbosity', type=int, choices=range(-1, 3), default=0, help='Display verbose progress to the console (from only errors to very verbose: -1, 0, 1, or 2)', ) global_group.add_argument( '--syslog-verbosity', type=int, choices=range(-1, 3), default=0, help='Log verbose progress to syslog (from only errors to very verbose: -1, 0, 1, or 2). Ignored when console is interactive or --log-file is given', ) global_group.add_argument( '--log-file-verbosity', type=int, choices=range(-1, 3), default=0, help='Log verbose progress to log file (from only errors to very verbose: -1, 0, 1, or 2). 
Only used when --log-file is given', ) global_group.add_argument( '--monitoring-verbosity', type=int, choices=range(-1, 3), default=0, help='Log verbose progress to monitoring integrations that support logging (from only errors to very verbose: -1, 0, 1, or 2)', ) global_group.add_argument( '--log-file', type=str, default=None, help='Write log messages to this file instead of syslog', ) global_group.add_argument( '--override', metavar='SECTION.OPTION=VALUE', nargs='+', dest='overrides', help='One or more configuration file options to override with specified values', ) global_group.add_argument( '--version', dest='version', default=False, action='store_true', help='Display installed version number of borgmatic and exit', ) top_level_parser = ArgumentParser( description=''' Simple, configuration-driven backup software for servers and workstations. If none of the action options are given, then borgmatic defaults to: prune, create, and check archives. ''', parents=[global_parser], ) subparsers = top_level_parser.add_subparsers( title='actions', metavar='', help='Specify zero or more actions. Defaults to prune, create, and check. 
Use --help with action for details:', ) init_parser = subparsers.add_parser( 'init', aliases=SUBPARSER_ALIASES['init'], help='Initialize an empty Borg repository', description='Initialize an empty Borg repository', add_help=False, ) init_group = init_parser.add_argument_group('init arguments') init_group.add_argument( '-e', '--encryption', dest='encryption_mode', help='Borg repository encryption mode', required=True, ) init_group.add_argument( '--append-only', dest='append_only', action='store_true', help='Create an append-only repository', ) init_group.add_argument( '--storage-quota', dest='storage_quota', help='Create a repository with a fixed storage quota', ) init_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') prune_parser = subparsers.add_parser( 'prune', aliases=SUBPARSER_ALIASES['prune'], help='Prune archives according to the retention policy', description='Prune archives according to the retention policy', add_help=False, ) prune_group = prune_parser.add_argument_group('prune arguments') prune_group.add_argument( '--stats', dest='stats', default=False, action='store_true', help='Display statistics of archive', ) prune_group.add_argument( '--files', dest='files', default=False, action='store_true', help='Show per-file details' ) prune_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') create_parser = subparsers.add_parser( 'create', aliases=SUBPARSER_ALIASES['create'], help='Create archives (actually perform backups)', description='Create archives (actually perform backups)', add_help=False, ) create_group = create_parser.add_argument_group('create arguments') create_group.add_argument( '--progress', dest='progress', default=False, action='store_true', help='Display progress for each file as it is backed up', ) create_group.add_argument( '--stats', dest='stats', default=False, action='store_true', help='Display statistics of archive', ) create_group.add_argument( '--files', 
dest='files', default=False, action='store_true', help='Show per-file details' ) create_group.add_argument( '--json', dest='json', default=False, action='store_true', help='Output results as JSON' ) create_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') check_parser = subparsers.add_parser( 'check', aliases=SUBPARSER_ALIASES['check'], help='Check archives for consistency', description='Check archives for consistency', add_help=False, ) check_group = check_parser.add_argument_group('check arguments') check_group.add_argument( '--progress', dest='progress', default=False, action='store_true', help='Display progress for each file as it is checked', ) check_group.add_argument( '--repair', dest='repair', default=False, action='store_true', help='Attempt to repair any inconsistencies found (experimental and only for interactive use)', ) check_group.add_argument( '--only', metavar='CHECK', choices=('repository', 'archives', 'data', 'extract'), dest='only', action='append', help='Run a particular consistency check (repository, archives, data, or extract) instead of configured checks; can specify flag multiple times', ) check_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') extract_parser = subparsers.add_parser( 'extract', aliases=SUBPARSER_ALIASES['extract'], help='Extract files from a named archive to the current directory', description='Extract a named archive to the current directory', add_help=False, ) extract_group = extract_parser.add_argument_group('extract arguments') extract_group.add_argument( '--repository', help='Path of repository to extract, defaults to the configured repository if there is only one', ) extract_group.add_argument( '--archive', help='Name of archive to extract (or "latest")', required=True ) extract_group.add_argument( '--path', '--restore-path', metavar='PATH', nargs='+', dest='paths', help='Paths to extract from archive, defaults to the entire archive', ) 
extract_group.add_argument( '--destination', metavar='PATH', dest='destination', help='Directory to extract files into, defaults to the current directory', ) extract_group.add_argument( '--progress', dest='progress', default=False, action='store_true', help='Display progress for each file as it is extracted', ) extract_group.add_argument( '-h', '--help', action='help', help='Show this help message and exit' ) mount_parser = subparsers.add_parser( 'mount', aliases=SUBPARSER_ALIASES['mount'], help='Mount files from a named archive as a FUSE filesystem', description='Mount a named archive as a FUSE filesystem', add_help=False, ) mount_group = mount_parser.add_argument_group('mount arguments') mount_group.add_argument( '--repository', help='Path of repository to use, defaults to the configured repository if there is only one', ) mount_group.add_argument('--archive', help='Name of archive to mount (or "latest")') mount_group.add_argument( '--mount-point', metavar='PATH', dest='mount_point', help='Path where filesystem is to be mounted', required=True, ) mount_group.add_argument( '--path', metavar='PATH', nargs='+', dest='paths', help='Paths to mount from archive, defaults to the entire archive', ) mount_group.add_argument( '--foreground', dest='foreground', default=False, action='store_true', help='Stay in foreground until ctrl-C is pressed', ) mount_group.add_argument('--options', dest='options', help='Extra Borg mount options') mount_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') umount_parser = subparsers.add_parser( 'umount', aliases=SUBPARSER_ALIASES['umount'], help='Unmount a FUSE filesystem that was mounted with "borgmatic mount"', description='Unmount a mounted FUSE filesystem', add_help=False, ) umount_group = umount_parser.add_argument_group('umount arguments') umount_group.add_argument( '--mount-point', metavar='PATH', dest='mount_point', help='Path of filesystem to unmount', required=True, ) 
umount_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') restore_parser = subparsers.add_parser( 'restore', aliases=SUBPARSER_ALIASES['restore'], help='Restore database dumps from a named archive', description='Restore database dumps from a named archive. (To extract files instead, use "borgmatic extract".)', add_help=False, ) restore_group = restore_parser.add_argument_group('restore arguments') restore_group.add_argument( '--repository', help='Path of repository to restore from, defaults to the configured repository if there is only one', ) restore_group.add_argument( '--archive', help='Name of archive to restore from (or "latest")', required=True ) restore_group.add_argument( '--database', metavar='NAME', nargs='+', dest='databases', help='Names of databases to restore from archive, defaults to all databases. Note that any databases to restore must be defined in borgmatic\'s configuration', ) restore_group.add_argument( '--progress', dest='progress', default=False, action='store_true', help='Display progress for each database dump file as it is extracted from archive', ) restore_group.add_argument( '-h', '--help', action='help', help='Show this help message and exit' ) list_parser = subparsers.add_parser( 'list', aliases=SUBPARSER_ALIASES['list'], help='List archives', description='List archives or the contents of an archive', add_help=False, ) list_group = list_parser.add_argument_group('list arguments') list_group.add_argument( '--repository', help='Path of repository to list, defaults to the configured repository if there is only one', ) list_group.add_argument('--archive', help='Name of archive to list (or "latest")') list_group.add_argument( '--path', metavar='PATH', nargs='+', dest='paths', help='Paths to list from archive, defaults to the entire archive', ) list_group.add_argument( '--short', default=False, action='store_true', help='Output only archive or path names' ) list_group.add_argument('--format', 
help='Format for file listing') list_group.add_argument( '--json', default=False, action='store_true', help='Output results as JSON' ) list_group.add_argument( '-P', '--prefix', help='Only list archive names starting with this prefix' ) list_group.add_argument( '-a', '--glob-archives', metavar='GLOB', help='Only list archive names matching this glob' ) list_group.add_argument( '--successful', default=False, action='store_true', help='Only list archive names of successful (non-checkpoint) backups', ) list_group.add_argument( '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys' ) list_group.add_argument( '--first', metavar='N', help='List first N archives after other filters are applied' ) list_group.add_argument( '--last', metavar='N', help='List last N archives after other filters are applied' ) list_group.add_argument( '-e', '--exclude', metavar='PATTERN', help='Exclude paths matching the pattern' ) list_group.add_argument( '--exclude-from', metavar='FILENAME', help='Exclude paths from exclude file, one per line' ) list_group.add_argument('--pattern', help='Include or exclude paths matching a pattern') list_group.add_argument( '--patterns-from', metavar='FILENAME', help='Include or exclude paths matching patterns from pattern file, one per line', ) list_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') info_parser = subparsers.add_parser( 'info', aliases=SUBPARSER_ALIASES['info'], help='Display summary information on archives', description='Display summary information on archives', add_help=False, ) info_group = info_parser.add_argument_group('info arguments') info_group.add_argument( '--repository', help='Path of repository to show info for, defaults to the configured repository if there is only one', ) info_group.add_argument('--archive', help='Name of archive to show info for (or "latest")') info_group.add_argument( '--json', dest='json', default=False, action='store_true', help='Output results as 
JSON' ) info_group.add_argument( '-P', '--prefix', help='Only show info for archive names starting with this prefix' ) info_group.add_argument( '-a', '--glob-archives', metavar='GLOB', help='Only show info for archive names matching this glob', ) info_group.add_argument( '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys' ) info_group.add_argument( '--first', metavar='N', help='Show info for first N archives after other filters are applied', ) info_group.add_argument( '--last', metavar='N', help='Show info for first N archives after other filters are applied' ) info_group.add_argument('-h', '--help', action='help', help='Show this help message and exit') arguments = parse_subparser_arguments(unparsed_arguments, subparsers) arguments['global'] = parse_global_arguments(unparsed_arguments, top_level_parser, subparsers) if arguments['global'].excludes_filename: raise ValueError( 'The --excludes option has been replaced with exclude_patterns in configuration' ) if 'init' in arguments and arguments['global'].dry_run: raise ValueError('The init action cannot be used with the --dry-run option') if 'list' in arguments and arguments['list'].glob_archives and arguments['list'].successful: raise ValueError('The --glob-archives and --successful options cannot be used together') if ( 'list' in arguments and 'info' in arguments and arguments['list'].json and arguments['info'].json ): raise ValueError('With the --json option, list and info actions cannot be used together') return arguments borgmatic-1.5.1/borgmatic/commands/borgmatic.py000066400000000000000000000632761361605604600216030ustar00rootroot00000000000000import collections import copy import json import logging import os import sys from subprocess import CalledProcessError import colorama import pkg_resources from borgmatic.borg import check as borg_check from borgmatic.borg import create as borg_create from borgmatic.borg import environment as borg_environment from borgmatic.borg import extract as 
borg_extract from borgmatic.borg import info as borg_info from borgmatic.borg import init as borg_init from borgmatic.borg import list as borg_list from borgmatic.borg import mount as borg_mount from borgmatic.borg import prune as borg_prune from borgmatic.borg import umount as borg_umount from borgmatic.commands.arguments import parse_arguments from borgmatic.config import checks, collect, convert, validate from borgmatic.hooks import command, dispatch, dump, monitor from borgmatic.logger import configure_logging, should_do_markup from borgmatic.signals import configure_signals from borgmatic.verbosity import verbosity_to_log_level logger = logging.getLogger(__name__) LEGACY_CONFIG_PATH = '/etc/borgmatic/config' def run_configuration(config_filename, config, arguments): ''' Given a config filename, the corresponding parsed config dict, and command-line arguments as a dict from subparser name to a namespace of parsed arguments, execute its defined pruning, backups, consistency checks, and/or other actions. 
Yield a combination of: * JSON output strings from successfully executing any actions that produce JSON * logging.LogRecord instances containing errors from any actions or backup hooks that fail ''' (location, storage, retention, consistency, hooks) = ( config.get(section_name, {}) for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks') ) global_arguments = arguments['global'] local_path = location.get('local_path', 'borg') remote_path = location.get('remote_path') borg_environment.initialize(storage) encountered_error = None error_repository = '' prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments) monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity) try: if prune_create_or_check: dispatch.call_hooks( 'ping_monitor', hooks, config_filename, monitor.MONITOR_HOOK_NAMES, monitor.State.START, monitoring_log_level, global_arguments.dry_run, ) if 'prune' in arguments: command.execute_hook( hooks.get('before_prune'), hooks.get('umask'), config_filename, 'pre-prune', global_arguments.dry_run, ) if 'create' in arguments: command.execute_hook( hooks.get('before_backup'), hooks.get('umask'), config_filename, 'pre-backup', global_arguments.dry_run, ) dispatch.call_hooks( 'dump_databases', hooks, config_filename, dump.DATABASE_HOOK_NAMES, location, global_arguments.dry_run, ) if 'check' in arguments: command.execute_hook( hooks.get('before_check'), hooks.get('umask'), config_filename, 'pre-check', global_arguments.dry_run, ) except (OSError, CalledProcessError) as error: if command.considered_soft_failure(config_filename, error): return encountered_error = error yield from make_error_log_records( '{}: Error running pre hook'.format(config_filename), error ) if not encountered_error: for repository_path in location['repositories']: try: yield from run_actions( arguments=arguments, location=location, storage=storage, retention=retention, consistency=consistency, hooks=hooks, local_path=local_path, 
remote_path=remote_path, repository_path=repository_path, ) except (OSError, CalledProcessError, ValueError) as error: encountered_error = error error_repository = repository_path yield from make_error_log_records( '{}: Error running actions for repository'.format(repository_path), error ) if not encountered_error: try: if 'prune' in arguments: command.execute_hook( hooks.get('after_prune'), hooks.get('umask'), config_filename, 'post-prune', global_arguments.dry_run, ) if 'create' in arguments: dispatch.call_hooks( 'remove_database_dumps', hooks, config_filename, dump.DATABASE_HOOK_NAMES, location, global_arguments.dry_run, ) command.execute_hook( hooks.get('after_backup'), hooks.get('umask'), config_filename, 'post-backup', global_arguments.dry_run, ) if 'check' in arguments: command.execute_hook( hooks.get('after_check'), hooks.get('umask'), config_filename, 'post-check', global_arguments.dry_run, ) if {'prune', 'create', 'check'}.intersection(arguments): dispatch.call_hooks( 'ping_monitor', hooks, config_filename, monitor.MONITOR_HOOK_NAMES, monitor.State.FINISH, monitoring_log_level, global_arguments.dry_run, ) except (OSError, CalledProcessError) as error: if command.considered_soft_failure(config_filename, error): return encountered_error = error yield from make_error_log_records( '{}: Error running post hook'.format(config_filename), error ) if encountered_error and prune_create_or_check: try: command.execute_hook( hooks.get('on_error'), hooks.get('umask'), config_filename, 'on-error', global_arguments.dry_run, repository=error_repository, error=encountered_error, output=getattr(encountered_error, 'output', ''), ) dispatch.call_hooks( 'ping_monitor', hooks, config_filename, monitor.MONITOR_HOOK_NAMES, monitor.State.FAIL, monitoring_log_level, global_arguments.dry_run, ) except (OSError, CalledProcessError) as error: if command.considered_soft_failure(config_filename, error): return yield from make_error_log_records( '{}: Error running on-error 
hook'.format(config_filename), error ) def run_actions( *, arguments, location, storage, retention, consistency, hooks, local_path, remote_path, repository_path ): # pragma: no cover ''' Given parsed command-line arguments as an argparse.ArgumentParser instance, several different configuration dicts, local and remote paths to Borg, and a repository name, run all actions from the command-line arguments on the given repository. Yield JSON output strings from executing any actions that produce JSON. Raise OSError or subprocess.CalledProcessError if an error occurs running a command for an action. Raise ValueError if the arguments or configuration passed to action are invalid. ''' repository = os.path.expanduser(repository_path) global_arguments = arguments['global'] dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else '' if 'init' in arguments: logger.info('{}: Initializing repository'.format(repository)) borg_init.initialize_repository( repository, storage, arguments['init'].encryption_mode, arguments['init'].append_only, arguments['init'].storage_quota, local_path=local_path, remote_path=remote_path, ) if 'prune' in arguments: logger.info('{}: Pruning archives{}'.format(repository, dry_run_label)) borg_prune.prune_archives( global_arguments.dry_run, repository, storage, retention, local_path=local_path, remote_path=remote_path, stats=arguments['prune'].stats, files=arguments['prune'].files, ) if 'create' in arguments: logger.info('{}: Creating archive{}'.format(repository, dry_run_label)) json_output = borg_create.create_archive( global_arguments.dry_run, repository, location, storage, local_path=local_path, remote_path=remote_path, progress=arguments['create'].progress, stats=arguments['create'].stats, json=arguments['create'].json, files=arguments['create'].files, ) if json_output: yield json.loads(json_output) if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency): logger.info('{}: Running 
consistency checks'.format(repository)) borg_check.check_archives( repository, storage, consistency, local_path=local_path, remote_path=remote_path, progress=arguments['check'].progress, repair=arguments['check'].repair, only_checks=arguments['check'].only, ) if 'extract' in arguments: if arguments['extract'].repository is None or validate.repositories_match( repository, arguments['extract'].repository ): logger.info( '{}: Extracting archive {}'.format(repository, arguments['extract'].archive) ) borg_extract.extract_archive( global_arguments.dry_run, repository, borg_list.resolve_archive_name( repository, arguments['extract'].archive, storage, local_path, remote_path ), arguments['extract'].paths, location, storage, local_path=local_path, remote_path=remote_path, destination_path=arguments['extract'].destination, progress=arguments['extract'].progress, ) if 'mount' in arguments: if arguments['mount'].repository is None or validate.repositories_match( repository, arguments['mount'].repository ): if arguments['mount'].archive: logger.info( '{}: Mounting archive {}'.format(repository, arguments['mount'].archive) ) else: logger.info('{}: Mounting repository'.format(repository)) borg_mount.mount_archive( repository, borg_list.resolve_archive_name( repository, arguments['mount'].archive, storage, local_path, remote_path ), arguments['mount'].mount_point, arguments['mount'].paths, arguments['mount'].foreground, arguments['mount'].options, storage, local_path=local_path, remote_path=remote_path, ) if 'restore' in arguments: if arguments['restore'].repository is None or validate.repositories_match( repository, arguments['restore'].repository ): logger.info( '{}: Restoring databases from archive {}'.format( repository, arguments['restore'].archive ) ) restore_names = arguments['restore'].databases or [] if 'all' in restore_names: restore_names = [] # Extract dumps for the named databases from the archive. 
dump_patterns = dispatch.call_hooks( 'make_database_dump_patterns', hooks, repository, dump.DATABASE_HOOK_NAMES, location, restore_names, ) borg_extract.extract_archive( global_arguments.dry_run, repository, borg_list.resolve_archive_name( repository, arguments['restore'].archive, storage, local_path, remote_path ), dump.convert_glob_patterns_to_borg_patterns( dump.flatten_dump_patterns(dump_patterns, restore_names) ), location, storage, local_path=local_path, remote_path=remote_path, destination_path='/', progress=arguments['restore'].progress, # We don't want glob patterns that don't match to error. error_on_warnings=False, ) # Map the restore names or detected dumps to the corresponding database configurations. restore_databases = dump.get_per_hook_database_configurations( hooks, restore_names, dump_patterns ) # Finally, restore the databases and cleanup the dumps. dispatch.call_hooks( 'restore_database_dumps', restore_databases, repository, dump.DATABASE_HOOK_NAMES, location, global_arguments.dry_run, ) dispatch.call_hooks( 'remove_database_dumps', restore_databases, repository, dump.DATABASE_HOOK_NAMES, location, global_arguments.dry_run, ) if 'list' in arguments: if arguments['list'].repository is None or validate.repositories_match( repository, arguments['list'].repository ): list_arguments = copy.copy(arguments['list']) if not list_arguments.json: logger.warning('{}: Listing archives'.format(repository)) list_arguments.archive = borg_list.resolve_archive_name( repository, list_arguments.archive, storage, local_path, remote_path ) json_output = borg_list.list_archives( repository, storage, list_arguments=list_arguments, local_path=local_path, remote_path=remote_path, ) if json_output: yield json.loads(json_output) if 'info' in arguments: if arguments['info'].repository is None or validate.repositories_match( repository, arguments['info'].repository ): info_arguments = copy.copy(arguments['info']) if not info_arguments.json: logger.warning('{}: Displaying 
summary info for archives'.format(repository)) info_arguments.archive = borg_list.resolve_archive_name( repository, info_arguments.archive, storage, local_path, remote_path ) json_output = borg_info.display_archives_info( repository, storage, info_arguments=info_arguments, local_path=local_path, remote_path=remote_path, ) if json_output: yield json.loads(json_output) def load_configurations(config_filenames, overrides=None): ''' Given a sequence of configuration filenames, load and validate each configuration file. Return the results as a tuple of: dict of configuration filename to corresponding parsed configuration, and sequence of logging.LogRecord instances containing any parse errors. ''' # Dict mapping from config filename to corresponding parsed config dict. configs = collections.OrderedDict() logs = [] # Parse and load each configuration file. for config_filename in config_filenames: try: configs[config_filename] = validate.parse_configuration( config_filename, validate.schema_filename(), overrides ) except (ValueError, OSError, validate.Validation_error) as error: logs.extend( [ logging.makeLogRecord( dict( levelno=logging.CRITICAL, levelname='CRITICAL', msg='{}: Error parsing configuration file'.format(config_filename), ) ), logging.makeLogRecord( dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error) ), ] ) return (configs, logs) def log_record(suppress_log=False, **kwargs): ''' Create a log record based on the given makeLogRecord() arguments, one of which must be named "levelno". Log the record (unless suppress log is set) and return it. ''' record = logging.makeLogRecord(kwargs) if suppress_log: return record logger.handle(record) return record def make_error_log_records(message, error=None): ''' Given error message text and an optional exception object, yield a series of logging.LogRecord instances with error summary information. As a side effect, log each record. 
''' if not error: yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message) return try: raise error except CalledProcessError as error: yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message) if error.output: # Suppress these logs for now and save full error output for the log summary at the end. yield log_record( levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True ) yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error) except (ValueError, OSError) as error: yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message) yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error) except: # noqa: E722 # Raising above only as a means of determining the error type. Swallow the exception here # because we don't want the exception to propagate out of this function. pass def get_local_path(configs): ''' Arbitrarily return the local path from the first configuration dict. Default to "borg" if not set. ''' return next(iter(configs.values())).get('location', {}).get('local_path', 'borg') def collect_configuration_run_summary_logs(configs, arguments): ''' Given a dict of configuration filename to corresponding parsed configuration, and parsed command-line arguments as a dict from subparser name to a parsed namespace of arguments, run each configuration file and yield a series of logging.LogRecord instances containing summary information about each run. As a side effect of running through these configuration files, output their JSON results, if any, to stdout. ''' # Run cross-file validation checks. 
if 'extract' in arguments: repository = arguments['extract'].repository elif 'list' in arguments and arguments['list'].archive: repository = arguments['list'].repository elif 'mount' in arguments: repository = arguments['mount'].repository else: repository = None if repository: try: validate.guard_configuration_contains_repository(repository, configs) except ValueError as error: yield from make_error_log_records(str(error)) return if not configs: yield from make_error_log_records( '{}: No configuration files found'.format(' '.join(arguments['global'].config_paths)) ) return if 'create' in arguments: try: for config_filename, config in configs.items(): hooks = config.get('hooks', {}) command.execute_hook( hooks.get('before_everything'), hooks.get('umask'), config_filename, 'pre-everything', arguments['global'].dry_run, ) except (CalledProcessError, ValueError, OSError) as error: yield from make_error_log_records('Error running pre-everything hook', error) return # Execute the actions corresponding to each configuration file. 
json_results = [] for config_filename, config in configs.items(): results = list(run_configuration(config_filename, config, arguments)) error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord)) if error_logs: yield from make_error_log_records( '{}: Error running configuration file'.format(config_filename) ) yield from error_logs else: yield logging.makeLogRecord( dict( levelno=logging.INFO, levelname='INFO', msg='{}: Successfully ran configuration file'.format(config_filename), ) ) if results: json_results.extend(results) if 'umount' in arguments: logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point)) try: borg_umount.unmount_archive( mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs) ) except (CalledProcessError, OSError) as error: yield from make_error_log_records('Error unmounting mount point', error) if json_results: sys.stdout.write(json.dumps(json_results)) if 'create' in arguments: try: for config_filename, config in configs.items(): hooks = config.get('hooks', {}) command.execute_hook( hooks.get('after_everything'), hooks.get('umask'), config_filename, 'post-everything', arguments['global'].dry_run, ) except (CalledProcessError, ValueError, OSError) as error: yield from make_error_log_records('Error running post-everything hook', error) def exit_with_help_link(): # pragma: no cover ''' Display a link to get help and exit with an error code. ''' logger.critical('') logger.critical('Need some help? 
https://torsion.org/borgmatic/#issues') sys.exit(1) def main(): # pragma: no cover configure_signals() try: arguments = parse_arguments(*sys.argv[1:]) except ValueError as error: configure_logging(logging.CRITICAL) logger.critical(error) exit_with_help_link() except SystemExit as error: if error.code == 0: raise error configure_logging(logging.CRITICAL) logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv))) exit_with_help_link() global_arguments = arguments['global'] if global_arguments.version: print(pkg_resources.require('borgmatic')[0].version) sys.exit(0) config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths)) configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides) any_json_flags = any( getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values() ) colorama.init( autoreset=True, strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs), ) try: configure_logging( verbosity_to_log_level(global_arguments.verbosity), verbosity_to_log_level(global_arguments.syslog_verbosity), verbosity_to_log_level(global_arguments.log_file_verbosity), verbosity_to_log_level(global_arguments.monitoring_verbosity), global_arguments.log_file, ) except (FileNotFoundError, PermissionError) as error: configure_logging(logging.CRITICAL) logger.critical('Error configuring logging: {}'.format(error)) exit_with_help_link() logger.debug('Ensuring legacy configuration is upgraded') convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames) summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments)) summary_logs_max_level = max(log.levelno for log in summary_logs) for message in ('', 'summary:'): log_record( levelno=summary_logs_max_level, levelname=logging.getLevelName(summary_logs_max_level), msg=message, ) for log in summary_logs: logger.handle(log) if summary_logs_max_level >= logging.CRITICAL: exit_with_help_link() 
borgmatic-1.5.1/borgmatic/commands/convert_config.py000066400000000000000000000064451361605604600226340ustar00rootroot00000000000000import os import sys import textwrap from argparse import ArgumentParser from ruamel import yaml from borgmatic.config import convert, generate, legacy, validate DEFAULT_SOURCE_CONFIG_FILENAME = '/etc/borgmatic/config' DEFAULT_SOURCE_EXCLUDES_FILENAME = '/etc/borgmatic/excludes' DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml' def parse_arguments(*arguments): ''' Given command-line arguments with which this script was invoked, parse the arguments and return them as an ArgumentParser instance. ''' parser = ArgumentParser( description=''' Convert legacy INI-style borgmatic configuration and excludes files to a single YAML configuration file. Note that this replaces any comments from the source files. ''' ) parser.add_argument( '-s', '--source-config', dest='source_config_filename', default=DEFAULT_SOURCE_CONFIG_FILENAME, help='Source INI-style configuration filename. Default: {}'.format( DEFAULT_SOURCE_CONFIG_FILENAME ), ) parser.add_argument( '-e', '--source-excludes', dest='source_excludes_filename', default=DEFAULT_SOURCE_EXCLUDES_FILENAME if os.path.exists(DEFAULT_SOURCE_EXCLUDES_FILENAME) else None, help='Excludes filename', ) parser.add_argument( '-d', '--destination-config', dest='destination_config_filename', default=DEFAULT_DESTINATION_CONFIG_FILENAME, help='Destination YAML configuration filename. Default: {}'.format( DEFAULT_DESTINATION_CONFIG_FILENAME ), ) return parser.parse_args(arguments) TEXT_WRAP_CHARACTERS = 80 def display_result(args): # pragma: no cover result_lines = textwrap.wrap( 'Your borgmatic configuration has been upgraded. 
Please review the result in {}.'.format( args.destination_config_filename ), TEXT_WRAP_CHARACTERS, ) delete_lines = textwrap.wrap( 'Once you are satisfied, you can safely delete {}{}.'.format( args.source_config_filename, ' and {}'.format(args.source_excludes_filename) if args.source_excludes_filename else '', ), TEXT_WRAP_CHARACTERS, ) print('\n'.join(result_lines)) print() print('\n'.join(delete_lines)) def main(): # pragma: no cover try: args = parse_arguments(*sys.argv[1:]) schema = yaml.round_trip_load(open(validate.schema_filename()).read()) source_config = legacy.parse_configuration( args.source_config_filename, legacy.CONFIG_FORMAT ) source_config_file_mode = os.stat(args.source_config_filename).st_mode source_excludes = ( open(args.source_excludes_filename).read().splitlines() if args.source_excludes_filename else [] ) destination_config = convert.convert_legacy_parsed_config( source_config, source_excludes, schema ) generate.write_configuration( args.destination_config_filename, destination_config, mode=source_config_file_mode ) display_result(args) except (ValueError, OSError) as error: print(error, file=sys.stderr) sys.exit(1) borgmatic-1.5.1/borgmatic/commands/generate_config.py000066400000000000000000000040651361605604600227420ustar00rootroot00000000000000import sys from argparse import ArgumentParser from borgmatic.config import generate, validate DEFAULT_DESTINATION_CONFIG_FILENAME = '/etc/borgmatic/config.yaml' def parse_arguments(*arguments): ''' Given command-line arguments with which this script was invoked, parse the arguments and return them as an ArgumentParser instance. 
''' parser = ArgumentParser(description='Generate a sample borgmatic YAML configuration file.') parser.add_argument( '-s', '--source', dest='source_filename', help='Optional YAML configuration file to merge into the generated configuration, useful for upgrading your configuration', ) parser.add_argument( '-d', '--destination', dest='destination_filename', default=DEFAULT_DESTINATION_CONFIG_FILENAME, help='Destination YAML configuration file. Default: {}'.format( DEFAULT_DESTINATION_CONFIG_FILENAME ), ) return parser.parse_args(arguments) def main(): # pragma: no cover try: args = parse_arguments(*sys.argv[1:]) generate.generate_sample_configuration( args.source_filename, args.destination_filename, validate.schema_filename() ) print('Generated a sample configuration file at {}.'.format(args.destination_filename)) print() if args.source_filename: print( 'Merged in the contents of configuration file at {}.'.format(args.source_filename) ) print('To review the changes made, run:') print() print( ' diff --unified {} {}'.format(args.source_filename, args.destination_filename) ) print() print('Please edit the file to suit your needs. The values are representative.') print('All fields are optional except where indicated.') print() print('If you ever need help: https://torsion.org/borgmatic/#issues') except (ValueError, OSError) as error: print(error, file=sys.stderr) sys.exit(1) borgmatic-1.5.1/borgmatic/commands/validate_config.py000066400000000000000000000032511361605604600227350ustar00rootroot00000000000000import logging import sys from argparse import ArgumentParser from borgmatic.config import collect, validate logger = logging.getLogger(__name__) def parse_arguments(*arguments): ''' Given command-line arguments with which this script was invoked, parse the arguments and return them as an ArgumentParser instance. 
''' config_paths = collect.get_default_config_paths() parser = ArgumentParser(description='Validate borgmatic configuration file(s).') parser.add_argument( '-c', '--config', nargs='+', dest='config_paths', default=config_paths, help='Configuration filenames or directories, defaults to: {}'.format( ' '.join(config_paths) ), ) return parser.parse_args(arguments) def main(): # pragma: no cover args = parse_arguments(*sys.argv[1:]) logging.basicConfig(level=logging.INFO, format='%(message)s') config_filenames = tuple(collect.collect_config_filenames(args.config_paths)) if len(config_filenames) == 0: logger.critical('No files to validate found') sys.exit(1) found_issues = False for config_filename in config_filenames: try: validate.parse_configuration(config_filename, validate.schema_filename()) except (ValueError, OSError, validate.Validation_error) as error: logging.critical('{}: Error parsing configuration file'.format(config_filename)) logging.critical(error) found_issues = True if found_issues: sys.exit(1) else: logger.info( 'All given configuration files are valid: {}'.format(', '.join(config_filenames)) ) borgmatic-1.5.1/borgmatic/config/000077500000000000000000000000001361605604600167105ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/config/__init__.py000066400000000000000000000000001361605604600210070ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/config/checks.py000066400000000000000000000005351361605604600205250ustar00rootroot00000000000000def repository_enabled_for_checks(repository, consistency): ''' Given a repository name and a consistency configuration dict, return whether the repository is enabled to have consistency checks run. 
''' if not consistency.get('check_repositories'): return True return repository in consistency['check_repositories'] borgmatic-1.5.1/borgmatic/config/collect.py000066400000000000000000000040441361605604600207110ustar00rootroot00000000000000import os def get_default_config_paths(expand_home=True): ''' Based on the value of the XDG_CONFIG_HOME and HOME environment variables, return a list of default configuration paths. This includes both system-wide configuration and configuration in the current user's home directory. Don't expand the home directory ($HOME) if the expand home flag is False. ''' user_config_directory = os.getenv('XDG_CONFIG_HOME') or os.path.join('$HOME', '.config') if expand_home: user_config_directory = os.path.expandvars(user_config_directory) return [ '/etc/borgmatic/config.yaml', '/etc/borgmatic.d', '%s/borgmatic/config.yaml' % user_config_directory, '%s/borgmatic.d' % user_config_directory, ] def collect_config_filenames(config_paths): ''' Given a sequence of config paths, both filenames and directories, resolve that to an iterable of files. Accomplish this by listing any given directories looking for contained config files (ending with the ".yaml" or ".yml" extension). This is non-recursive, so any directories within the given directories are ignored. Return paths even if they don't exist on disk, so the user can find out about missing configuration paths. However, skip a default config path if it's missing, so the user doesn't have to create a default config path unless they need it. 
''' real_default_config_paths = set(map(os.path.realpath, get_default_config_paths())) for path in config_paths: exists = os.path.exists(path) if os.path.realpath(path) in real_default_config_paths and not exists: continue if not os.path.isdir(path) or not exists: yield path continue for filename in sorted(os.listdir(path)): full_filename = os.path.join(path, filename) matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml') if matching_filetype and not os.path.isdir(full_filename): yield full_filename borgmatic-1.5.1/borgmatic/config/convert.py000066400000000000000000000072761361605604600207560ustar00rootroot00000000000000import os from ruamel import yaml from borgmatic.config import generate def _convert_section(source_section_config, section_schema): ''' Given a legacy Parsed_config instance for a single section, convert it to its corresponding yaml.comments.CommentedMap representation in preparation for actual serialization to YAML. Where integer types exist in the given section schema, convert their values to integers. ''' destination_section_config = yaml.comments.CommentedMap( [ ( option_name, int(option_value) if section_schema['map'].get(option_name, {}).get('type') == 'int' else option_value, ) for option_name, option_value in source_section_config.items() ] ) return destination_section_config def convert_legacy_parsed_config(source_config, source_excludes, schema): ''' Given a legacy Parsed_config instance loaded from an INI-style config file and a list of exclude patterns, convert them to a corresponding yaml.comments.CommentedMap representation in preparation for serialization to a single YAML config file. Additionally, use the given schema as a source of helpful comments to include within the returned CommentedMap. 
''' destination_config = yaml.comments.CommentedMap( [ (section_name, _convert_section(section_config, schema['map'][section_name])) for section_name, section_config in source_config._asdict().items() ] ) # Split space-seperated values into actual lists, make "repository" into a list, and merge in # excludes. location = destination_config['location'] location['source_directories'] = source_config.location['source_directories'].split(' ') location['repositories'] = [location.pop('repository')] location['exclude_patterns'] = source_excludes if source_config.consistency.get('checks'): destination_config['consistency']['checks'] = source_config.consistency['checks'].split(' ') # Add comments to each section, and then add comments to the fields in each section. generate.add_comments_to_configuration_map(destination_config, schema) for section_name, section_config in destination_config.items(): generate.add_comments_to_configuration_map( section_config, schema['map'][section_name], indent=generate.INDENT ) return destination_config class Legacy_configuration_not_upgraded(FileNotFoundError): def __init__(self): super(Legacy_configuration_not_upgraded, self).__init__( '''borgmatic changed its configuration file format in version 1.1.0 from INI-style to YAML. This better supports validation, and has a more natural way to express lists of values. To upgrade your existing configuration, run: sudo upgrade-borgmatic-config That will generate a new YAML configuration file at /etc/borgmatic/config.yaml (by default) using the values from both your existing configuration and excludes files. The new version of borgmatic will consume the YAML configuration file instead of the old one.''' ) def guard_configuration_upgraded(source_config_filename, destination_config_filenames): ''' If legacy source configuration exists but no destination upgraded configs do, raise Legacy_configuration_not_upgraded. 
The idea is that we want to alert the user about upgrading their config if they haven't already. ''' destination_config_exists = any( os.path.exists(filename) for filename in destination_config_filenames ) if os.path.exists(source_config_filename) and not destination_config_exists: raise Legacy_configuration_not_upgraded() borgmatic-1.5.1/borgmatic/config/generate.py000066400000000000000000000246161361605604600210650ustar00rootroot00000000000000import collections import io import os import re from ruamel import yaml from borgmatic.config import load INDENT = 4 SEQUENCE_INDENT = 2 def _insert_newline_before_comment(config, field_name): ''' Using some ruamel.yaml black magic, insert a blank line in the config right before the given field and its comments. ''' config.ca.items[field_name][1].insert( 0, yaml.tokens.CommentToken('\n', yaml.error.CommentMark(0), None) ) def _schema_to_sample_configuration(schema, level=0, parent_is_sequence=False): ''' Given a loaded configuration schema, generate and return sample config for it. Include comments for each section based on the schema "desc" description. 
''' example = schema.get('example') if example is not None: return example if 'seq' in schema: config = yaml.comments.CommentedSeq( [ _schema_to_sample_configuration(item_schema, level, parent_is_sequence=True) for item_schema in schema['seq'] ] ) add_comments_to_configuration_sequence( config, schema, indent=(level * INDENT) + SEQUENCE_INDENT ) elif 'map' in schema: config = yaml.comments.CommentedMap( [ (field_name, _schema_to_sample_configuration(sub_schema, level + 1)) for field_name, sub_schema in schema['map'].items() ] ) indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0) add_comments_to_configuration_map( config, schema, indent=indent, skip_first=parent_is_sequence ) else: raise ValueError('Schema at level {} is unsupported: {}'.format(level, schema)) return config def _comment_out_line(line): # If it's already is commented out (or empty), there's nothing further to do! stripped_line = line.lstrip() if not stripped_line or stripped_line.startswith('#'): return line # Comment out the names of optional sections, inserting the '#' after any indent for aesthetics. matches = re.match(r'(\s*)', line) indent_spaces = matches.group(0) if matches else '' count_indent_spaces = len(indent_spaces) return '# '.join((indent_spaces, line[count_indent_spaces:])) def _comment_out_optional_configuration(rendered_config): ''' Post-process a rendered configuration string to comment out optional key/values, as determined by a sentinel in the comment before each key. The idea is that the pre-commented configuration prevents the user from having to comment out a bunch of configuration they don't care about to get to a minimal viable configuration file. Ideally ruamel.yaml would support commenting out keys during configuration generation, but it's not terribly easy to accomplish that way. 
''' lines = [] optional = False for line in rendered_config.split('\n'): # Upon encountering an optional configuration option, commenting out lines until the next # blank line. if line.strip().startswith('# {}'.format(COMMENTED_OUT_SENTINEL)): optional = True continue # Hit a blank line, so reset commenting. if not line.strip(): optional = False lines.append(_comment_out_line(line) if optional else line) return '\n'.join(lines) def _render_configuration(config): ''' Given a config data structure of nested OrderedDicts, render the config as YAML and return it. ''' dumper = yaml.YAML() dumper.indent(mapping=INDENT, sequence=INDENT + SEQUENCE_INDENT, offset=INDENT) rendered = io.StringIO() dumper.dump(config, rendered) return rendered.getvalue() def write_configuration(config_filename, rendered_config, mode=0o600): ''' Given a target config filename and rendered config YAML, write it out to file. Create any containing directories as needed. ''' if os.path.exists(config_filename): raise FileExistsError('{} already exists. Aborting.'.format(config_filename)) try: os.makedirs(os.path.dirname(config_filename), mode=0o700) except (FileExistsError, FileNotFoundError): pass with open(config_filename, 'w') as config_file: config_file.write(rendered_config) os.chmod(config_filename, mode) def add_comments_to_configuration_sequence(config, schema, indent=0): ''' If the given config sequence's items are maps, then mine the schema for the description of the map's first item, and slap that atop the sequence. Indent the comment the given number of characters. Doing this for sequences of maps results in nice comments that look like: ``` things: # First key description. Added by this function. - key: foo # Second key description. Added by add_comments_to_configuration_map(). 
other: bar ``` ''' if 'map' not in schema['seq'][0]: return for field_name in config[0].keys(): field_schema = schema['seq'][0]['map'].get(field_name, {}) description = field_schema.get('desc') # No description to use? Skip it. if not field_schema or not description: return config[0].yaml_set_start_comment(description, indent=indent) # We only want the first key's description here, as the rest of the keys get commented by # add_comments_to_configuration_map(). return REQUIRED_SECTION_NAMES = {'location', 'retention'} REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'} COMMENTED_OUT_SENTINEL = 'COMMENT_OUT' def add_comments_to_configuration_map(config, schema, indent=0, skip_first=False): ''' Using descriptions from a schema as a source, add those descriptions as comments to the given config mapping, before each field. Indent the comment the given number of characters. ''' for index, field_name in enumerate(config.keys()): if skip_first and index == 0: continue field_schema = schema['map'].get(field_name, {}) description = field_schema.get('desc', '').strip() # If this is an optional key, add an indicator to the comment flagging it to be commented # out from the sample configuration. This sentinel is consumed by downstream processing that # does the actual commenting out. if field_name not in REQUIRED_SECTION_NAMES and field_name not in REQUIRED_KEYS: description = ( '\n'.join((description, COMMENTED_OUT_SENTINEL)) if description else COMMENTED_OUT_SENTINEL ) # No description to use? Skip it. if not field_schema or not description: # pragma: no cover continue config.yaml_set_comment_before_after_key(key=field_name, before=description, indent=indent) if index > 0: _insert_newline_before_comment(config, field_name) RUAMEL_YAML_COMMENTS_INDEX = 1 def remove_commented_out_sentinel(config, field_name): ''' Given a configuration CommentedMap and a top-level field name in it, remove any "commented out" sentinel found at the end of its YAML comments. 
This prevents the given field name from getting commented out by downstream processing that consumes the sentinel. ''' try: last_comment_value = config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX][-1].value except KeyError: return if last_comment_value == '# {}\n'.format(COMMENTED_OUT_SENTINEL): config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop() def merge_source_configuration_into_destination(destination_config, source_config): ''' Deep merge the given source configuration dict into the destination configuration CommentedMap, favoring values from the source when there are collisions. The purpose of this is to upgrade configuration files from old versions of borgmatic by adding new configuration keys and comments. ''' if not source_config: return destination_config if not destination_config or not isinstance(source_config, collections.abc.Mapping): return source_config for field_name, source_value in source_config.items(): # Since this key/value is from the source configuration, leave it uncommented and remove any # sentinel that would cause it to get commented out. remove_commented_out_sentinel(destination_config, field_name) # This is a mapping. Recurse for this key/value. if isinstance(source_value, collections.abc.Mapping): destination_config[field_name] = merge_source_configuration_into_destination( destination_config[field_name], source_value ) continue # This is a sequence. Recurse for each item in it. if isinstance(source_value, collections.abc.Sequence) and not isinstance(source_value, str): destination_value = destination_config[field_name] destination_config[field_name] = yaml.comments.CommentedSeq( [ merge_source_configuration_into_destination( destination_value[index] if index < len(destination_value) else None, source_item, ) for index, source_item in enumerate(source_value) ] ) continue # This is some sort of scalar. Simply set it into the destination. 
destination_config[field_name] = source_config[field_name] return destination_config def generate_sample_configuration(source_filename, destination_filename, schema_filename): ''' Given an optional source configuration filename, and a required destination configuration filename, and the path to a schema filename in pykwalify YAML schema format, write out a sample configuration file based on that schema. If a source filename is provided, merge the parsed contents of that configuration into the generated configuration. ''' schema = yaml.round_trip_load(open(schema_filename)) source_config = None if source_filename: source_config = load.load_configuration(source_filename) destination_config = merge_source_configuration_into_destination( _schema_to_sample_configuration(schema), source_config ) write_configuration( destination_filename, _comment_out_optional_configuration(_render_configuration(destination_config)), ) borgmatic-1.5.1/borgmatic/config/legacy.py000066400000000000000000000124701361605604600205320ustar00rootroot00000000000000from collections import OrderedDict, namedtuple from configparser import RawConfigParser Section_format = namedtuple('Section_format', ('name', 'options')) Config_option = namedtuple('Config_option', ('name', 'value_type', 'required')) def option(name, value_type=str, required=True): ''' Given a config file option name, an expected type for its value, and whether it's required, return a Config_option capturing that information. 
''' return Config_option(name, value_type, required) CONFIG_FORMAT = ( Section_format( 'location', ( option('source_directories'), option('one_file_system', value_type=bool, required=False), option('remote_path', required=False), option('repository'), ), ), Section_format( 'storage', ( option('encryption_passphrase', required=False), option('compression', required=False), option('umask', required=False), ), ), Section_format( 'retention', ( option('keep_within', required=False), option('keep_hourly', int, required=False), option('keep_daily', int, required=False), option('keep_weekly', int, required=False), option('keep_monthly', int, required=False), option('keep_yearly', int, required=False), option('prefix', required=False), ), ), Section_format( 'consistency', (option('checks', required=False), option('check_last', required=False)) ), ) def validate_configuration_format(parser, config_format): ''' Given an open RawConfigParser and an expected config file format, validate that the parsed configuration file has the expected sections, that any required options are present in those sections, and that there aren't any unexpected options. A section is required if any of its contained options are required. Raise ValueError if anything is awry. 
''' section_names = set(parser.sections()) required_section_names = tuple( section.name for section in config_format if any(option.required for option in section.options) ) unknown_section_names = section_names - set( section_format.name for section_format in config_format ) if unknown_section_names: raise ValueError( 'Unknown config sections found: {}'.format(', '.join(unknown_section_names)) ) missing_section_names = set(required_section_names) - section_names if missing_section_names: raise ValueError('Missing config sections: {}'.format(', '.join(missing_section_names))) for section_format in config_format: if section_format.name not in section_names: continue option_names = parser.options(section_format.name) expected_options = section_format.options unexpected_option_names = set(option_names) - set( option.name for option in expected_options ) if unexpected_option_names: raise ValueError( 'Unexpected options found in config section {}: {}'.format( section_format.name, ', '.join(sorted(unexpected_option_names)) ) ) missing_option_names = tuple( option.name for option in expected_options if option.required if option.name not in option_names ) if missing_option_names: raise ValueError( 'Required options missing from config section {}: {}'.format( section_format.name, ', '.join(missing_option_names) ) ) def parse_section_options(parser, section_format): ''' Given an open RawConfigParser and an expected section format, return the option values from that section as a dict mapping from option name to value. Omit those options that are not present in the parsed options. Raise ValueError if any option values cannot be coerced to the expected Python data type. 
''' type_getter = {str: parser.get, int: parser.getint, bool: parser.getboolean} return OrderedDict( (option.name, type_getter[option.value_type](section_format.name, option.name)) for option in section_format.options if parser.has_option(section_format.name, option.name) ) def parse_configuration(config_filename, config_format): ''' Given a config filename and an expected config file format, return the parsed configuration as a namedtuple with one attribute for each parsed section. Raise IOError if the file cannot be read, or ValueError if the format is not as expected. ''' parser = RawConfigParser() if not parser.read(config_filename): raise ValueError('Configuration file cannot be opened: {}'.format(config_filename)) validate_configuration_format(parser, config_format) # Describes a parsed configuration, where each attribute is the name of a configuration file # section and each value is a dict of that section's parsed options. Parsed_config = namedtuple( 'Parsed_config', (section_format.name for section_format in config_format) ) return Parsed_config( *(parse_section_options(parser, section_format) for section_format in config_format) ) borgmatic-1.5.1/borgmatic/config/load.py000066400000000000000000000037761361605604600202160ustar00rootroot00000000000000import logging import os import ruamel.yaml logger = logging.getLogger(__name__) def load_configuration(filename): ''' Load the given configuration file and return its contents as a data structure of nested dicts and lists. Raise ruamel.yaml.error.YAMLError if something goes wrong parsing the YAML, or RecursionError if there are too many recursive includes. ''' yaml = ruamel.yaml.YAML(typ='safe') yaml.Constructor = Include_constructor return yaml.load(open(filename)) def include_configuration(loader, filename_node): ''' Load the given YAML filename (ignoring the given loader so we can use our own), and return its contents as a data structure of nested dicts and lists. 
''' return load_configuration(os.path.expanduser(filename_node.value)) class Include_constructor(ruamel.yaml.SafeConstructor): ''' A YAML "constructor" (a ruamel.yaml concept) that supports a custom "!include" tag for including separate YAML configuration files. Example syntax: `retention: !include common.yaml` ''' def __init__(self, preserve_quotes=None, loader=None): super(Include_constructor, self).__init__(preserve_quotes, loader) self.add_constructor('!include', include_configuration) def flatten_mapping(self, node): ''' Support the special case of shallow merging included configuration into an existing mapping using the YAML '<<' merge key. Example syntax: ``` retention: keep_daily: 1 <<: !include common.yaml ``` ''' representer = ruamel.yaml.representer.SafeRepresenter() for index, (key_node, value_node) in enumerate(node.value): if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include': included_value = representer.represent_data(self.construct_object(value_node)) node.value[index] = (key_node, included_value) super(Include_constructor, self).flatten_mapping(node) borgmatic-1.5.1/borgmatic/config/normalize.py000066400000000000000000000006551361605604600212700ustar00rootroot00000000000000def normalize(config): ''' Given a configuration dict, apply particular hard-coded rules to normalize its contents to adhere to the configuration schema. ''' exclude_if_present = config.get('location', {}).get('exclude_if_present') # "Upgrade" exclude_if_present from a string to a list. 
if isinstance(exclude_if_present, str): config['location']['exclude_if_present'] = [exclude_if_present] borgmatic-1.5.1/borgmatic/config/override.py000066400000000000000000000040431361605604600211020ustar00rootroot00000000000000import io import ruamel.yaml def set_values(config, keys, value): ''' Given a hierarchy of configuration dicts, a sequence of parsed key strings, and a string value, descend into the hierarchy based on the keys to set the value into the right place. ''' if not keys: return first_key = keys[0] if len(keys) == 1: config[first_key] = value return if first_key not in config: config[first_key] = {} set_values(config[first_key], keys[1:], value) def convert_value_type(value): ''' Given a string value, determine its logical type (string, boolean, integer, etc.), and return it converted to that type. ''' return ruamel.yaml.YAML(typ='safe').load(io.StringIO(value)) def parse_overrides(raw_overrides): ''' Given a sequence of configuration file override strings in the form of "section.option=value", parse and return a sequence of tuples (keys, values), where keys is a sequence of strings. For instance, given the following raw overrides: ['section.my_option=value1', 'section.other_option=value2'] ... return this: ( (('section', 'my_option'), 'value1'), (('section', 'other_option'), 'value2'), ) Raise ValueError if an override can't be parsed. ''' if not raw_overrides: return () try: return tuple( (tuple(raw_keys.split('.')), convert_value_type(value)) for raw_override in raw_overrides for raw_keys, value in (raw_override.split('=', 1),) ) except ValueError: raise ValueError('Invalid override. Make sure you use the form: SECTION.OPTION=VALUE') def apply_overrides(config, raw_overrides): ''' Given a sequence of configuration file override strings in the form of "section.option=value" and a configuration dict, parse each override and set it the configuration dict. 
''' overrides = parse_overrides(raw_overrides) for (keys, value) in overrides: set_values(config, keys, value) borgmatic-1.5.1/borgmatic/config/schema.yaml000066400000000000000000000726661361605604600210550ustar00rootroot00000000000000name: Borgmatic configuration file schema version: 1 map: location: desc: | Where to look for files to backup, and where to store those backups. See https://borgbackup.readthedocs.io/en/stable/quickstart.html and https://borgbackup.readthedocs.io/en/stable/usage.html#borg-create for details. required: true map: source_directories: required: true seq: - type: str desc: | List of source directories to backup (required). Globs and tildes are expanded. example: - /home - /etc - /var/log/syslog* repositories: required: true seq: - type: str desc: | Paths to local or remote repositories (required). Tildes are expanded. Multiple repositories are backed up to in sequence. See ssh_command for SSH options like identity file or port. example: - user@backupserver:sourcehostname.borg one_file_system: type: bool desc: Stay in same file system (do not cross mount points). Defaults to false. example: true numeric_owner: type: bool desc: Only store/extract numeric user and group identifiers. Defaults to false. example: true atime: type: bool desc: Store atime into archive. Defaults to true. example: false ctime: type: bool desc: Store ctime into archive. Defaults to true. example: false birthtime: type: bool desc: Store birthtime (creation date) into archive. Defaults to true. example: false read_special: type: bool desc: | Use Borg's --read-special flag to allow backup of block and other special devices. Use with caution, as it will lead to problems if used when backing up special devices such as /dev/zero. Defaults to false. example: false bsd_flags: type: bool desc: Record bsdflags (e.g. NODUMP, IMMUTABLE) in archive. Defaults to true. example: true files_cache: type: str desc: | Mode in which to operate the files cache. 
See https://borgbackup.readthedocs.io/en/stable/usage/create.html#description for details. Defaults to "ctime,size,inode". example: ctime,size,inode local_path: type: str desc: Alternate Borg local executable. Defaults to "borg". example: borg1 remote_path: type: str desc: Alternate Borg remote executable. Defaults to "borg". example: borg1 patterns: seq: - type: str desc: | Any paths matching these patterns are included/excluded from backups. Globs are expanded. (Tildes are not.) Note that Borg considers this option experimental. See the output of "borg help patterns" for more details. Quote any value if it contains leading punctuation, so it parses correctly. example: - 'R /' - '- /home/*/.cache' - '+ /home/susan' - '- /home/*' patterns_from: seq: - type: str desc: | Read include/exclude patterns from one or more separate named files, one pattern per line. Note that Borg considers this option experimental. See the output of "borg help patterns" for more details. example: - /etc/borgmatic/patterns exclude_patterns: seq: - type: str desc: | Any paths matching these patterns are excluded from backups. Globs and tildes are expanded. See the output of "borg help patterns" for more details. example: - '*.pyc' - ~/*/.cache - /etc/ssl exclude_from: seq: - type: str desc: | Read exclude patterns from one or more separate named files, one pattern per line. See the output of "borg help patterns" for more details. example: - /etc/borgmatic/excludes exclude_caches: type: bool desc: | Exclude directories that contain a CACHEDIR.TAG file. See http://www.brynosaurus.com/cachedir/spec.html for details. Defaults to false. example: true exclude_if_present: seq: - type: str desc: | Exclude directories that contain a file with the given filenames. Defaults to not set. example: - .nobackup keep_exclude_tags: type: bool desc: | If true, the exclude_if_present filename is included in backups. Defaults to false, meaning that the exclude_if_present filename is omitted from backups. 
example: true exclude_nodump: type: bool desc: | Exclude files with the NODUMP flag. Defaults to false. example: true borgmatic_source_directory: type: str desc: | Path for additional source files used for temporary internal state like borgmatic database dumps. Note that changing this path prevents "borgmatic restore" from finding any database dumps created before the change. Defaults to ~/.borgmatic example: /tmp/borgmatic storage: desc: | Repository storage options. See https://borgbackup.readthedocs.io/en/stable/usage.html#borg-create and https://borgbackup.readthedocs.io/en/stable/usage/general.html#environment-variables for details. map: encryption_passcommand: type: str desc: | The standard output of this command is used to unlock the encryption key. Only use on repositories that were initialized with passcommand/repokey encryption. Note that if both encryption_passcommand and encryption_passphrase are set, then encryption_passphrase takes precedence. Defaults to not set. example: "secret-tool lookup borg-repository repo-name" encryption_passphrase: type: str desc: | Passphrase to unlock the encryption key with. Only use on repositories that were initialized with passphrase/repokey encryption. Quote the value if it contains punctuation, so it parses correctly. And backslash any quote or backslash literals as well. Defaults to not set. example: "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" checkpoint_interval: type: int desc: | Number of seconds between each checkpoint during a long-running backup. See https://borgbackup.readthedocs.io/en/stable/faq.html#if-a-backup-stops-mid-way-does-the-already-backed-up-data-stay-there for details. Defaults to checkpoints every 1800 seconds (30 minutes). example: 1800 chunker_params: type: str desc: | Specify the parameters passed to then chunker (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). See https://borgbackup.readthedocs.io/en/stable/internals.html for details. Defaults to "19,23,21,4095". 
example: 19,23,21,4095 compression: type: str desc: | Type of compression to use when creating archives. See https://borgbackup.readthedocs.org/en/stable/usage.html#borg-create for details. Defaults to "lz4". example: lz4 remote_rate_limit: type: int desc: Remote network upload rate limit in kiBytes/second. Defaults to unlimited. example: 100 ssh_command: type: str desc: | Command to use instead of "ssh". This can be used to specify ssh options. Defaults to not set. example: ssh -i /path/to/private/key borg_base_directory: type: str desc: | Base path used for various Borg directories. Defaults to $HOME, ~$USER, or ~. See https://borgbackup.readthedocs.io/en/stable/usage/general.html#environment-variables for details. example: /path/to/base borg_config_directory: type: str desc: | Path for Borg configuration files. Defaults to $borg_base_directory/.config/borg example: /path/to/base/config borg_cache_directory: type: str desc: | Path for Borg cache files. Defaults to $borg_base_directory/.cache/borg example: /path/to/base/cache borg_security_directory: type: str desc: | Path for Borg security and encryption nonce files. Defaults to $borg_base_directory/.config/borg/security example: /path/to/base/config/security borg_keys_directory: type: str desc: | Path for Borg encryption key files. Defaults to $borg_base_directory/.config/borg/keys example: /path/to/base/config/keys umask: type: scalar desc: Umask to be used for borg create. Defaults to 0077. example: 0077 lock_wait: type: int desc: Maximum seconds to wait for acquiring a repository/cache lock. Defaults to 1. example: 5 archive_name_format: type: str desc: | Name of the archive. Borg placeholders can be used. See the output of "borg help placeholders" for details. Defaults to "{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}". If you specify this option, you must also specify a prefix in the retention section to avoid accidental pruning of archives with a different archive name format. 
And you should also specify a prefix in the consistency section as well. example: "{hostname}-documents-{now}" relocated_repo_access_is_ok: type: bool desc: Bypass Borg error about a repository that has been moved. Defaults to false. example: true unknown_unencrypted_repo_access_is_ok: type: bool desc: | Bypass Borg error about a previously unknown unencrypted repository. Defaults to false. example: true extra_borg_options: map: init: type: str desc: Extra command-line options to pass to "borg init". example: "--make-parent-dirs" prune: type: str desc: Extra command-line options to pass to "borg prune". example: "--save-space" create: type: str desc: Extra command-line options to pass to "borg create". example: "--no-files-cache" check: type: str desc: Extra command-line options to pass to "borg check". example: "--save-space" desc: | Additional options to pass directly to particular Borg commands, handy for Borg options that borgmatic does not yet support natively. Note that borgmatic does not perform any validation on these options. Running borgmatic with "--verbosity 2" shows the exact Borg command-line invocation. retention: desc: | Retention policy for how many backups to keep in each category. See https://borgbackup.readthedocs.org/en/stable/usage.html#borg-prune for details. At least one of the "keep" options is required for pruning to work. See https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/ if you'd like to skip pruning entirely. map: keep_within: type: str desc: Keep all archives within this time interval. example: 3H keep_secondly: type: int desc: Number of secondly archives to keep. example: 60 keep_minutely: type: int desc: Number of minutely archives to keep. example: 60 keep_hourly: type: int desc: Number of hourly archives to keep. example: 24 keep_daily: type: int desc: Number of daily archives to keep. example: 7 keep_weekly: type: int desc: Number of weekly archives to keep. 
example: 4 keep_monthly: type: int desc: Number of monthly archives to keep. example: 6 keep_yearly: type: int desc: Number of yearly archives to keep. example: 1 prefix: type: str desc: | When pruning, only consider archive names starting with this prefix. Borg placeholders can be used. See the output of "borg help placeholders" for details. Defaults to "{hostname}-". Use an empty value to disable the default. example: sourcehostname consistency: desc: | Consistency checks to run after backups. See https://borgbackup.readthedocs.org/en/stable/usage.html#borg-check and https://borgbackup.readthedocs.org/en/stable/usage.html#borg-extract for details. map: checks: seq: - type: str enum: ['repository', 'archives', 'data', 'extract', 'disabled'] unique: true desc: | List of one or more consistency checks to run: "repository", "archives", "data", and/or "extract". Defaults to "repository" and "archives". Set to "disabled" to disable all consistency checks. "repository" checks the consistency of the repository, "archives" checks all of the archives, "data" verifies the integrity of the data within the archives, and "extract" does an extraction dry-run of the most recent archive. Note that "data" implies "archives". example: - repository - archives check_repositories: seq: - type: str desc: | Paths to a subset of the repositories in the location section on which to run consistency checks. Handy in case some of your repositories are very large, and so running consistency checks on them would take too long. Defaults to running consistency checks on all repositories configured in the location section. example: - user@backupserver:sourcehostname.borg check_last: type: int desc: Restrict the number of checked archives to the last n. Applies only to the "archives" check. Defaults to checking all archives. example: 3 prefix: type: str desc: | When performing the "archives" check, only consider archive names starting with this prefix. Borg placeholders can be used. 
See the output of "borg help placeholders" for details. Defaults to "{hostname}-". Use an empty value to disable the default. example: sourcehostname output: desc: | Options for customizing borgmatic's own output and logging. map: color: type: bool desc: | Apply color to console output. Can be overridden with --no-color command-line flag. Defaults to true. example: false hooks: desc: | Shell commands, scripts, or integrations to execute at various points during a borgmatic run. IMPORTANT: All provided commands and scripts are executed with user permissions of borgmatic. Do not forget to set secure permissions on this configuration file (chmod 0600) as well as on any script called from a hook (chmod 0700) to prevent potential shell injection or privilege escalation. map: before_backup: seq: - type: str desc: | List of one or more shell commands or scripts to execute before creating a backup, run once per configuration file. example: - echo "Starting a backup." before_prune: seq: - type: str desc: | List of one or more shell commands or scripts to execute before pruning, run once per configuration file. example: - echo "Starting pruning." before_check: seq: - type: str desc: | List of one or more shell commands or scripts to execute before consistency checks, run once per configuration file. example: - echo "Starting checks." after_backup: seq: - type: str desc: | List of one or more shell commands or scripts to execute after creating a backup, run once per configuration file. example: - echo "Finished a backup." after_prune: seq: - type: str desc: | List of one or more shell commands or scripts to execute after pruning, run once per configuration file. example: - echo "Finished pruning." after_check: seq: - type: str desc: | List of one or more shell commands or scripts to execute after consistency checks, run once per configuration file. example: - echo "Finished checks." 
on_error: seq: - type: str desc: | List of one or more shell commands or scripts to execute when an exception occurs during a "prune", "create", or "check" action or an associated before/after hook. example: - echo "Error during prune/create/check." postgresql_databases: seq: - map: name: required: true type: str desc: | Database name (required if using this hook). Or "all" to dump all databases on the host. example: users hostname: type: str desc: | Database hostname to connect to. Defaults to connecting via local Unix socket. example: database.example.org port: type: int desc: Port to connect to. Defaults to 5432. example: 5433 username: type: str desc: | Username with which to connect to the database. Defaults to the username of the current user. You probably want to specify the "postgres" superuser here when the database name is "all". example: dbuser password: type: str desc: | Password with which to connect to the database. Omitting a password will only work if PostgreSQL is configured to trust the configured username without a password, or you create a ~/.pgpass file. example: trustsome1 format: type: str enum: ['plain', 'custom', 'directory', 'tar'] desc: | Database dump output format. One of "plain", "custom", "directory", or "tar". Defaults to "custom" (unlike raw pg_dump). See https://www.postgresql.org/docs/current/app-pgdump.html for details. Note that format is ignored when the database name is "all". example: directory options: type: str desc: | Additional pg_dump/pg_dumpall options to pass directly to the dump command, without performing any validation on them. See https://www.postgresql.org/docs/current/app-pgdump.html for details. example: --role=someone desc: | List of one or more PostgreSQL databases to dump before creating a backup, run once per configuration file. The database dumps are added to your source directories at runtime, backed up, and then removed afterwards. Requires pg_dump/pg_dumpall/pg_restore commands. 
See https://www.postgresql.org/docs/current/app-pgdump.html for details. mysql_databases: seq: - map: name: required: true type: str desc: | Database name (required if using this hook). Or "all" to dump all databases on the host. example: users hostname: type: str desc: | Database hostname to connect to. Defaults to connecting via local Unix socket. example: database.example.org port: type: int desc: Port to connect to. Defaults to 3306. example: 3307 username: type: str desc: | Username with which to connect to the database. Defaults to the username of the current user. example: dbuser password: type: str desc: | Password with which to connect to the database. Omitting a password will only work if MySQL is configured to trust the configured username without a password. example: trustsome1 options: type: str desc: | Additional mysqldump options to pass directly to the dump command, without performing any validation on them. See https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html or https://mariadb.com/kb/en/library/mysqldump/ for details. example: --skip-comments desc: | List of one or more MySQL/MariaDB databases to dump before creating a backup, run once per configuration file. The database dumps are added to your source directories at runtime, backed up, and then removed afterwards. Requires mysqldump/mysql commands (from either MySQL or MariaDB). See https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html or https://mariadb.com/kb/en/library/mysqldump/ for details. healthchecks: type: str desc: | Healthchecks ping URL or UUID to notify when a backup begins, ends, or errors. Create an account at https://healthchecks.io if you'd like to use this service. See https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook for details. example: https://hc-ping.com/your-uuid-here cronitor: type: str desc: | Cronitor ping URL to notify when a backup begins, ends, or errors. 
Create an account at https://cronitor.io if you'd like to use this service. See https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook for details. example: https://cronitor.link/d3x0c1 pagerduty: type: str desc: | PagerDuty integration key used to notify PagerDuty when a backup errors. Create an account at https://www.pagerduty.com/ if you'd like to use this service. See https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook for details. example: a177cad45bd374409f78906a810a3074 cronhub: type: str desc: | Cronhub ping URL to notify when a backup begins, ends, or errors. Create an account at https://cronhub.io if you'd like to use this service. See https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook for details. example: https://cronhub.io/start/1f5e3410-254c-11e8-b61d-55875966d031 before_everything: seq: - type: str desc: | List of one or more shell commands or scripts to execute before running all actions (if one of them is "create"). These are collected from all configuration files and then run once before all of them (prior to all actions). example: - echo "Starting actions." after_everything: seq: - type: str desc: | List of one or more shell commands or scripts to execute after running all actions (if one of them is "create"). These are collected from all configuration files and then run once before all of them (prior to all actions). example: - echo "Completed actions." umask: type: scalar desc: Umask used when executing hooks. Defaults to the umask that borgmatic is run with. 
example: 0077 borgmatic-1.5.1/borgmatic/config/validate.py000066400000000000000000000143161361605604600210600ustar00rootroot00000000000000import logging import os import pkg_resources import pykwalify.core import pykwalify.errors import ruamel.yaml from borgmatic.config import load, normalize, override def schema_filename(): ''' Path to the installed YAML configuration schema file, used to validate and parse the configuration. ''' return pkg_resources.resource_filename('borgmatic', 'config/schema.yaml') class Validation_error(ValueError): ''' A collection of error message strings generated when attempting to validate a particular configurartion file. ''' def __init__(self, config_filename, error_messages): self.config_filename = config_filename self.error_messages = error_messages def __str__(self): ''' Render a validation error as a user-facing string. ''' return 'An error occurred while parsing a configuration file at {}:\n'.format( self.config_filename ) + '\n'.join(self.error_messages) def apply_logical_validation(config_filename, parsed_configuration): ''' Given a parsed and schematically valid configuration as a data structure of nested dicts (see below), run through any additional logical validation checks. If there are any such validation problems, raise a Validation_error. 
''' archive_name_format = parsed_configuration.get('storage', {}).get('archive_name_format') prefix = parsed_configuration.get('retention', {}).get('prefix') if archive_name_format and not prefix: raise Validation_error( config_filename, ('If you provide an archive_name_format, you must also specify a retention prefix.',), ) location_repositories = parsed_configuration.get('location', {}).get('repositories') check_repositories = parsed_configuration.get('consistency', {}).get('check_repositories', []) for repository in check_repositories: if repository not in location_repositories: raise Validation_error( config_filename, ( 'Unknown repository in the consistency section\'s check_repositories: {}'.format( repository ), ), ) def remove_examples(schema): ''' pykwalify gets angry if the example field is not a string. So rather than bend to its will, remove all examples from the given schema before passing the schema to pykwalify. ''' if 'map' in schema: for item_name, item_schema in schema['map'].items(): item_schema.pop('example', None) remove_examples(item_schema) elif 'seq' in schema: for item_schema in schema['seq']: item_schema.pop('example', None) remove_examples(item_schema) return schema def parse_configuration(config_filename, schema_filename, overrides=None): ''' Given the path to a config filename in YAML format, the path to a schema filename in pykwalify YAML schema format, a sequence of configuration file override strings in the form of "section.option=value", return the parsed configuration as a data structure of nested dicts and lists corresponding to the schema. Example return value: {'location': {'source_directories': ['/home', '/etc'], 'repository': 'hostname.borg'}, 'retention': {'keep_daily': 7}, 'consistency': {'checks': ['repository', 'archives']}} Raise FileNotFoundError if the file does not exist, PermissionError if the user does not have permissions to read the file, or Validation_error if the config does not match the schema. 
''' logging.getLogger('pykwalify').setLevel(logging.ERROR) try: config = load.load_configuration(config_filename) schema = load.load_configuration(schema_filename) except (ruamel.yaml.error.YAMLError, RecursionError) as error: raise Validation_error(config_filename, (str(error),)) override.apply_overrides(config, overrides) normalize.normalize(config) validator = pykwalify.core.Core(source_data=config, schema_data=remove_examples(schema)) parsed_result = validator.validate(raise_exception=False) if validator.validation_errors: raise Validation_error(config_filename, validator.validation_errors) apply_logical_validation(config_filename, parsed_result) return parsed_result def normalize_repository_path(repository): ''' Given a repository path, return the absolute path of it (for local repositories). ''' # A colon in the repository indicates it's a remote repository. Bail. if ':' in repository: return repository return os.path.abspath(repository) def repositories_match(first, second): ''' Given two repository paths (relative and/or absolute), return whether they match. ''' return normalize_repository_path(first) == normalize_repository_path(second) def guard_configuration_contains_repository(repository, configurations): ''' Given a repository path and a dict mapping from config filename to corresponding parsed config dict, ensure that the repository is declared exactly once in all of the configurations. If no repository is given, then error if there are multiple configured repositories. Raise ValueError if the repository is not found in a configuration, or is declared multiple times. ''' if not repository: count = len( tuple( config_repository for config in configurations.values() for config_repository in config['location']['repositories'] ) ) if count > 1: raise ValueError( 'Can\'t determine which repository to use. 
Use --repository option to disambiguate' ) return count = len( tuple( config_repository for config in configurations.values() for config_repository in config['location']['repositories'] if repositories_match(repository, config_repository) ) ) if count == 0: raise ValueError('Repository {} not found in configuration files'.format(repository)) if count > 1: raise ValueError('Repository {} found in multiple configuration files'.format(repository)) borgmatic-1.5.1/borgmatic/execute.py000066400000000000000000000115611361605604600174630ustar00rootroot00000000000000import logging import os import subprocess logger = logging.getLogger(__name__) ERROR_OUTPUT_MAX_LINE_COUNT = 25 BORG_ERROR_EXIT_CODE = 2 def exit_code_indicates_error(command, exit_code, error_on_warnings=True): ''' Return True if the given exit code from running the command corresponds to an error. If error on warnings is False, then treat exit code 1 as a warning instead of an error. ''' if error_on_warnings: return bool(exit_code != 0) return bool(exit_code >= BORG_ERROR_EXIT_CODE) def log_output(command, process, output_buffer, output_log_level, error_on_warnings): ''' Given a command already executed, its process opened by subprocess.Popen(), and the process' relevant output buffer (stderr or stdout), log its output with the requested log level. Additionally, raise a CalledProcessException if the process exits with an error (or a warning, if error on warnings is True). ''' last_lines = [] while process.poll() is None: line = output_buffer.readline().rstrip().decode() if not line: continue # Keep the last few lines of output in case the command errors, and we need the output for # the exception below. 
last_lines.append(line) if len(last_lines) > ERROR_OUTPUT_MAX_LINE_COUNT: last_lines.pop(0) logger.log(output_log_level, line) remaining_output = output_buffer.read().rstrip().decode() if remaining_output: # pragma: no cover logger.log(output_log_level, remaining_output) exit_code = process.poll() if exit_code_indicates_error(command, exit_code, error_on_warnings): # If an error occurs, include its output in the raised exception so that we don't # inadvertently hide error output. if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT: last_lines.insert(0, '...') raise subprocess.CalledProcessError(exit_code, ' '.join(command), '\n'.join(last_lines)) def execute_command( full_command, output_log_level=logging.INFO, output_file=None, input_file=None, shell=False, extra_environment=None, working_directory=None, error_on_warnings=True, ): ''' Execute the given command (a sequence of command/argument strings) and log its output at the given log level. If output log level is None, instead capture and return the output. If an open output file object is given, then write stdout to the file and only log stderr (but only if an output log level is set). If an open input file object is given, then read stdin from the file. If shell is True, execute the command within a shell. If an extra environment dict is given, then use it to augment the current environment, and pass the result into the command. If a working directory is given, use that as the present working directory when running the command. If error on warnings is False, then treat exit code 1 as a warning instead of an error. Raise subprocesses.CalledProcessError if an error occurs while running the command. 
''' logger.debug( ' '.join(full_command) + (' < {}'.format(input_file.name) if input_file else '') + (' > {}'.format(output_file.name) if output_file else '') ) environment = {**os.environ, **extra_environment} if extra_environment else None if output_log_level is None: output = subprocess.check_output( full_command, shell=shell, env=environment, cwd=working_directory ) return output.decode() if output is not None else None else: process = subprocess.Popen( full_command, stdin=input_file, stdout=output_file or subprocess.PIPE, stderr=subprocess.PIPE if output_file else subprocess.STDOUT, shell=shell, env=environment, cwd=working_directory, ) log_output( full_command, process, process.stderr if output_file else process.stdout, output_log_level, error_on_warnings, ) def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=True): ''' Execute the given command (a sequence of command/argument strings), but don't capture or log its output in any way. This is necessary for commands that monkey with the terminal (e.g. progress display) or provide interactive prompts. If a working directory is given, use that as the present working directory when running the command. If error on warnings is False, then treat exit code 1 as a warning instead of an error. 
''' logger.debug(' '.join(full_command)) try: subprocess.check_call(full_command, cwd=working_directory) except subprocess.CalledProcessError as error: if exit_code_indicates_error(full_command, error.returncode, error_on_warnings): raise borgmatic-1.5.1/borgmatic/hooks/000077500000000000000000000000001361605604600165665ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/hooks/__init__.py000066400000000000000000000000001361605604600206650ustar00rootroot00000000000000borgmatic-1.5.1/borgmatic/hooks/command.py000066400000000000000000000060701361605604600205610ustar00rootroot00000000000000import logging import os from borgmatic import execute logger = logging.getLogger(__name__) SOFT_FAIL_EXIT_CODE = 75 def interpolate_context(command, context): ''' Given a single hook command and a dict of context names/values, interpolate the values by "{name}" into the command and return the result. ''' for name, value in context.items(): command = command.replace('{%s}' % name, str(value)) return command def execute_hook(commands, umask, config_filename, description, dry_run, **context): ''' Given a list of hook commands to execute, a umask to execute with (or None), a config filename, a hook description, and whether this is a dry run, run the given commands. Or, don't run them if this is a dry run. The context contains optional values interpolated by name into the hook commands. Currently, this only applies to the on_error hook. Raise ValueError if the umask cannot be parsed. Raise subprocesses.CalledProcessError if an error occurs in a hook. 
''' if not commands: logger.debug('{}: No commands to run for {} hook'.format(config_filename, description)) return dry_run_label = ' (dry run; not actually running hooks)' if dry_run else '' context['configuration_filename'] = config_filename commands = [interpolate_context(command, context) for command in commands] if len(commands) == 1: logger.info( '{}: Running command for {} hook{}'.format(config_filename, description, dry_run_label) ) else: logger.info( '{}: Running {} commands for {} hook{}'.format( config_filename, len(commands), description, dry_run_label ) ) if umask: parsed_umask = int(str(umask), 8) logger.debug('{}: Set hook umask to {}'.format(config_filename, oct(parsed_umask))) original_umask = os.umask(parsed_umask) else: original_umask = None try: for command in commands: if not dry_run: execute.execute_command( [command], output_log_level=logging.ERROR if description == 'on-error' else logging.WARNING, shell=True, ) finally: if original_umask: os.umask(original_umask) def considered_soft_failure(config_filename, error): ''' Given a configuration filename and an exception object, return whether the exception object represents a subprocess.CalledProcessError with a return code of SOFT_FAIL_EXIT_CODE. If so, that indicates that the error is a "soft failure", and should not result in an error. 
''' exit_code = getattr(error, 'returncode', None) if exit_code is None: return False if exit_code == SOFT_FAIL_EXIT_CODE: logger.info( '{}: Command hook exited with soft failure exit code ({}); skipping remaining actions'.format( config_filename, SOFT_FAIL_EXIT_CODE ) ) return True return False borgmatic-1.5.1/borgmatic/hooks/cronhub.py000066400000000000000000000020661361605604600206040ustar00rootroot00000000000000import logging import requests from borgmatic.hooks import monitor logger = logging.getLogger(__name__) MONITOR_STATE_TO_CRONHUB = { monitor.State.START: 'start', monitor.State.FINISH: 'finish', monitor.State.FAIL: 'fail', } def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run): ''' Ping the given Cronhub URL, modified with the monitor.State. Use the given configuration filename in any log entries. If this is a dry run, then don't actually ping anything. ''' dry_run_label = ' (dry run; not actually pinging)' if dry_run else '' formatted_state = '/{}/'.format(MONITOR_STATE_TO_CRONHUB[state]) ping_url = ping_url.replace('/start/', formatted_state).replace('/ping/', formatted_state) logger.info( '{}: Pinging Cronhub {}{}'.format(config_filename, state.name.lower(), dry_run_label) ) logger.debug('{}: Using Cronhub ping URL {}'.format(config_filename, ping_url)) if not dry_run: logging.getLogger('urllib3').setLevel(logging.ERROR) requests.get(ping_url) borgmatic-1.5.1/borgmatic/hooks/cronitor.py000066400000000000000000000017401361605604600210010ustar00rootroot00000000000000import logging import requests from borgmatic.hooks import monitor logger = logging.getLogger(__name__) MONITOR_STATE_TO_CRONITOR = { monitor.State.START: 'run', monitor.State.FINISH: 'complete', monitor.State.FAIL: 'fail', } def ping_monitor(ping_url, config_filename, state, monitoring_log_level, dry_run): ''' Ping the given Cronitor URL, modified with the monitor.State. Use the given configuration filename in any log entries. 
If this is a dry run, then don't actually ping anything. ''' dry_run_label = ' (dry run; not actually pinging)' if dry_run else '' ping_url = '{}/{}'.format(ping_url, MONITOR_STATE_TO_CRONITOR[state]) logger.info( '{}: Pinging Cronitor {}{}'.format(config_filename, state.name.lower(), dry_run_label) ) logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url)) if not dry_run: logging.getLogger('urllib3').setLevel(logging.ERROR) requests.get(ping_url) borgmatic-1.5.1/borgmatic/hooks/dispatch.py000066400000000000000000000046751361605604600207530ustar00rootroot00000000000000import logging from borgmatic.hooks import cronhub, cronitor, healthchecks, mysql, pagerduty, postgresql logger = logging.getLogger(__name__) HOOK_NAME_TO_MODULE = { 'healthchecks': healthchecks, 'cronitor': cronitor, 'cronhub': cronhub, 'pagerduty': pagerduty, 'postgresql_databases': postgresql, 'mysql_databases': mysql, } def call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs): ''' Given the hooks configuration dict and a prefix to use in log entries, call the requested function of the Python module corresponding to the given hook name. Supply that call with the configuration for this hook, the log prefix, and any given args and kwargs. Return any return value. If the hook name is not present in the hooks configuration, then bail without calling anything. Raise ValueError if the hook name is unknown. Raise AttributeError if the function name is not found in the module. Raise anything else that the called function raises. 
''' config = hooks.get(hook_name) if not config: logger.debug('{}: No {} hook configured.'.format(log_prefix, hook_name)) return try: module = HOOK_NAME_TO_MODULE[hook_name] except KeyError: raise ValueError('Unknown hook name: {}'.format(hook_name)) logger.debug('{}: Calling {} hook function {}'.format(log_prefix, hook_name, function_name)) return getattr(module, function_name)(config, log_prefix, *args, **kwargs) def call_hooks(function_name, hooks, log_prefix, hook_names, *args, **kwargs): ''' Given the hooks configuration dict and a prefix to use in log entries, call the requested function of the Python module corresponding to each given hook name. Supply each call with the configuration for that hook, the log prefix, and any given args and kwargs. Collect any return values into a dict from hook name to return value. If the hook name is not present in the hooks configuration, then don't call the function for it, and omit it from the return values. Raise ValueError if the hook name is unknown. Raise AttributeError if the function name is not found in the module. Raise anything else that a called function raises. An error stops calls to subsequent functions. ''' return { hook_name: call_hook(function_name, hooks, log_prefix, hook_name, *args, **kwargs) for hook_name in hook_names if hook_name in hooks } borgmatic-1.5.1/borgmatic/hooks/dump.py000066400000000000000000000144331361605604600201120ustar00rootroot00000000000000import glob import logging import os import shutil from borgmatic.borg.create import DEFAULT_BORGMATIC_SOURCE_DIRECTORY logger = logging.getLogger(__name__) DATABASE_HOOK_NAMES = ('postgresql_databases', 'mysql_databases') def make_database_dump_path(borgmatic_source_directory, database_hook_name): ''' Given a borgmatic source directory (or None) and a database hook name, construct a database dump path. 
''' if not borgmatic_source_directory: borgmatic_source_directory = DEFAULT_BORGMATIC_SOURCE_DIRECTORY return os.path.join(borgmatic_source_directory, database_hook_name) def make_database_dump_filename(dump_path, name, hostname=None): ''' Based on the given dump directory path, database name, and hostname, return a filename to use for the database dump. The hostname defaults to localhost. Raise ValueError if the database name is invalid. ''' if os.path.sep in name: raise ValueError('Invalid database name {}'.format(name)) return os.path.join(os.path.expanduser(dump_path), hostname or 'localhost', name) def flatten_dump_patterns(dump_patterns, names): ''' Given a dict from a database hook name to glob patterns matching the dumps for the named databases, flatten out all the glob patterns into a single sequence, and return it. Raise ValueError if there are no resulting glob patterns, which indicates that databases are not configured in borgmatic's configuration. ''' flattened = [pattern for patterns in dump_patterns.values() for pattern in patterns] if not flattened: raise ValueError( 'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format( ', '.join(names) or '"all"' ) ) return flattened def remove_database_dumps(dump_path, databases, database_type_name, log_prefix, dry_run): ''' Remove the database dumps for the given databases in the dump directory path. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the name of the database type and the log prefix in any log entries. If this is a dry run, then don't actually remove anything. 
''' if not databases: logger.debug('{}: No {} databases configured'.format(log_prefix, database_type_name)) return dry_run_label = ' (dry run; not actually removing anything)' if dry_run else '' logger.info( '{}: Removing {} database dumps{}'.format(log_prefix, database_type_name, dry_run_label) ) for database in databases: dump_filename = make_database_dump_filename( dump_path, database['name'], database.get('hostname') ) logger.debug( '{}: Removing {} database dump {} from {}{}'.format( log_prefix, database_type_name, database['name'], dump_filename, dry_run_label ) ) if dry_run: continue if os.path.isdir(dump_filename): shutil.rmtree(dump_filename) else: os.remove(dump_filename) dump_file_dir = os.path.dirname(dump_filename) if len(os.listdir(dump_file_dir)) == 0: os.rmdir(dump_file_dir) def convert_glob_patterns_to_borg_patterns(patterns): ''' Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive patterns like "sh:etc/*". ''' return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns] def get_database_names_from_dumps(patterns): ''' Given a sequence of database dump patterns, find the corresponding database dumps on disk and return the database names from their filenames. ''' return [os.path.basename(dump_path) for pattern in patterns for dump_path in glob.glob(pattern)] def get_database_configurations(databases, names): ''' Given the full database configuration dicts as per the configuration schema, and a sequence of database names, filter down and yield the configuration for just the named databases. Additionally, if a database configuration is named "all", project out that configuration for each named database. 
''' named_databases = {database['name']: database for database in databases} for name in names: database = named_databases.get(name) if database: yield database continue if 'all' in named_databases: yield {**named_databases['all'], **{'name': name}} continue def get_per_hook_database_configurations(hooks, names, dump_patterns): ''' Given the hooks configuration dict as per the configuration schema, a sequence of database names to restore, and a dict from database hook name to glob patterns for matching dumps, filter down the configuration for just the named databases. If there are no named databases given, then find the corresponding database dumps on disk and use the database names from their filenames. Additionally, if a database configuration is named "all", project out that configuration for each named database. Return the results as a dict from database hook name to a sequence of database configuration dicts for that database type. Raise ValueError if one of the database names cannot be matched to a database in borgmatic's database configuration. 
''' hook_databases = { hook_name: list( get_database_configurations( hooks.get(hook_name), names or get_database_names_from_dumps(dump_patterns[hook_name]), ) ) for hook_name in DATABASE_HOOK_NAMES if hook_name in hooks } if not names or 'all' in names: if not any(hook_databases.values()): raise ValueError( 'Cannot restore database "all", as there are no database dumps in the archive' ) return hook_databases found_names = { database['name'] for databases in hook_databases.values() for database in databases } missing_names = sorted(set(names) - found_names) if missing_names: raise ValueError( 'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format( ', '.join(missing_names) ) ) return hook_databases borgmatic-1.5.1/borgmatic/hooks/healthchecks.py000066400000000000000000000065601361605604600215750ustar00rootroot00000000000000import logging import requests from borgmatic.hooks import monitor logger = logging.getLogger(__name__) MONITOR_STATE_TO_HEALTHCHECKS = { monitor.State.START: 'start', monitor.State.FINISH: None, # Healthchecks doesn't append to the URL for the finished state. monitor.State.FAIL: 'fail', } PAYLOAD_TRUNCATION_INDICATOR = '...\n' PAYLOAD_LIMIT_BYTES = 10 * 1024 - len(PAYLOAD_TRUNCATION_INDICATOR) class Forgetful_buffering_handler(logging.Handler): ''' A buffering log handler that stores log messages in memory, and throws away messages (oldest first) once a particular capacity in bytes is reached. 
''' def __init__(self, byte_capacity, log_level): super().__init__() self.byte_capacity = byte_capacity self.byte_count = 0 self.buffer = [] self.forgot = False self.setLevel(log_level) def emit(self, record): message = record.getMessage() + '\n' self.byte_count += len(message) self.buffer.append(message) while self.byte_count > self.byte_capacity and self.buffer: self.byte_count -= len(self.buffer[0]) self.buffer.pop(0) self.forgot = True def format_buffered_logs_for_payload(): ''' Get the handler previously added to the root logger, and slurp buffered logs out of it to send to Healthchecks. ''' try: buffering_handler = next( handler for handler in logging.getLogger().handlers if isinstance(handler, Forgetful_buffering_handler) ) except StopIteration: # No handler means no payload. return '' payload = ''.join(message for message in buffering_handler.buffer) if buffering_handler.forgot: return PAYLOAD_TRUNCATION_INDICATOR + payload return payload def ping_monitor(ping_url_or_uuid, config_filename, state, monitoring_log_level, dry_run): ''' Ping the given Healthchecks URL or UUID, modified with the monitor.State. Use the given configuration filename in any log entries, and log to Healthchecks with the giving log level. If this is a dry run, then don't actually ping anything. ''' if state is monitor.State.START: # Add a handler to the root logger that stores in memory the most recent logs emitted. That # way, we can send them all to Healthchecks upon a finish or failure state. 
logging.getLogger().addHandler( Forgetful_buffering_handler(PAYLOAD_LIMIT_BYTES, monitoring_log_level) ) payload = '' ping_url = ( ping_url_or_uuid if ping_url_or_uuid.startswith('http') else 'https://hc-ping.com/{}'.format(ping_url_or_uuid) ) dry_run_label = ' (dry run; not actually pinging)' if dry_run else '' healthchecks_state = MONITOR_STATE_TO_HEALTHCHECKS.get(state) if healthchecks_state: ping_url = '{}/{}'.format(ping_url, healthchecks_state) logger.info( '{}: Pinging Healthchecks {}{}'.format(config_filename, state.name.lower(), dry_run_label) ) logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url)) if state in (monitor.State.FINISH, monitor.State.FAIL): payload = format_buffered_logs_for_payload() if not dry_run: logging.getLogger('urllib3').setLevel(logging.ERROR) requests.post(ping_url, data=payload.encode('utf-8')) borgmatic-1.5.1/borgmatic/hooks/monitor.py000066400000000000000000000002401361605604600206230ustar00rootroot00000000000000from enum import Enum MONITOR_HOOK_NAMES = ('healthchecks', 'cronitor', 'cronhub', 'pagerduty') class State(Enum): START = 1 FINISH = 2 FAIL = 3 borgmatic-1.5.1/borgmatic/hooks/mysql.py000066400000000000000000000117101361605604600203050ustar00rootroot00000000000000import logging import os from borgmatic.execute import execute_command from borgmatic.hooks import dump logger = logging.getLogger(__name__) def make_dump_path(location_config): # pragma: no cover ''' Make the dump path from the given location configuration and the name of this hook. ''' return dump.make_database_dump_path( location_config.get('borgmatic_source_directory'), 'mysql_databases' ) def dump_databases(databases, log_prefix, location_config, dry_run): ''' Dump the given MySQL/MariaDB databases to disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log prefix in any log entries. 
Use the given location configuration dict to construct the destination path. If this is a dry run, then don't actually dump anything. ''' dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else '' logger.info('{}: Dumping MySQL databases{}'.format(log_prefix, dry_run_label)) for database in databases: name = database['name'] dump_filename = dump.make_database_dump_filename( make_dump_path(location_config), name, database.get('hostname') ) command = ( ('mysqldump', '--add-drop-database') + (('--host', database['hostname']) if 'hostname' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ()) + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ()) + (('--user', database['username']) if 'username' in database else ()) + (tuple(database['options'].split(' ')) if 'options' in database else ()) + (('--all-databases',) if name == 'all' else ('--databases', name)) ) extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None logger.debug( '{}: Dumping MySQL database {} to {}{}'.format( log_prefix, name, dump_filename, dry_run_label ) ) if not dry_run: os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True) execute_command( command, output_file=open(dump_filename, 'w'), extra_environment=extra_environment ) def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover ''' Remove the database dumps for the given databases. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the log prefix in any log entries. Use the given location configuration dict to construct the destination path. If this is a dry run, then don't actually remove anything. 
''' dump.remove_database_dumps( make_dump_path(location_config), databases, 'MySQL', log_prefix, dry_run ) def make_database_dump_patterns(databases, log_prefix, location_config, names): ''' Given a sequence of configurations dicts, a prefix to log with, a location configuration dict, and a sequence of database names to match, return the corresponding glob patterns to match the database dumps in an archive. An empty sequence of names indicates that the patterns should match all dumps. ''' return [ dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*') for name in (names or ['*']) ] def restore_database_dumps(databases, log_prefix, location_config, dry_run): ''' Restore the given MySQL/MariaDB databases from disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log prefix in any log entries. Use the given location configuration dict to construct the destination path. If this is a dry run, then don't actually restore anything. 
''' dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else '' for database in databases: dump_filename = dump.make_database_dump_filename( make_dump_path(location_config), database['name'], database.get('hostname') ) restore_command = ( ('mysql', '--batch') + (('--host', database['hostname']) if 'hostname' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ()) + (('--protocol', 'tcp') if 'hostname' in database or 'port' in database else ()) + (('--user', database['username']) if 'username' in database else ()) ) extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None logger.debug( '{}: Restoring MySQL database {}{}'.format(log_prefix, database['name'], dry_run_label) ) if not dry_run: execute_command( restore_command, input_file=open(dump_filename), extra_environment=extra_environment ) borgmatic-1.5.1/borgmatic/hooks/pagerduty.py000066400000000000000000000037531361605604600211540ustar00rootroot00000000000000import datetime import json import logging import platform import requests from borgmatic.hooks import monitor logger = logging.getLogger(__name__) EVENTS_API_URL = 'https://events.pagerduty.com/v2/enqueue' def ping_monitor(integration_key, config_filename, state, monitoring_log_level, dry_run): ''' If this is an error state, create a PagerDuty event with the given integration key. Use the given configuration filename in any log entries. If this is a dry run, then don't actually create an event. 
''' if state != monitor.State.FAIL: logger.debug( '{}: Ignoring unsupported monitoring {} in PagerDuty hook'.format( config_filename, state.name.lower() ) ) return dry_run_label = ' (dry run; not actually sending)' if dry_run else '' logger.info('{}: Sending failure event to PagerDuty {}'.format(config_filename, dry_run_label)) if dry_run: return hostname = platform.node() local_timestamp = ( datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone().isoformat() ) payload = json.dumps( { 'routing_key': integration_key, 'event_action': 'trigger', 'payload': { 'summary': 'backup failed on {}'.format(hostname), 'severity': 'error', 'source': hostname, 'timestamp': local_timestamp, 'component': 'borgmatic', 'group': 'backups', 'class': 'backup failure', 'custom_details': { 'hostname': hostname, 'configuration filename': config_filename, 'server time': local_timestamp, }, }, } ) logger.debug('{}: Using PagerDuty payload: {}'.format(config_filename, payload)) logging.getLogger('urllib3').setLevel(logging.ERROR) requests.post(EVENTS_API_URL, data=payload.encode('utf-8')) borgmatic-1.5.1/borgmatic/hooks/postgresql.py000066400000000000000000000127621361605604600213530ustar00rootroot00000000000000import logging import os from borgmatic.execute import execute_command from borgmatic.hooks import dump logger = logging.getLogger(__name__) def make_dump_path(location_config): # pragma: no cover ''' Make the dump path from the given location configuration and the name of this hook. ''' return dump.make_database_dump_path( location_config.get('borgmatic_source_directory'), 'postgresql_databases' ) def dump_databases(databases, log_prefix, location_config, dry_run): ''' Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log prefix in any log entries. Use the given location configuration dict to construct the destination path. 
If this is a dry run, then don't actually dump anything. ''' dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else '' logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label)) for database in databases: name = database['name'] dump_filename = dump.make_database_dump_filename( make_dump_path(location_config), name, database.get('hostname') ) all_databases = bool(name == 'all') command = ( ('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean') + ('--file', dump_filename) + (('--host', database['hostname']) if 'hostname' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ()) + (('--username', database['username']) if 'username' in database else ()) + (() if all_databases else ('--format', database.get('format', 'custom'))) + (tuple(database['options'].split(' ')) if 'options' in database else ()) + (() if all_databases else (name,)) ) extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None logger.debug( '{}: Dumping PostgreSQL database {} to {}{}'.format( log_prefix, name, dump_filename, dry_run_label ) ) if not dry_run: os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True) execute_command(command, extra_environment=extra_environment) def remove_database_dumps(databases, log_prefix, location_config, dry_run): # pragma: no cover ''' Remove the database dumps for the given databases. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the log prefix in any log entries. Use the given location configuration dict to construct the destination path. If this is a dry run, then don't actually remove anything. 
''' dump.remove_database_dumps( make_dump_path(location_config), databases, 'PostgreSQL', log_prefix, dry_run ) def make_database_dump_patterns(databases, log_prefix, location_config, names): ''' Given a sequence of configurations dicts, a prefix to log with, a location configuration dict, and a sequence of database names to match, return the corresponding glob patterns to match the database dumps in an archive. An empty sequence of names indicates that the patterns should match all dumps. ''' return [ dump.make_database_dump_filename(make_dump_path(location_config), name, hostname='*') for name in (names or ['*']) ] def restore_database_dumps(databases, log_prefix, location_config, dry_run): ''' Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of dicts, one dict describing each database as per the configuration schema. Use the given log prefix in any log entries. Use the given location configuration dict to construct the destination path. If this is a dry run, then don't actually restore anything. 
''' dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else '' for database in databases: dump_filename = dump.make_database_dump_filename( make_dump_path(location_config), database['name'], database.get('hostname') ) restore_command = ( ('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error') + (('--host', database['hostname']) if 'hostname' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ()) + (('--username', database['username']) if 'username' in database else ()) + ('--dbname', database['name']) + (dump_filename,) ) extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None analyze_command = ( ('psql', '--no-password', '--quiet') + (('--host', database['hostname']) if 'hostname' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ()) + (('--username', database['username']) if 'username' in database else ()) + ('--dbname', database['name']) + ('--command', 'ANALYZE') ) logger.debug( '{}: Restoring PostgreSQL database {}{}'.format( log_prefix, database['name'], dry_run_label ) ) if not dry_run: execute_command(restore_command, extra_environment=extra_environment) execute_command(analyze_command, extra_environment=extra_environment) borgmatic-1.5.1/borgmatic/logger.py000066400000000000000000000122361361605604600173000ustar00rootroot00000000000000import logging import os import sys import colorama def to_bool(arg): ''' Return a boolean value based on `arg`. ''' if arg is None or isinstance(arg, bool): return arg if isinstance(arg, str): arg = arg.lower() if arg in ('yes', 'on', '1', 'true', 1): return True return False def interactive_console(): ''' Return whether the current console is "interactive". Meaning: Capable of user input and not just something like a cron job. 
''' return sys.stderr.isatty() and os.environ.get('TERM') != 'dumb' def should_do_markup(no_color, configs): ''' Given the value of the command-line no-color argument, and a dict of configuration filename to corresponding parsed configuration, determine if we should enable colorama marking up. ''' if no_color: return False if any(config.get('output', {}).get('color') is False for config in configs.values()): return False py_colors = os.environ.get('PY_COLORS', None) if py_colors is not None: return to_bool(py_colors) return interactive_console() class Multi_stream_handler(logging.Handler): ''' A logging handler that dispatches each log record to one of multiple stream handlers depending on the record's log level. ''' def __init__(self, log_level_to_stream_handler): super(Multi_stream_handler, self).__init__() self.log_level_to_handler = log_level_to_stream_handler self.handlers = set(self.log_level_to_handler.values()) def flush(self): # pragma: no cover super(Multi_stream_handler, self).flush() for handler in self.handlers: handler.flush() def emit(self, record): ''' Dispatch the log record to the approriate stream handler for the record's log level. ''' self.log_level_to_handler[record.levelno].emit(record) def setFormatter(self, formatter): # pragma: no cover super(Multi_stream_handler, self).setFormatter(formatter) for handler in self.handlers: handler.setFormatter(formatter) def setLevel(self, level): # pragma: no cover super(Multi_stream_handler, self).setLevel(level) for handler in self.handlers: handler.setLevel(level) LOG_LEVEL_TO_COLOR = { logging.CRITICAL: colorama.Fore.RED, logging.ERROR: colorama.Fore.RED, logging.WARN: colorama.Fore.YELLOW, logging.INFO: colorama.Fore.GREEN, logging.DEBUG: colorama.Fore.CYAN, } class Console_color_formatter(logging.Formatter): def format(self, record): color = LOG_LEVEL_TO_COLOR.get(record.levelno) return color_text(color, record.msg) def color_text(color, message): ''' Give colored text. 
''' if not color: return message return '{}{}{}'.format(color, message, colorama.Style.RESET_ALL) def configure_logging( console_log_level, syslog_log_level=None, log_file_log_level=None, monitoring_log_level=None, log_file=None, ): ''' Configure logging to go to both the console and (syslog or log file). Use the given log levels, respectively. Raise FileNotFoundError or PermissionError if the log file could not be opened for writing. ''' if syslog_log_level is None: syslog_log_level = console_log_level if log_file_log_level is None: log_file_log_level = console_log_level if monitoring_log_level is None: monitoring_log_level = console_log_level # Log certain log levels to console stderr and others to stdout. This supports use cases like # grepping (non-error) output. console_error_handler = logging.StreamHandler(sys.stderr) console_standard_handler = logging.StreamHandler(sys.stdout) console_handler = Multi_stream_handler( { logging.CRITICAL: console_error_handler, logging.ERROR: console_error_handler, logging.WARN: console_standard_handler, logging.INFO: console_standard_handler, logging.DEBUG: console_standard_handler, } ) console_handler.setFormatter(Console_color_formatter()) console_handler.setLevel(console_log_level) syslog_path = None if log_file is None: if os.path.exists('/dev/log'): syslog_path = '/dev/log' elif os.path.exists('/var/run/syslog'): syslog_path = '/var/run/syslog' if syslog_path and not interactive_console(): syslog_handler = logging.handlers.SysLogHandler(address=syslog_path) syslog_handler.setFormatter(logging.Formatter('borgmatic: %(levelname)s %(message)s')) syslog_handler.setLevel(syslog_log_level) handlers = (console_handler, syslog_handler) elif log_file: file_handler = logging.handlers.WatchedFileHandler(log_file) file_handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')) file_handler.setLevel(log_file_log_level) handlers = (console_handler, file_handler) else: handlers = (console_handler,) 
logging.basicConfig( level=min(console_log_level, syslog_log_level, log_file_log_level, monitoring_log_level), handlers=handlers, ) borgmatic-1.5.1/borgmatic/signals.py000066400000000000000000000011641361605604600174570ustar00rootroot00000000000000import os import signal def _handle_signal(signal_number, frame): # pragma: no cover ''' Send the signal to all processes in borgmatic's process group, which includes child process. ''' os.killpg(os.getpgrp(), signal_number) def configure_signals(): # pragma: no cover ''' Configure borgmatic's signal handlers to pass relevant signals through to any child processes like Borg. Note that SIGINT gets passed through even without these changes. ''' for signal_number in (signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2): signal.signal(signal_number, _handle_signal) borgmatic-1.5.1/borgmatic/verbosity.py000066400000000000000000000007001361605604600200400ustar00rootroot00000000000000import logging VERBOSITY_ERROR = -1 VERBOSITY_WARNING = 0 VERBOSITY_SOME = 1 VERBOSITY_LOTS = 2 def verbosity_to_log_level(verbosity): ''' Given a borgmatic verbosity value, return the corresponding Python log level. ''' return { VERBOSITY_ERROR: logging.ERROR, VERBOSITY_WARNING: logging.WARNING, VERBOSITY_SOME: logging.INFO, VERBOSITY_LOTS: logging.DEBUG, }.get(verbosity, logging.WARNING) borgmatic-1.5.1/docs/000077500000000000000000000000001361605604600144245ustar00rootroot00000000000000borgmatic-1.5.1/docs/Dockerfile000066400000000000000000000023641361605604600164230ustar00rootroot00000000000000FROM python:3.8.1-alpine3.11 as borgmatic COPY . 
/app RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml RUN borgmatic --help > /command-line.txt \ && for action in init prune create check extract mount umount restore list info; do \ echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \ && borgmatic "$action" --help >> /command-line.txt; done FROM node:13.7.0-alpine as html ARG ENVIRONMENT=production WORKDIR /source RUN npm install @11ty/eleventy \ @11ty/eleventy-plugin-syntaxhighlight \ @11ty/eleventy-plugin-inclusive-language \ markdown-it \ markdown-it-anchor \ markdown-it-replace-link COPY --from=borgmatic /etc/borgmatic/config.yaml /source/docs/_includes/borgmatic/config.yaml COPY --from=borgmatic /command-line.txt /source/docs/_includes/borgmatic/command-line.txt COPY . /source RUN NODE_ENV=${ENVIRONMENT} npx eleventy --input=/source/docs --output=/output/docs \ && mv /output/docs/index.html /output/index.html FROM nginx:1.16.1-alpine COPY --from=html /output /usr/share/nginx/html COPY --from=borgmatic /etc/borgmatic/config.yaml /usr/share/nginx/html/docs/reference/config.yaml borgmatic-1.5.1/docs/README.md000077700000000000000000000000001361605604600173702../README.mdustar00rootroot00000000000000borgmatic-1.5.1/docs/SECURITY.md000066400000000000000000000012411361605604600162130ustar00rootroot00000000000000--- title: Security policy permalink: security-policy/index.html --- ## Supported versions While we want to hear about security vulnerabilities in all versions of borgmatic, security fixes will only be made to the most recently released version. It's not practical for our small volunteer effort to maintain multiple different release branches and put out separate security patches for each. ## Reporting a vulnerability If you find a security vulnerability, please [file a ticket](https://torsion.org/borgmatic/#issues) or [send email directly](mailto:witten@torsion.org) as appropriate. 
You should expect to hear back within a few days at most, and generally sooner. borgmatic-1.5.1/docs/_data/000077500000000000000000000000001361605604600154745ustar00rootroot00000000000000borgmatic-1.5.1/docs/_data/layout.json000066400000000000000000000000231361605604600176770ustar00rootroot00000000000000"layouts/main.njk" borgmatic-1.5.1/docs/_includes/000077500000000000000000000000001361605604600163715ustar00rootroot00000000000000borgmatic-1.5.1/docs/_includes/asciinema.css000066400000000000000000000000641361605604600210340ustar00rootroot00000000000000.asciicast > iframe { width: 100% !important; } borgmatic-1.5.1/docs/_includes/components/000077500000000000000000000000001361605604600205565ustar00rootroot00000000000000borgmatic-1.5.1/docs/_includes/components/external-links.css000066400000000000000000000007541361605604600242360ustar00rootroot00000000000000/* External links */ a[href^="http://"]:not(.minilink):not(.elv-externalexempt), a[href^="https://"]:not(.minilink):not(.elv-externalexempt), a[href^="//"]:not(.minilink):not(.elv-externalexempt) { text-decoration-color: inherit; } /* External link hovers */ a[href^="http://"]:not(.minilink):not(.elv-externalexempt):hover, a[href^="https://"]:not(.minilink):not(.elv-externalexempt):hover, a[href^="//"]:not(.minilink):not(.elv-externalexempt):hover { text-decoration-color: #00bcd4; } borgmatic-1.5.1/docs/_includes/components/info-blocks.css000066400000000000000000000011741361605604600235010ustar00rootroot00000000000000/* Warning */ .elv-info { line-height: 1.5; padding: 0.8125em 1em 0.75em; /* 13px 16px 12px /16 */ margin-left: -1rem; margin-right: -1rem; margin-bottom: 2em; background-color: #dff7ff; } .elv-info:before { content: "ℹ️ "; } .elv-info-warn { background-color: #ffa; } .elv-info-warn:before { content: "⚠️ "; } .elv-info:first-child { margin-top: 0; } body > .elv-info { margin-left: 0; margin-right: 0; padding: .5rem 1rem; } @media (min-width: 37.5em) and (min-height: 25em) { /* 600px / 400px */ body > 
.elv-info-sticky { position: sticky; top: 0; z-index: 2; box-shadow: 0 3px 0 0 rgba(0,0,0,.08); } }borgmatic-1.5.1/docs/_includes/components/lists.css000066400000000000000000000054741361605604600224400ustar00rootroot00000000000000/* Buzzwords */ @keyframes rainbow { 0% { background-position: 0% 50%; } 50% { background-position: 100% 50%; } 100% { background-position: 0% 50%; } } .buzzword-list, .inlinelist { padding: 0; } .inlinelist:first-child:last-child { margin: 0; } .buzzword, .buzzword-list li, .inlinelist .inlinelist-item { display: inline; -webkit-box-decoration-break: clone; box-decoration-break: clone; font-family: Georgia, serif; font-size: 116%; white-space: normal; line-height: 1.85; padding: .2em .5em; margin: 4px 4px 4px 0; transition: .15s linear outline; } .inlinelist .inlinelist-item.active { background-color: #222; color: #fff; font-weight: inherit; } .inlinelist .inlinelist-item.active :link, .inlinelist .inlinelist-item.active :visited { color: #fff; } .inlinelist .inlinelist-item code { background-color: transparent; } a.buzzword { text-decoration: underline; } .buzzword-list a, .inlinelist a { text-decoration: none; } .inlinelist .inlinelist-item { font-size: 100%; line-height: 2; } @supports not(-webkit-box-decoration-break: clone) { .buzzword, .buzzword-list li, .inlinelist .inlinelist-item { display: inline-block; } } .buzzword-list li, .buzzword { background-color: #f7f7f7; } .inlinelist .inlinelist-item { background-color: #e9e9e9; } .inlinelist .inlinelist-item:hover, .inlinelist .inlinelist-item:focus, .buzzword-list li:hover, .buzzword-list li:focus, .buzzword:hover, .buzzword:focus { position: relative; background-image: linear-gradient(238deg, #ff0000, #ff8000, #ffff00, #80ff00, #00ff00, #00ff80, #00ffff, #0080ff, #0000ff, #8000ff, #ff0080); background-size: 1200% 1200%; color: #fff; text-shadow: 0 0 2px rgba(0,0,0,.9); animation: rainbow 1.6s infinite; } .inlinelist .inlinelist-item:hover a, .inlinelist .inlinelist-item:focus a, 
.buzzword-list li:hover a, .buzzword-list li:focus a, a.buzzword:hover, a.buzzword:focus { color: #fff; text-decoration: none; } /* I wish there were a PE friendly way to do this but media queries don’t work work with @supports @media (prefers-reduced-motion: no-preference) { .buzzword:hover, .buzzword:focus { animation: rainbow 1s infinite; } }*/ .buzzword-list li:hover:after, .buzzword-list li:focus:after, .buzzword:hover:after, .buzzword:focus:after { font-family: system-ui, sans-serif; content: "Buzzword alert!!!"; position: absolute; left: 0; top: 0; max-width: 8em; color: #f00; font-weight: 700; text-transform: uppercase; transform: rotate(-10deg) translate(-25%, -125%); text-shadow: 1px 1px 5px rgba(0,0,0,.6); line-height: 1.2; pointer-events: none; } main h2 .buzzword, main h3 .buzzword, main p .buzzword { padding: 0px 7px; font-size: 1em; /* 18px /18 */ margin: 0; line-height: 1.444444444444; /* 26px /18 */ font-family: inherit; } main h2 a.buzzword, main h3 a.buzzword, main p a.buzzword { text-decoration: underline; }borgmatic-1.5.1/docs/_includes/components/minilink.css000066400000000000000000000016331361605604600231050ustar00rootroot00000000000000/* Mini link */ .minilink { display: inline-block; padding: .125em .375em; text-transform: uppercase; font-size: 0.875rem; /* 14px /16 */ text-decoration: none; background-color: #ddd; border-radius: 0.1875em; /* 3px /16 */ font-weight: 500; margin: 0 0.4285714285714em 0.07142857142857em 0; /* 0 6px 1px 0 /14 */ line-height: 1.285714285714; /* 18px /14 */ font-family: system-ui, sans-serif; } .minilink[href] { box-shadow: 0 1px 1px 0 rgba(0,0,0,.5); } .minilink[href]:hover, .minilink[href]:focus { background-color: #bbb; } pre + .minilink { color: #fff; border-radius: 0 0 0.2857142857143em 0.2857142857143em; /* 4px /14 */ float: right; background-color: #444; color: #fff; } pre[class*=language-] + .minilink { position: relative; top: -0.7142857142857em; /* -10px /14 */ } p.minilink { float: right; margin-left: 
2em; margin-bottom: 2em; } .minilink + pre[class*=language-] { clear: both; }borgmatic-1.5.1/docs/_includes/components/suggestion-form.css000066400000000000000000000003651361605604600244240ustar00rootroot00000000000000#suggestion-form textarea { font-family: sans-serif; width: 100%; } #suggestion-form label { font-weight: bold; } #suggestion-form input[type=email] { font-size: 16px; width: 100%; } #suggestion-form .form-error { color: red; } borgmatic-1.5.1/docs/_includes/components/suggestion-form.html000066400000000000000000000024511361605604600245760ustar00rootroot00000000000000

Improve this documentation

Have an idea on how to make this documentation even better? Send your feedback below! But if you need help with borgmatic, or have an idea for a borgmatic feature, please use our issue tracker instead.




borgmatic-1.5.1/docs/_includes/components/toc.css000066400000000000000000000021011361605604600220470ustar00rootroot00000000000000.elv-toc { font-size: 1rem; /* Reset */ } @media (min-width: 64em) { /* 1024px */ .elv-toc { position: absolute; left: -17rem; width: 16rem; } } .elv-toc-list { padding-left: 0; padding-right: 0; list-style: none; } /* Nested lists */ .elv-toc-list ul { padding: 0; display: none; margin-bottom: 1.5em; list-style: none; } .elv-toc-list ul li { padding-left: 0.875em; /* 14px /16 */ } @media (min-width: 64em) and (min-height: 48em) { /* 1024 x 768px */ .elv-toc-list ul { display: block; } } /* List items */ .elv-toc-list a:not(:hover) { text-decoration: none; } .elv-toc-list li { padding-top: 0; padding-bottom: 0; margin: .1em 0 .5em; } /* Top level links */ .elv-toc-list > li > a { font-weight: 400; font-size: 1.0625em; /* 17px /16 */ color: #222; } /* Active links */ .elv-toc-list li.elv-toc-active > a { font-weight: 700; text-decoration: underline; } .elv-toc-active > a:after { content: " ⬅"; line-height: .5; } /* Show only active nested lists */ .elv-toc-list ul.elv-toc-active, .elv-toc-list li.elv-toc-active > ul { display: block; }borgmatic-1.5.1/docs/_includes/header.njk000066400000000000000000000004111361605604600203210ustar00rootroot00000000000000
{% if page.url != '/' %}

borgmatic

{% endif %}

{{ title | safe }}

borgmatic-1.5.1/docs/_includes/index.css000066400000000000000000000604151361605604600202200ustar00rootroot00000000000000@font-face { font-family: BenchNine; src: url("data:font/woff2;charset=utf-8;base64,d09GMgABAAAAADFYABEAAAAAX2gAADD4AADrxwAAAAAAAAAAAAAAAAAAAAAAAAAAGh4bj2AcIAZWAEQILgmSYhEICoGQSIGBDwE2AiQDgwgLgUYABCAFOAcgDIEGG75VFezYC+A8QAq6uT9B9v+3BE1iKKT2IG91MwzFYrVtDyfavY9ii6qSIJybn7qqPfVk4Jv4IPPDqz8vFV7HmV9WXLRjVL2OAjH0oMfYZod2qMIF73BHXHv4/Ifftah4dMb/iIGvGyHJrM+/P9V7H/zP8jeJLYv8BWiW7SR6IVbskBymqWtgzVjAtacj0Zazd+vp3NO5w94M8HPr36JeLfK9tyi2UQvGNgYMYWMMcPRwRA+QkYKkioR4YGEUFkZdGFj9lbvWS734XkRYl/Dw/X07f2+lVbAEmjDQbTdqQoroJxC+7o868/ValnqbIclHIMcB+ohbYIl/N7mjLDv2IYDhYJhKLJl4wDepkfVmxuhZlZp298zsLCLJC1J+J0qOAaR9T5YLcgQVXlcoemjbv6ifY4f5g28eysziQmieNyjHNp5nrNibQZPNkF07pqVYu/Y/ABlN+P9XV27CPZACz/kBsgO0gJrlanZCUlAeqtOUOeqqKy+bNWzJt0YvmdyXrAdVlxMGurtj5p2hWY112P/v175KT2//rqOS4WIDPnM2JKqERIp41f/fpp/tfc/6/vLyLGoDVBH3XxvGMnVOTj9z3xt6b1gyjS1b1nzUJ3u0JHmd8+ePfVYLCFWAKrK1QBwgqPZ3CXfp0m6XMn2blOmIiq7LwzYShGFUDPe+imPMpII1pV5Lqa6ioiJ0ZxB2k6v/TwECgksCksiIbtU+yO33VMUhL1f+AIA+d93tAEoBgBn7e5y62gEmtXYO4skgeMqJgKrkFEOynt7+/0LUi+sZ1r3+XL58KQFt71M8Bv3+E/L56De+l8P3HaXzZza/tiPwR/OxefD5NB57wixv2OH9NS9ceRsPnZk3QxQ2P8sO5Lwep99bsOE4PMpQgaz+afvKp+75zbs+i/szOO+yya+O6M8cKp/lMN9QVaRZSKZe0U0ONkWYdPTvHLFPgG/NVPFldejfNI3/4K/6OPyhgvmqRXw9O5ob23PMjLPkDxHfUbI55aMMDXPUPNg2zh+iHhwDD++jZvfHR/8nGvKF3791Gra1j7In/ONQrYwf+VmtCybZQeiyh/+XdvFAIaw+Kv4S78j5m8yfjwVxaLimiaULz/TsSPzVpRAnuPLyTfftxcSUo6YVQc8gRiwzCysbhwSJPDL4ZPLLV6BIQFCJUmVIypxIAEECFH22m8VWHC7xpL8PkvDQE6RvYmOEi5cvm1ZmCHdLErenYXAEcm0jpm9CmaFWSKoKWV2haClUfYVmSaFbVRjWFKZ1hWWggDYUyKYCO6AgDimowwpHhsKVqfDkK3yFilBAEQsqEiUKplTBlSmEWle0t7+hPLlxee3M1/lVNJiYDR+D2Dy+d3fuLfodyNu/BkBX+vzX1w0oKT+M8WXaASTFfD/GRyBoBonanvEla4WqbztIxKW9/G6U2BxQvEnFt/W3mXT59/G3mLi4kTEN0O/iZ/h/Sj0Rv1VDxV/xcH5QCoMZUCzTGkJj8a6PSj9Q+WeorHL5shj1qY6Jjuhgl8F7THm5HVN/q0Is+oXot8K7GZoqv21Zney0k8UO+EzXYC1Isag7ENmIAd+axJ/wGgFtCOXtWGj2A9o8lnlXVRGWGgmrrNK46A+vxhmdp41Fld9kKhc/v5+J0m9H5HMh9V07iMFiOmo+h0A4zvk0GrUp
4JjHrinqxKbDVeHwRQeFJ3mSyzsa0BG4oAOLPIJ5mNb3/dbiCCtWy5M2cqWCLmCEAyVspniAEWsE6bf2ulu7jaLbGda57gBCU3jcpdCGwLxK+O/IA/E7a1zoREndb4uEcqUSMCmbgOAGbBEPBeODX0MJ8w5YPW7EldSEexHqxWt4Q/w63DZoSO+HVlBxfitmU86iMjfj1XVDtZq9nx7xuIUPZ0u8Hqrgc1og07YOCa46qjL5Gh8F7cNfwgyDeSVYDxMTFRhun28WrxU8euEeRAI1Z0Qg+x5cKoJHJfCpDAFVIKQqRFQDQHWA1ABEzXcHbv/QW/MSQiRCkQCjEsRUhoQqkFIVMqpBTnXg1ACAFanbuwQlEkU9g0VuGq3bC6bFZdOYWt5x3F9CTRmUgtKipgJpS3KsWztrcKwXwhfC9fGr8nwCt6ksKluvBZALwuZ/ncTQh42yoYYZjNXFQCBefnnYPoAAOLRssyVW4fOaOrSorclHAOVhqmmgubFY02yZghzdHriFIkT0SI1h2+hGCt/Jb7dsy9ohNN81lbR19TmR9tQ2yivWsjhWfjHJXZMW84BX6FTU1E2hzFPE4qZv3HKzF/bF6Tml2xfpHB69N+FSuULZbpsJRV4PkyoCeBYRTGNDsh2LtghbRyJ5omajWhclKz3paGed4XLTaGrH2AwmFcKIbQuRFNbU3qKHaS7ImXTGA54uVjmrIrNGrOmKYms3qBMZscXZo77es+Gal7SJrFHFsr92kK5QzEHxFBotWuWcSVdGt5ad7Su546MbmUc2dBldmsIsRyzeoYSMazUZLZYpGxIPljCKRuiRzUHYTY9NKavgxdHy5bL+oaxQ5rdv2ALYcoKog9HXXmmoL85elD1r96JAFyENsvbd260OVo+7HTzNOP6yg6ym5ARVOLkdhZyiOn0NdCIXHdBxjisPWMxAsaFsyIUBWbMvDlTh0nYUcpmqv26TJBkuWzctStzZJdHTZJ2eIev0LHHQEbn48B6guC337n0kbtk0rhTB5OKLNusq+xqsIqd0PxnQA2RAD5IEeohcRualc4a8f94knIYwpE+9xEkziJP6yAbNJE6aOKjSlCihjLppAUmkhfgERduRABKgJDGPvKtXozQqM1ZrahVslZ74xkWIsjfhTFzQV06soqIPZiQeKn0TJVQVTFBdMMmaHG+hdrkZ6gpmCBXMUF8wQ8NyCzQWLLCwYIGmgiUIxyRpZsMMRZvfj8r7jQUIcG4uQZZ1v5Png3FgC6d2E6QBRUDpEImlJD+84xLQgAB4aOLVkbT8xhxGmV/zhxeVvmCyjGa90PwPGhEOlzOPyqA7eDAMwQUirlRpNUtRmAuJhPbkKLeOLWYp2REIDBOwkcOxRXJKISVbjCA2tihGn2viEWqxOYKDSflarhW2KExig5YwSri4ks01w1rQJEO1oAVCsqNgWALHwWmxPKke0kJafVw0bo6Pljs5CWpU4eonFHyQUhUUZTykahG0uiXU1PMunvFwVishTcoM2ZejtpzC/c3H0HSS4NZ4jDRBRQChs/WLy6eqxgXvSNmzBze6tOwivM8yW4Hmf56n4BdUOSDgJUSPlGFv8tD3lzLG4VWi8KIGwsWUfPVc1JsuUUHgRWoD5FIgQm+EvIsP9QDgY7Idi1KfFVJ23WzKFNZ3M5/B8T8HH7Vwm6BU309566hlXSl7BGIYNU1fE0QIwddPjF4xO8J3Ie7dxvRV1s2mm8eHl1V9bWyqq8t0YKjh4mUJp76ws6LK51n0/dPrZ0Fsdrn6wfljNNGObAQ7LQ5rQTn7kaUzwOnbtv3ubpLy1TtBHLspOgkIlgJZ5b2APiLPE23eoevTjLaYSdSDeFbxHjIQ45gRVWG/UBt8cFyQgw+TlDLBh6N72+iJYbM7m8GIhQehYKeWR7oCAL7iYieuO59jo7rJWg2GDWEROmofgWWoiaWr8bJ+dv7ilB6fOz8htbBPcyQoeVyJ/TjetxyCix/AxwM/mESo
y3j/GgU0wftZEJhr3fC60B7hqjw1CI3G7jcoDFHhYvZ39GteqwXk3nUCQeCogqrEGTyFwiIcuEOdXEOjMLH/wrhipx2JOomIjquAVVKZrstNnaO11Uf9h1slcuqmaDVaWkZGVshl6C+G92F9ursdF6y9XYMpaKRVBa3VuCLab7ViNHf9bB59yKgJG4LFJNFiF8VBQ6QWzNPv9COG1i5tUBAecXHX0QPUzNIHzLRvBFBwfCu23qChiyr1DD66w0yq/N3M3t1R//5uF8131H+YQ+c4htjh8Z74L5h9uVnaTs6dH7tZPabX/Sij467pq2Elp+6Yctld7JLap/f/3Nlpw+v6nEd+MV9ctDV2/wLtQpyVxYShnQ22pxzUrR3y0Nbe79rRTuP+ueWUqUlJ3skl+MAm8aeOSs/4tsVrf2R/cLvslOxHgiAWwJsrRM23Vzs8sNfbi5a/dRWGGae94FMq4+A13XiXWq1pebjZG7WYITNx0GiJGjp0TW7TajyiriHL1Qo0LcMADa6HKCbrRAMIYd1NwpH9P1CYXfcAabmtUDYMgBm9XbXbmx9SLAuPiA6QihxTyDMEIQeTxr7n584UVhYNPIZ7pOTYdIjcG4Q4eXihfaIw0+Vs4xhLkzvYkJ/ggWVFxR8IZmz4RKFMKIVBmtraRw8vI25l9BHM0T1tR22xykJavjbpyuss8NjuOw/o3ZoPoIJiDZ5NzPDuu1Hx0EaJOFoq4psJTrHp9hvXYA6sxTUT4vd2I3bVNsiH7QRKg9YxDkpnlrltbWNdfU1BAHDAfCohBHs77mMI8FrSVln8raK7kad/v9BlZh4unBdNHViTKRkd4ULFBKNXUJ+vu7uoSfyHE41vFYcJhq4xNLJus63K8vR84ZXOSLudibesK4fyyMKtYbfe8ZKlo0V/B3C4lQR4ZlVdm2MR7Y5+nmH+SzbfnxqOg5f0JFQqbqXXEtixMTsvopXpKHz86dvPsrI4MbNYF/XOnWF96+3a6e21g5MzoNwnjejWOlTSQ72xj7P9PhMfMNv21Bjh4lW0Idd8tZV9MbsdtrD1e6leOxPPW9gg2jKn+YVu06mqp7uLNuSVaCGxwCKov/doN7XIg54wB04fVx/vemQHFfderGn2TSjgllx28MRTiZRlVDPG+ROT8X5cuyGjjXdlck7XCOKcodkKGveUTHBJ9sbcMOJJ79dQQ14Gjlo5jVuNC4qjsLRo4+1FjYPDsNCMuZaglIwgkqoJFRrwiNeoVDe91aHjtrtU5+1Zm1MWSle3O8lOTLXaK2QK5iTFSirhhDGNsTkdGc3mKJbkS7z2FperIF/bfF/jDIUigb0b9+xI5Q6D3LiWHK0WutxPErrc8yCpKmur/i4YYCWiVd1g0maww/B0cWbE/uAI8XXddzfOX1VZrCKC0GNwc9V9iQmWIsdOjvewDoz8B9G3GDean4MugfC6MTVUd4t1tKCT6PAVE0aEeGFEFyDGSXJfC4oNWXNR6ADf4dq7/lNPrwDKXn8k2hozARJvfRjl3M6iric+Or37ykGeM0w55ThmQj1uGaIfjH3O+mfQvdfEBxaZmqqTnh7hjc3Mr0wEOwTeob2BdXQvIdzZtoexteOwZbaaQI7ZdRxnKIDzPUsk96j3zvpDIQuJWvTrWC2kGhZ3aUTKxtMXOrRxYCVbXHZVv1OZCFdI00pHuNf5Gwr21u7Gv/UpRKpN0vFStx0s1u8Fa/oK+GERb7uJNFSQxcReA2JrAqacvd7h4OiM3BDHiv6jET7fo5yLxpceR1JLAhjo+Gsmd2blqEH3thW7L0RBCaawLBpuoqGvY7gmZwtUj6vMFn02cYXHLOHqLrvxFs4/PcFAOun1tCCB8Bn58S6IE5iXMjd7HFH5SiarqGKSZYDMFTGhfKBHSOSNiSbqUKoT2hoSjpDoXWuzGhXSI2KgnpjpdyjycaT63KXv6WbcasdObJycG5hkugAZ6667/S6Ix1vG5BHfS+7IYZbycfxLQmli
RgfSjqtsKw0UxBh4xkY8tJVl5RgLzZkC33++tRfXbuCBsSCmBfAeHBFHa1HIbit8j+HE0q1m1r9gatLoIHQ06dOkvSj6vedazLhoO352FQt97j7e9ipv26aLwwM7aZRHAtpYZHY2aTGOo/rMsTQ61Mz/tXmp9Pcr/xtjhHUqjliFzMqnrHjxOwJyJnm2jyX9EuqFPiNg8pl+cibfP/KPgOyN4AA+/iVYB94jWFoO1cv7gk/x6jgs4iswBM7isJZD9vJj10xTi6UR7zolcao4UZa0xS2J08aJC2Pm0M20HIgXMzcRO8dDsy3UKXSJzgOhH0MPpgzEgSdr1ch8kNeJiYvv5/TlE0TLxA6Jl/KMryPH7wl9G3c53tG6yVZbQvYgbHcP4+7A+RJUk1CIg/0dk0OMO+3Zu+4nBORd3uOHPltFXZbGFqFpg4dY1YO7eZrEHAFdk1PZBN0ssr088tehqU9XCJa40eBoJuO4gCFFaatBLx7JWMMHpchDWRyDD5ZF65avmdSNRMfUweQa+KVGd5WWnX/1ldJzZaUl5155tfT8D1ELWidXF/5ekVIV/x/w5kfc/KGYhmrHkcUrui77hD5e976Vr5x8aXtdzDqCRaXlwrvJj0U9uDsUAhbV5q/JfsiW07+lyUdl9u/k6FZFUmyKYisq47/ZFDV3niYGQWP7Q3f6ykzcC7z9rGX6g2dfIe9XNrcbPMlDW0mUBin7h3SgcmtHElY5EDDOFbf/w6cY7BOodE1mYKH/oqoORFWF5jbDZNb3hM98BOMZTN99NL+VedTiI75nyYYngcarzjRfYKoNxm+/M5jUpzeHVRny/Un2YXVzz6veejuqsu4/yaPDojEuLyJ7mSNPc8n+wCzCG/lInqPct83AbmXk6yVTgthYAWlPaAtTMCuKGDgZM751cjXMg/nC2d6Xin4wzMkXodxfpW+gC5ln2HO4Cv+PfYa5EH1R+is3k/Op9EW0jzXD/i/576Y59gyrD31D+inH3J1kIfOjeYpqcpmkOpKgxwqdm5OJbjx1W0C22lg0I+niI8/LrfJZ5DkszmrF5P24dSoOX0XETVkJrOI2CyfRyOiRR25kFYgxyOj5x25IdnVDsYeebE+nHHOTHUnpjMABZ7WupNF1QMs4Psp7S7XNssiRnhGVzo5MqlCUVyXvjoDOfh3zPuYpUN7M5ZMKjDWGboZHSReZAB3lwTV1wAPnpVCpzA3llJ5zXfty9/jKy8vTddnsoSuFKjnXz9WunRvXeeMWoMFIl0fXOd+tXupszmzNW5o5mjrDqyazOEP0HF6c2RTLRydvBgZ41HQyxCDH5ZvKLv8NdJeMBQAt94SwRSLVHKZ8P5LXlt+eazwqoDLJNDeDuSZQaF2T3hJZXGLc1Yw9b6kXNV7bGbZP1RQu0t5MBBZcn00Kr0pYVOYffyk/cwu5UhFEo2s66VtZX211RdRGuLaOxiTMS4gZ/cwVYZxE912PORNjYELOQTrFDSzZffLKuHR56fSzZlq2oCTRozdG/hS9cx1qRheRmVR+B5Iv04AXBaTCKPiQOI68Omvx1MaJx593qo8mN1ekNUnyF57Ye6z6tZQx3YWGlQwq3QcIT24LJhRklHns53dii2JthQPejGHlgcZ21cGEUDCpSug6eSBzwRHTsGrVkhSwGi9I0qWWRff/6r7qvAb+X8xkMA/1R6GzuLEjBp+QWEAa6wWCkcZXKt9JvIZtcz/gfT1SX0f2lMX6YL/KkRpbLd5Qudz9/IWC4qujGnMeP9tmLZavLq82Hnw4ZN8/FlZPO6sCSQsE1mvZ2d1TTl7eicU3iqVXhPheNx6/HXVst334/yhaDqVmQ1HhUqu7PPOZF+wLbGnNawZct9e2Kg86mopTq7DkrrUrspOZ+uycrU913cp2xx/dKH8mIdXkTlv39JqEbb3LidTbqUTJ70cPnnpzqTF9Pg6Fb+QSmb7XHRyiSzeShf2tt6YrnyfPftElDOP+1wEejWxgxKgD
PtFDkUVE/3yJqvIZlG3gJbDk+HLhG1w+8I94gjfxCaL+2HvM3orLWfO4BrmvADkpj5SfQPYhJyQ5iewVfSAnOfnxN13ewJbwHQwYS9Q1a9dYr/I+FfULnvoY0QBHrhmqFGAqT8QGf1AXh/+giSxMOEeyR/imhs9VVv5fjjK075fXg6u0Krfbm2EeDEyTw83d/sXh7ORXaAIfqB3UTFrS7FWSoHNeUD9OCvzKmJ4fa228gULCf4QH5B3yA8Lw2Z+vGt/cvevB6NTnefrn1hgfh95aP3W7eOnI7YKJdWWvGb5b9fPgj0F3bi0tEM+glSnCnW2hkQPf37+765uhC8kfpTBhaLRGy+SYMGqJ1qNGR8d4+eDKYJ/8pYxjrf0HPU0NR/I6xv2X1K93diheyL7UNXYqf1HvYBJAEnxfjHa379XXfbGg6fDeXU3PFFeWnowqPzkXkc6WcREKKowduvnaqSHkXizFBFGl3MOSJu1/EczfGfbIfxJm4LbLtxk8Oo9updDJFBqjiEIn813r0TekgPbK3kDbZesulWRN0HcsK3nQbz5aUdVnyDuwdaikZvNQZ/FGz15U8r0AefaII621cklgWfYwKtoucP2ExvtZpeqfUtoipsQfT/7PyPft6Ao2ibMqrwBxahFtAGWQZnvOeXimNIEnPXIAe2fLdtE7miXzPH+YfXWkEGkxqQ54caV3emlOUbnhwdf3jI8Lyt0rYFG86Llp5IYU0NnYdaSkZNhbu9csenmS+750p615OqgqBF0x65DrBKCz8RsIytxcy8tT3aIFsGoLq5bCx4w5RoxHDYGvNLC+PtnriLXr7j+wVixlhfFLAvAv5BpWtpr2J7WDFUP0358xlmGO7jqJ2AmBdAOnY1FPZWwWVKibS+7QbRPfXzUp/cS4PastO3mhupjljKoT3gZCdsZSlEnyGsbv8W3zBTnppgHZO1vWCl/XDCbMf1ZvfEHeAa6sj2V1sg6t4BUSnyIInrJSHp8/ic7IAZ0qn0E3ynUIWG2UGMpyFl92E9mMNKqwFGtLjyAkhCJBYSgKotevwwZBih1c5svvnFMIlrITUYwobw5vni/q2w0GKb05B6M5h7nR7+0z3Ld3hZPzQdk6zA7oODu2CbdvfrCVh20RitGq3xYuxFNIq2pRMbKJsDI6u+IJ3bqJtegfapdFKBW5huVj1xHlgMRCI98z0TunP7XMBzefpng8VPDgyfVv7et0kB+a6GasXSIkPaDQlUoKVI8M4WmSvZCASdOQqGTKf3Zx1CQyKwe0w4ZPCb8WWteLEpndNqIPswE6ST6DtIILa8Wb33dIydRZ+SwacXsyI6tK9eSXbxQ/ZdZkZmTXKX754ifVk8wa756iLk1lle2oFPhjC++TqP3OcDAlJPEUdGkrKx2HpP+cuUyQbEcrK7o0MeuweEAnyU4hOqObVijclpihqRVtqOjXHU+qrUoIspMmId6JeEy9uT8yebPACzblL+D1EmnuJ7oGUODlWWSlWdlpQBa1MXJBbdwh4u+Zc0pg82Bd1WL1ocXqUIXzoJJ0/i+7qWkyiGVr9hFjkLr4bm0Wl0HkyjJzApH//HZT9yi3NCszt0L/+Kuzgi/Mg+4Y+zpkRgpoj/wGkj+vBKmMTSjQrqufL2yPTte71HSVeTY2XdxDLqbkSOJ4OwEqB2ZAp2oMR56eItZDIq90BiFbtSNH/xgpa2yphQTHiHis58TVponI8tejpGWpt6g7kOcLuaOWF7w88teFS9/siU8vKzfYXxnwNvmMQ8VJ6lfk7IlPEqnwLdlZDDoBjtbw8ogZ1IbMEHnYMfQ5CDsrv4loPkHtOcwi6WZ7iWKQd2qxwQH/NFH3Inux2s/NUbbtplLOqSm1fAbZwhxrEBi6eWcVy+zB0UJpFkPjH7nGPkxwGVGczTiYCt4Vg2NiVioxwNbzMPQhcXyjkrOR5YaKBRgvkrMCts2j/3GOufh0Sircumkt848XGZRXUyLgTk4kYoSEApa7
c8lGlocVOsziHxnq1rkgAsl/Mvzz00EmAd0ktENtaxappH0Iwc76Jv/JR/O5cuQMwd989PAzFsygR87Kn0MXj+WiCs4W6d218auswUp4kKNEUlDPyAkrpm1FlMtL1vzGH9M6QA5Dgxji76b/yYmNhPnP6zA6Sk8mUZi1HZxi7HVEv36ZFAb0/1hSB25ttWKjsCqohEfwxEo77i65A7nR849JNDLOWoOo0SOPKAwJUzY8hMzKl1vxEUhlV0IjeFKrE0+AfkUBfZNJuHuqpDJuN39x0zv/lVblnYfgncY+RCHLFd45/9PRwqXo0sIvj758hTAMDOK9hEnag9/EeghznEVOaDfCjSkBByIFC6EjCqEbESrZWhkUah1aUm4fL/SGYouRVDjD1mJsTom3p/Lcaz5rwhiFjFSsdrC+Z16FwVup3tcdbRUqefcxdRtOVlkoeHl9pW0oI3Bh2M/inWvB6wfqxrtnwH3O1wRkKrtOUodKBSmARpTvQx3Q+8r32KCIyCjJiDcVxHs79W9Ojkfdzupa4CgY7xs9Pe9/CYnZt/3Z96+tBrVH3XiMJVfRwttVO6i6bG/NT8sRml2ZIvMAtZm+JO2mQjbwrXQ+b22oibsBy/m2X2WYZ15HD3Fna2KLQcYcHXgRvyZMoiLfJGPLsGRACz0SmegKviNNnYAvbSsLsq55zrnkJzQ53G0duxTHE6JGj7T/1tG3rteUIyhKc7Zqrw+PaK47WvNTs/ix5M8zsAomBnyWcTBddad0tSkCLRDHdzi/ImMMWbMlbZD2VFzCFO76R8mIh09iLpM4nqEZkT7tCmYrHN5zIWWa1MyaZ4su6S2hU9aJotySfdAEU/mPC6/oeWlZamd9OHvMlZS+ZGEb4wtGKBTIS8uITOti/UoU6hrCTcvGg/kVgeXxLKhI+WxBaTqYMG3UQCbMn7svzIIW/G5Z6tu16eoG/PLGHpBFomzeDBYwhqLSCHPbSyPVV/aseW6Nd/8H1zWBppZtKxo63LGNGUk1ujIkOIrcQt3fi0OY7S82jWKT+8M+u8UX6ayVzQwh8lvwMHJart4ZORaflH9ohJCfhuNXYp6wG1skcYc9Ekb6Ksw+YsP6MNuIHfOrcqFPcJdVotel1Jx2Yx2Y+2QJf1CRnvGnliJ8n+oLWjKKuQDtSsY6seQAmw6Uk9aNc7aIRlkizuSNATsM6t/6uJIjg4ZEc1NHnVQ63XbkwsU6KcmF1Y0HKyvdCRZp/deXHTOPQHcVtcqXIQ32HPepaBolJu7O2mHLNT2FzpBUNlcn6DeOaUqF2xeK0rFuLN0wpW8zWd1rdzAVd6HFeDqxJ7In1mAPiMaC/+4+IS+MwLfkI8mSaJTr5w/iNCrGis95sFbjyNOnVCmODUrkpxF5/NnJuC5qpSWlYN+AXn4LHqxZlrpT4+fu7u/h7tD6d6U8p/Vzp3r7Obu0OS0H3BFpYpY1RxEW7HtZMK0KW/1Mt4iWSksTMa1+ZVgw/bJgnyIcl8NKE8uXB8XeqxliHAO9VCh1Dw2nUgzEot5wuiFT7cmxdEmPNXNWP+m2lvolPiGVjmzzSnbfHew7k/6qM9l3Z+Zs5q01nKA4HvgGodNomK/Yb/2TNRH92vzOEps1va2DWGygUHG6aMsxDBe73lMcgqG7SRm3zp7y3k5ypr58rm/NCHRPUZHD10FX60TjchpVLgsXNdzk4Ji0XjII6r7qsJ1ruKW4BxUoQe9oGpjYVbaw0pmY63QkZ+TaPx22+SRlyT0jO8JDZfWu3oycTJPVHZfg8n9z+6tEv6whdflTG+xNJMOzjwwebCBAEkApmNluwVIggjumFddjtpUEPIxZ7GZsmJGBUBNR/+xrQh8RkRwpq/CCkam8tyOLJBMVWfJbSC98S17fZ6mPT/RNNzui4iLknxvDLgxHSTmidTHMaElDZ2iQL4E9UFEzxtOyl2BrOi9hdFbNpaXSRWw1Mx482Aq7Md23RD/zYDeXqeeGJIPlbwEGKrPy
nVa8TLys4i6TSmYseKcNz+FoESNjtmeCCBNLgmEGXVQaXk2kJjFxyHB+v56HXXnFCRHguNgSAvRUmq6aosA7WRjiODbhA1nOVz7ToQRrArfUQZF0hqGR3TMBR388IFZlezn8KthjkjG17AGU5x85mUSiQg3HXVQkwNWzZJBmr6DLMnBfCRa82h+RLSF7+JX19N1wF37zm7XfKtOjwQYCWssmhpXbPh39Lm050oghevw8A/ocx5X/CTWcfGDbbmApvnKvhWNZjy3p3gns51ZECP9V4fjnUGGO5zjp/yuBdlkgtQaIcuigGrOsltRBB95QgA9Vh69Z7ZNb4a+UgCVTfgVHPcfyxY2j76T6d5dl+Nj+uJZJyQBQ7vifkhwDUqnYXMP+SushsVlaR2oX9gCbVYeuW5O1/MyKRNcLHEFMJpc9/F5RTOxo6sQ8+9D8/maONCJVyD4Fn1ZFNOiLze36lp6fS8Y7lTupoL41emUnkDPd0Ina+uWrr6Jdev0YdMf/W+NL6+538vDI+eD9zX9jEr+8h3/F5KmfjL6alKZkM8yuhwKJs4pAAJMzCBYUTUvTF3xsphIhkZx284mcrRCwK/3eTDPAEw5y0sIN4GwkO5ipSRHZVFhE3LIRcaLjI915noWyih8Jajr5ox99NXWb4XHptwLEg0QrDZ0IwWXTyMznWuQ+2vGpOVWY4eGG3M59GqROj7E4D/7jJ969NzFxYL8Zbt3dXJ4f2zKPXVPmacIwCL2ga4pjH/dOOi3zcQNK/6ngWK5Mmo6k+YR0eWqFzGX3iEpo7Fh03w0FM74DeUUzDt+tnLZl7LssYSQK3IPj702/P/kjG301HY9w8MDtpOzlRb6AvK5uVcyI6A4opc4Da+3r8alMnOUeaZ/q6Vl90BvYpcl+7zxzDgYv+XefuPdjE69d8hxEIvZsXr5cmaM3T22dj3xkJMpAZtmYZJQQhZArTdxSRGX6gXBsyp0U2jKZ+UC86vjbwJvRsOgds3ouiBgOksZiAl2X66lWnHV7AVM4jir6kj8Xk6+98GZ+l+TsxdvqEqqLD122bf+HwTwhaAxbL4zrF3VnjYtPoClvvyNuXD13f/v+kUNbG1fn+zr1TZkllPghRnw39nuTJYQ8JxKsJIoqsVmIdcQAiZNoAS0K3gHI4UlNDilCGEBOdxtnAeJgOllK4f7kD3N0NuEzUl/EGVAgZaBD2l8UVDmg2FoHMTYaMe9TDpSJtsIgDLdVCLgH6UaYsbtKh8Gn+z0cwlXH0CyOIGDzEqjrhkEWxQtCXrhkF7i43EbXxsq/uojotkuesMDDEpfQ5xJXmCBPTgUR5u7BDGctQuIg0YjLnOHQcwt67NqBfHXbvJL+ACDCcXmjlCf2Zku5R4JlSDjsHbI3DzdFCHT5NLbDZxtpGhoQ8yIeM9f2SAyMo90Qb2++mIPSlTB9R2TTdjEoWhoe24vrK0BBEHchdQc5S3uUsAtmOK3k0VNSlKPUrGBgZSL5jRRY6uVp4GFVrOpg/8sFVXACgm5OHXK6uCKgFL4kscICNXZDmUIRl28yQvwu7QdEc9p9H5zWvNxGzCWyRMlYLw4TBYKUSGvzE9G1RZ5QGHpBW7Ngi1tesDezphUSkmenOvrtq46rJc9iiv0gDA3WHWXx1Xrv7ClqghvN5y/JqKdKSE8zgDKQoOB0EJuDy7Nt9uHnhJHtYj9/a27FVVLU+pIqNEHQrtg3mwAmh6GCmfTKr6R5Ee9bd9FfjO1S6dEjN9NNVoNPHXqJJGfbjuUCCG4IdwQClUzIws7dfN2w3Ffr0zXFZt2rz2jh3kQ1UcWOwgttfCXoExx2ztt0CLpQMBP2iUWm8lrLdzd7bJJxK6LTao/uyFMYeQGqrDqOa6YS5Tc9wp6YjpxUmPQBnolh+Zbriu5b9a5tJp+reNKl7fldqXnZU0UJdNjpXuIUs0ufusrJSEQmGfAwNy1McEzzR3qK0rzwDVe4eloWQEhATikgQ5d5+nooRPhs
xvPX+/EpTfnu6t6bxOvG2Wm8ma6nDmzGvgzrFa3hIR2Tr6UPoAw2FCalOlj9zHRq1bUrAmGJYdn0qk51VQvakfCwg6bZv1Zn3theeOhHs9f11lOQV51YBVD0Ivo2ZN4iY4gc1osnI9xgMz+mTcA7IRyy8cXBBONm734jbl4f39m9s3329Mn9w9XzbR76tt6Y7Qg4Hi7zvKXoLvBh3H3BI1/t8ZIllJ1BdgdhyaaK5eLVN2rFVm0xgaFpY54Fp6dwuoLQeYFIv9KmdAXBVFxlBHs3m73bi4zUrDatjcZSNcL8dDyCflZ1H8ShjiTefDxw2jDrEpr2FgLCivR1wu99oWBYBlMzO+155Bn7P4A2eAg7BKzYQX9N2wmB9KxfjWrbQULWLHalTIX5gvpKJxFRXCNRLfOU4ch3C1psjo3dy+FpyqQ4R1EVx1zIf0OvBGFxgkcNryuYJVlWVNisiQ/FUmqiEUDetONHzN7B1TJ1TRo7Xhac5h8tiAEYCSLjnK4yYzbo7tplR2Ku+gVPP3kyVKTFxA5gRjoCS0rTyDILlbW6YMZSOLAmlcpLzWc8Px0lFnUAuBzTu0dkJ7dcrG9yX46xSsmwsjWuJBSERmxArNOsQk6Fyv7gOoEqK3pPR3R6jkI+GMLBtOkmvVk0CIDZsbjrRrW7WhGterrlG4GGRo5ZFc0+T4uQzyiw6wyJ//uQHQZOieK6F6rr3Fy0F3UZV1pibw6P0jdIdlMVV5enbQ3Ony6fIlEBZ7zExWzkGgwuV/QVHvK4R8XgzghuV0tUSjyj2PFqQZ99/iYLyR2uX6Gu9+8ECpzTh/V9tl9dQqXIiOejy67BevgUOJFAoGTboQ4giaWVKyXx1HUVOtIgdDitlcx6K3+yNbxIXnVXWtR6aYvE066xHoUcCBwXFZbPLl6mXGV3ggU6mhLZDs34TeDOplWXT8QlAj0fHbb1ntKYhnk9tINTndMQ5mlDtBP7BU7R6XrpSDGp3f1wypjRGJuwj0wPSHIaDNJjZtLzayDD9FlnK5Qg8QBRhXty1Eug3RGZNAgsXAU9r4h31wSHahksMLfdiG87rZbBt1QR+MjqFRHekOPYzVAHUiEdUcxeHUZzWMvdniayWGBUUlzcr3JtZb8/7tsqjf0QPfY8PsdztOHJdCJ8IRyLYadj6V1yMzQJXqT6syePR6BPv0qQ+l5Vh0Vu9xe5wwBwN7ILjYhmvVnbNWEYhZ6XYaiw4kXwMAMybKPrhjizaa0aRH64OTB/FKUlskL1CVFChXE8FDF0oJRD5ioPV51u3L3vXLVZZLu9qB2uSsxU6ZKjyA+a9eydZ94Zu4T/YNO5JgPCQ2l2RjCJQjWojrndelmfTZUx6eKr5uCetILmEEe9MdLvyrW90B3jZ1a70VZ5gsBuEPfZI5+tj0Pq8jcTJcpqE0qUCwC7wj6jQqraAtJRPiGSzy3dt8o8Tjv+h2V7dOTg1gb9zm6ByE4FqWpgQ2waCmjBbv0OIbtvgl66UkpZFLie2GiwbtvqIctv5ujwKoyHlxkPeSk2ZzOUtyJtOohxNAUjTaP1MbwwWtrqAZTV1WSoiPk8rVnlBzWwyLPhpquU4AAA+o9S/ri2w8y1l++Q62lEbaHiEtoeTC6yz1wG43nSuvEt2elDwONCUiPuzSgoKkF9apjJOUzySRyrtVpPkOU8xfmzhbslGesL8OSma4U6vii3j82MhOkEMzBBTZuXYuzpSxAb6+f7PLaVd7kwCrygKTA8+CtoZKEo2LYC86kmgysv51C5h49uuLvt82jEnOm4ylMEPHPGbbYISfRuxmnMdd4K8zxNAyVq/XWo5DFkSxvKSI2WSkYZylwC4hqzs4nboWFdh7p3ZTztmJnTvTk22Xp/hJnqj+GoipPshh+mR6TSQzhHLSMX0k2Zs4wJkCFb7TocQfK768jcs9VotnbLE4Ki4DtjDUiZf+OVZCDX1k7unrZXETefhxLuluy7qUYuA0MN4GiYjZwnRW4+
VIK224MNmYW7o5OYj0dLM3SNmDEYqIiA2KdSN6DYzM7bZJc2g7R2mnWRxSQyQC+kDrFnZREU9KfF6xGaX8IHWUWu9TwZWstjM7wFAwXLfIsgAkCMsfVKx0XzNGVODREUhnSpCEy4PqPHLs3HBvalbfZCUkytTg1wb3tqzXJ0k241+4anTDgn/HnvUN0TkhXHoNuWJWICIz/AMpeg+QugoNsR5qUXJnTnREH9M3Qdj09NfgzBII/wmBYPsts5jVpdMBwFgmZiyX0aRSpOflJijnlxzmgzA1x+KtsoM53SO6GULe51hpOpjyAxLAy4bqdtu05wfOLtoshIDMJPOzDdOnos9AUrw1RZ5xqXxJOX4QHtiDyhOGXaaoKxGDgWOAiTT6TJmO0YwqjcmHKSQp4RcMSQLErZ/H9w5X9WsL8o3AHQAyWKkSEc9JH95ZBQSVV89jzle1ysvkTAfy+t+PUSr/aTf0U6AODPp4b/CQDwt8UdiuFkzv3NT8ghAADhf1kVOG6K/+v4KXv/g307lTShA1/CFfL2vvGYGmdhvD3a06I11zXllgd9wZ2pdGSDsm5QmgFVjRfYUgtL8Pc9o26TsOWCy7Jofk3NlrkCzauzMAGh1Ks5i50yMz/eBKPafhqUzM62Yd2942YfBfvXGo7dhD2lYmd054SeisTNeAZrn+wS1e5WuN1au84nF8nu7y+mRreT7JL6KUVfM9sixfVD5b5mpz499apspoVHt7tzMlucVeR3nem/HjZDwYTyekFBvtKewtC4mtKiKVuw8o9HadF++Zom9G0AbRiBVMyvn7AGGAoq9Q9Xup1jOzl7mLHbpDRdoKjdZOk++zLvnLwXwNXkK0UC3NaP4SUqoSlBMwBfN3UoBEAooGDZRVOA11HBkaXdGrQLaZIrnKiwjbt1aFWP4v63puGTJ/gbhEL9jaeOMBEStHUh7R8zWaJepsoXwv+9JkDcde57g3Yr9v3V3wcmt9tip2aXvYP1PVy8kmI170tVJ/YC5v/lFSiNx0+UlHYsep3wfyXnf8oFSQcAAAA=") format("woff2"); font-weight: 700; font-display: swap; unicode-range: U+20-7E,U+2014,U+2019; } * { box-sizing: border-box; } body { font-family: system-ui, sans-serif; margin: 0; color: #222; } img { border: 0; } a, a:visited, a[href] { color: #222; } strong, b { font-weight: 600; } hr { margin: 3em 0; border: none; border-top: 1px solid #ddd; } p { max-width: 42em; line-height: 1.5; } /* Blockquotes */ blockquote { font-family: Georgia, serif; font-size: 1.1875em; /* 19px /16 */ color: #666; margin: 1.5em 0; padding: 0 1em; max-width: 31.57894736842em; /* 600px /19 */ border-left: 6px solid #ddd; /*text-indent: -0.3684210526316em;*/ /* 7px /19 */ } blockquote + blockquote { margin-top: 2em; } blockquote img { height: 1.3em; width: 1.3em; border-radius: 50%; vertical-align: text-top; margin-left: 2px; margin-right: 6px; } /* Main */ main { font-size: 1.125em; /* 18px /16 */ } main:not(:empty) { padding-bottom: 3em; margin-bottom: 3em; 
} /* Tables */ table { border-collapse: collapse; margin-bottom: 2em; } table th, table td { text-align: left; border-top: 1px solid #eee; border-bottom: 1px solid #eee; padding: .4em; font-size: 0.8125em; /* 13px /16 */ } table th:first-child, table td:first-child { padding-left: 0; } table th { border-color: #ddd; } h2 + table { margin-top: -0.625em; /* -10px /16 */ } @media (min-width: 37.5em) { /* 600px */ table th, table td { padding: .4em .8em; font-size: 1em; /* 16px /16 */ } } /* Headings */ h1, h2, h3, h4, h5 { font-family: BenchNine, system-ui, sans-serif; } h1 { font-size: 2.666666666667em; /* 48px /18 */ margin: 0 0 .5em; } main .elv-toc + h1 { margin-top: 1em; } main h1:first-child, main .elv-toc + h1 { border-bottom: 2px dotted #666; } @media (min-width: 64em) { /* 1024px */ main .elv-toc + h1, main .elv-toc + h2 { margin-top: 0; } } h2 { font-size: 2.222222222222em; /* 40px /18 */ border-bottom: 1px solid #ddd; margin: 1em 0 .25em; } h3 { font-size: 1.666666666667em; /* 30px /18 */ margin-bottom: .5em; } h4 { font-size: 1.444444444444em; /* 26px /18 */ margin-bottom: .5em; } h5 { font-size: 1.277777777778em; /* 23px /18 */ margin-bottom: .5em; } main h1, main h2, main h3 { text-transform: uppercase; } h1 code, h2 code, h3 code, h4 code, h5 code { font-family: inherit; text-transform: none; } /* Lists */ ul { padding: 0 1em; } li { padding: .25em 0; } li ul { margin: .5em 0; padding-left: 1em; } li li { padding-top: .1em; padding-bottom: .1em; } /* Syntax highlighting and Code blocks */ pre { display: block; padding: .5em; margin: 1em -.5em 2em -.5em; overflow-x: auto; background-color: #eee; font-size: 0.75em; /* 12px /16 */ } pre, code { font-family: Monaco, monospace; } code { -ms-word-break: break-all; word-break: break-word; -webkit-hyphens: manual; -moz-hyphens: manual; hyphens: manual; background-color: #efefef; } pre + pre[class*="language-"] { margin-top: 1em; } pre + .note { font-size: 0.6666666666667em; /* 16px /24 */ margin-top: -2.875em; 
/* 46px /16 */ margin-bottom: 2.5em; /* 40px /16 */ text-align: right; } @media (min-width: 37.5em) { /* 600px */ pre { font-size: 0.75em; /* 16px /16 */ } } #quick-start ~ .language-text { border-top: 2px solid #666; border-bottom: 2px solid #666; } @media (min-width: 42em) { /* 672px */ #quick-start ~ .language-text { border: 2px solid #666; } } #quick-start ~ .language-text, #quick-start ~ .language-text code { background-color: #fafafa; color: #222; } /* Layout */ .elv-layout { padding: 1rem; margin: 0 auto; max-width: 42rem; clear: both; } footer.elv-layout { margin-bottom: 5em; } .elv-layout-full { max-width: none; } @media (min-width: 64em) { /* 1024px */ .elv-layout-toc { margin-left: 18rem; max-width: 60rem; margin-right: 1rem; position: relative; } } /*.elv-layout-wider { max-width: 60rem; }*/ /* Header */ .elv-header { color: #222; position: relative; } .elv-header-default { display: flex; flex-direction: column; justify-content: center; align-items: center; } .elv-header-docs:before, .elv-header-docs:after { content: " "; display: table; } .elv-header-docs:after { clear: both; } /* Header Hero */ .elv-hero img { max-width: 80vw; max-height: 60vh; } .elv-header-docs .elv-hero { float: left; margin-right: 1.5em; } .elv-header-docs .elv-hero img { height: 3em; } @media (min-width: 37.5em) { /* 600px */ .elv-header-docs .elv-hero img { width: 4.303125em; /* 68.85px /16 */ height: 6em; } } /* Header Possum */ .elv-possum { display: none; position: absolute; right: 1em; top: 1em; width: 16vmin; } @media (min-width: 31.25em) { /* 500px */ .elv-possum { display: block; } } /* Header Heading */ .elv-hed { font-size: 3em; margin-top: 1.5em; margin-bottom: .25em; text-align: center; text-transform: none; } .elv-header-docs .elv-hed { font-size: 2.3em; margin: 0; text-align: left; } @media (min-width: 37.5em) { /* 600px */ .elv-header-docs .elv-hed { font-size: 3em; } } /* Navigation */ .elv-nav { padding: 0; margin: 1em 0 0 0; clear: both; list-style: none; } 
.elv-nav-item { float: left; padding-left: .25em; padding-right: .25em; font-size: 0.8125rem; /* 13px /16 */ } .elv-nav-item:first-child { padding-left: 0; } .elv-nav-item:last-child { padding-right: 0; } .elv-nav-item a { font-weight: 600; } .elv-nav-item .elv-nav-light { font-weight: 300; } @media (min-width: 20em) { /* 320px */ .elv-nav-item { font-size: 4vw; } } @media (min-width: 25em) { /* 400px */ .elv-nav-item { font-size: 1rem; /* 16px /16 */ padding-left: .45em; padding-right: .45em; } } @media (min-width: 35.625em) { /* 570px */ .elv-nav { clear: none; width: auto; margin-top: 0; } .elv-nav-item { float: left; padding-left: 0; padding-right: 0; } .elv-nav-item a:not(:hover) { text-decoration: none; } .elv-nav-item:not(:first-child):before { content: ""; border-left: 1px solid #ccc; padding: 0 0 0 .75em; margin-left: .75em; } } /* Version */ .latestversion { font-size: 2em; margin-top: 0; } .latestversion code { font-size: 0.75em; /* 24px /32 */ } .latestversion { font-family: BenchNine, system-ui, sans-serif; } .tmpl-docs .latestversion { position: absolute; top: 1rem; right: 1rem; margin: 0; } /* News */ .news { text-align: center; } /* Direct Links / Markdown Headers */ .direct-link { font-family: sans-serif; text-decoration: none; font-style: normal; margin-left: .1em; } a[href].direct-link, a[href].direct-link:visited { color: transparent; } a[href].direct-link:focus, a[href].direct-link:focus:visited, :hover > a[href].direct-link, :hover > a[href].direct-link:visited, :focus > a[href].direct-link, :focus > a[href].direct-link:visited { color: #aaa; } /* don’t use a direct link, should be a link to the page */ main .elv-toc + h1 .direct-link { display: none; } /* Style Guide */ .elv-sg-component { background-color: #f9f9f9; border-top: 1px dotted #ddd; border-bottom: 1px dotted #ddd; margin: 2rem 0; } /* Screen readers only */ .sr-only { position: absolute; height: 1px; width: 1px; overflow: hidden; clip: rect(1px, 1px, 1px, 1px); } /* Language List 
*/ .elv-langlist { font-size: 0.8333333333333em; /* 15px /18 */ background-color: #f7f7f7; padding: .5rem; margin: 2em 0; } .elv-langlist-hed { margin: 0; float: left; border: none; font-size: 1.4em; /* 21px /15 */ } .elv-langlist > .inlinelist { display: inline; margin-left: 1em; } @media (min-width: 37.5em) { /* 600px */ .quicktipstoc { margin: 0 0 3% 3%; float: right; width: 32%; border-radius: .25em; font-size: 0.8125em; /* 13px /16 */ } } /* Breakpoint Overrides */ @media (max-width: 37.4375em) { /* 599px */ .bp-notsm.bp-notsm.bp-notsm.bp-notsm { display: none; } } @media (min-width: 37.5em) { /* 600px */ .bp-sm.bp-sm.bp-sm.bp-sm { display: none ; } } borgmatic-1.5.1/docs/_includes/layouts/000077500000000000000000000000001361605604600200715ustar00rootroot00000000000000borgmatic-1.5.1/docs/_includes/layouts/base.njk000066400000000000000000000015341361605604600215120ustar00rootroot00000000000000 {{ subtitle + ' - ' if subtitle}}{{ title }} {%- set css %} {% include 'index.css' %} {% include 'components/lists.css' %} {% include 'components/external-links.css' %} {% include 'components/minilink.css' %} {% include 'components/toc.css' %} {% include 'components/info-blocks.css' %} {% include 'components/suggestion-form.css' %} {% include 'prism-theme.css' %} {% include 'asciinema.css' %} {% endset %} {% if feedTitle and feedUrl %} {% endif %} {{ content | safe }} borgmatic-1.5.1/docs/_includes/layouts/main.njk000066400000000000000000000004761361605604600215300ustar00rootroot00000000000000--- layout: layouts/base.njk templateClass: elv-default headerClass: elv-header-default --- {% include "header.njk" %}
{{ content | safe }} {% include 'components/suggestion-form.html' %}
borgmatic-1.5.1/docs/_includes/prism-theme.css000066400000000000000000000055511361605604600213430ustar00rootroot00000000000000/** * prism.js default theme for JavaScript, CSS and HTML * Based on dabblet (http://dabblet.com) * @author Lea Verou */ code[class*="language-"], pre[class*="language-"] { color: #ABB2BF; background: none; font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace; text-align: left; white-space: pre; word-spacing: normal; word-break: normal; word-wrap: normal; line-height: 1.5; -moz-tab-size: 4; -o-tab-size: 4; tab-size: 4; -webkit-hyphens: none; -moz-hyphens: none; -ms-hyphens: none; hyphens: none; } pre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection, code[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection { text-shadow: none; background: #383e49; } pre[class*="language-"]::selection, pre[class*="language-"] ::selection, code[class*="language-"]::selection, code[class*="language-"] ::selection { text-shadow: none; background: #9aa2b1; } @media print { code[class*="language-"], pre[class*="language-"] { text-shadow: none; } } /* Code blocks */ pre[class*="language-"] { padding: 1em; margin: .5em 0; overflow: auto; } :not(pre) > code[class*="language-"], pre[class*="language-"] { background: #282c34; } /* Inline code */ :not(pre) > code[class*="language-"] { padding: .1em; border-radius: .3em; white-space: normal; } .token.comment, .token.prolog, .token.doctype, .token.cdata { color: #5C6370; } .token.punctuation { color: #abb2bf; } .token.selector, .token.tag { color: #e06c75; } .token.property, .token.boolean, .token.number, .token.constant, .token.symbol, .token.attr-name, .token.deleted { color: #d19a66; } .token.string, .token.char, .token.attr-value, .token.builtin, .token.inserted { color: #98c379; } .token.operator, .token.entity, .token.url, .language-css .token.string, .style .token.string { color: #56b6c2; } .token.atrule, .token.keyword { color: #e06c75; } 
.token.function { color: #61afef; } .token.regex, .token.important, .token.variable { color: #c678dd; } .token.important, .token.bold { font-weight: bold; } .token.italic { font-style: italic; } .token.entity { cursor: help; } pre.line-numbers { position: relative; padding-left: 3.8em; counter-reset: linenumber; } pre.line-numbers > code { position: relative; } .line-numbers .line-numbers-rows { position: absolute; pointer-events: none; top: 0; font-size: 100%; left: -3.8em; width: 3em; /* works for line-numbers below 1000 lines */ letter-spacing: -1px; border-right: 0; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .line-numbers-rows > span { pointer-events: none; display: block; counter-increment: linenumber; } .line-numbers-rows > span:before { content: counter(linenumber); color: #5C6370; display: block; padding-right: 0.8em; text-align: right; } borgmatic-1.5.1/docs/how-to/000077500000000000000000000000001361605604600156415ustar00rootroot00000000000000borgmatic-1.5.1/docs/how-to/add-preparation-and-cleanup-steps-to-backups.md000066400000000000000000000065501361605604600266320ustar00rootroot00000000000000--- title: How to add preparation and cleanup steps to backups --- ## Preparation and cleanup hooks If you find yourself performing prepraration tasks before your backup runs, or cleanup work afterwards, borgmatic hooks may be of interest. Hooks are shell commands that borgmatic executes for you at various points, and they're configured in the `hooks` section of your configuration file. But if you're looking to backup a database, it's probably easier to use the [database backup feature](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/) instead. You can specify `before_backup` hooks to perform preparation steps before running backups, and specify `after_backup` hooks to perform cleanup steps afterwards. 
Here's an example: ```yaml hooks: before_backup: - mount /some/filesystem after_backup: - umount /some/filesystem ``` The `before_backup` and `after_backup` hooks each run once per configuration file. `before_backup` hooks run prior to backups of all repositories in a configuration file, right before the `create` action. `after_backup` hooks run afterwards, but not if an error occurs in a previous hook or in the backups themselves. There are additional hooks for the `prune` and `check` actions as well. `before_prune` and `after_prune` run if there are any `prune` actions, while `before_check` and `after_check` run if there are any `check` actions. You can also use `before_everything` and `after_everything` hooks to perform global setup or cleanup: ```yaml hooks: before_everything: - set-up-stuff-globally after_everything: - clean-up-stuff-globally ``` `before_everything` hooks collected from all borgmatic configuration files run once before all configuration files (prior to all actions), but only if there is a `create` action. An error encountered during a `before_everything` hook causes borgmatic to exit without creating backups. `after_everything` hooks run once after all configuration files and actions, but only if there is a `create` action. It runs even if an error occurs during a backup or a backup hook, but not if an error occurs during a `before_everything` hook. borgmatic also runs `on_error` hooks if an error occurs, either when creating a backup or running a backup hook. See the [monitoring and alerting documentation](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) for more information. ## Hook output Any output produced by your hooks shows up both at the console and in syslog (when run in a non-interactive console). For more information, read about inspecting your backups. ## Security An important security note about hooks: borgmatic executes all hook commands with the user permissions of borgmatic itself. 
So to prevent potential shell injection or privilege escalation, do not forget to set secure permissions on borgmatic configuration files (`chmod 0600`) and scripts (`chmod 0700`) invoked by hooks. ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/) * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) borgmatic-1.5.1/docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server.md000066400000000000000000000107671361605604600304240ustar00rootroot00000000000000--- title: How to backup to a removable drive or an intermittent server --- ## Occasional backups A common situation is backing up to a repository that's only sometimes online. For instance, you might send most of your backups to the cloud, but occasionally you want to plug in an external hard drive or backup to your buddy's sometimes-online server for that extra level of redundancy. But if you run borgmatic and your hard drive isn't plugged in, or your buddy's server is offline, then you'll get an annoying error message and the overall borgmatic run will fail (even if individual repositories still complete). So what if you want borgmatic to swallow the error of a missing drive or an offline server, and continue trucking along? That's where the concept of "soft failure" come in. ## Soft failure command hooks This feature leverages [borgmatic command hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/), so first familiarize yourself with them. The idea is that you write a simple test in the form of a borgmatic hook to see if backups should proceed or not. 
The way the test works is that if any of your hook commands return a special exit status of 75, that indicates to borgmatic that it's a temporary failure, and borgmatic should skip all subsequent actions for that configuration file. If you return any other status, then it's a standard success or error. (Zero is success; anything else other than 75 is an error). So for instance, if you have an external drive that's only sometimes mounted, declare its repository in its own [separate configuration file](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/), say at `/etc/borgmatic.d/removable.yaml`: ```yaml location: source_directories: - /home repositories: - /mnt/removable/backup.borg ``` Then, write a `before_backup` hook in that same configuration file that uses the external `findmnt` utility to see whether the drive is mounted before proceeding. ```yaml hooks: before_backup: - findmnt /mnt/removable > /dev/null || exit 75 ``` What this does is check if the `findmnt` command errors when probing for a particular mount point. If it does error, then it returns exit code 75 to borgmatic. borgmatic logs the soft failure, skips all further actions in that configurable file, and proceeds onward to any other borgmatic configuration files you may have. You can imagine a similar check for the sometimes-online server case: ```yaml location: source_directories: - /home repositories: - me@buddys-server.org:backup.borg hooks: before_backup: - ping -q -c 1 buddys-server.org > /dev/null || exit 75 ``` ## Caveats and details There are some caveats you should be aware of with this feature. * You'll generally want to put a soft failure command in the `before_backup` hook, so as to gate whether the backup action occurs. While a soft failure is also supported in the `after_backup` hook, returning a soft failure there won't prevent any actions from occuring, because they've already occurred! 
Similiarly, you can return a soft failure from an `on_error` hook, but at that point it's too late to prevent the error. * Returning a soft failure does prevent further commands in the same hook from executing. So, like a standard error, it is an "early out". Unlike a standard error, borgmatic does not display it in angry red text or consider it a failure. * The soft failure only applies to the scope of a single borgmatic configuration file. So put anything that you don't want soft-failed, like always-online cloud backups, in separate configuration files from your soft-failing repositories. * The soft failure doesn't have to apply to a repository. You can even perform a test to make sure that individual source directories are mounted and available. Use your imagination! * The soft failure feature also works for `before_prune`, `after_prune`, `before_check`, and `after_check` hooks. But it is not implemented for `before_everything` or `after_everything`. ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/) * [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/) * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) borgmatic-1.5.1/docs/how-to/backup-your-databases.md000066400000000000000000000156161361605604600223620ustar00rootroot00000000000000--- title: How to backup your databases --- ## Database dump hooks If you want to backup a database, it's best practice with most database systems to backup an exported database dump, rather than backing up your database's internal file storage. That's because the internal storage can change while you're reading from it. In contrast, a database dump creates a consistent snapshot that is more suited for backups. 
Fortunately, borgmatic includes built-in support for creating database dumps prior to running backups. For example, here is everything you need to dump and backup a couple of local PostgreSQL databases and a MySQL/MariaDB database: ```yaml hooks: postgresql_databases: - name: users - name: orders mysql_databases: - name: posts ``` Prior to each backup, borgmatic dumps each configured database to a file and includes it in the backup. After the backup completes, borgmatic removes the database dump files to recover disk space. borgmatic creates these temporary dump files in `~/.borgmatic` by default. To customize this path, set the `borgmatic_source_directory` option in the `location` section of borgmatic's configuration. Here's a more involved example that connects to remote databases: ```yaml hooks: postgresql_databases: - name: users hostname: database1.example.org port: 5433 username: postgres password: trustsome1 format: tar options: "--role=someone" mysql_databases: - name: posts hostname: database2.example.org port: 3307 username: root password: trustsome1 options: "--skip-comments" ``` If you want to dump all databases on a host, use `all` for the database name: ```yaml hooks: postgresql_databases: - name: all mysql_databases: - name: all ``` Note that you may need to use a `username` of the `postgres` superuser for this to work with PostgreSQL. ### Configuration backups An important note about this database configuration: You'll need the configuration to be present in order for borgmatic to restore a database. So to prepare for this situation, it's a good idea to include borgmatic's own configuration files as part of your regular backups. That way, you can always bring back any missing configuration files in order to restore a database. ## Supported databases As of now, borgmatic supports PostgreSQL and MySQL/MariaDB databases directly. But see below about general-purpose preparation and cleanup hooks as a work-around with other database systems. 
Also, please [file a ticket](https://torsion.org/borgmatic/#issues) for additional database systems that you'd like supported. ## Database restoration To restore a database dump from an archive, use the `borgmatic restore` action. But the first step is to figure out which archive to restore from. A good way to do that is to use the `list` action: ```bash borgmatic list ``` (No borgmatic `list` action? Try the old-style `--list`, or upgrade borgmatic!) That should yield output looking something like: ```text host-2019-01-01T04:05:06.070809 Tue, 2019-01-01 04:05:06 [...] host-2019-01-02T04:06:07.080910 Wed, 2019-01-02 04:06:07 [...] ``` Assuming that you want to restore all database dumps from the archive with the most up-to-date files and therefore the latest timestamp, run a command like: ```bash borgmatic restore --archive host-2019-01-02T04:06:07.080910 ``` (No borgmatic `restore` action? Upgrade borgmatic!) With newer versions of borgmatic, you can simplify this to: ```bash borgmatic restore --archive latest ``` The `--archive` value is the name of the archive to restore from. This restores all databases dumps that borgmatic originally backed up to that archive. This is a destructive action! `borgmatic restore` replaces live databases by restoring dumps from the selected archive. So be very careful when and where you run it. ### Repository selection If you have a single repository in your borgmatic configuration file(s), no problem: the `restore` action figures out which repository to use. But if you have multiple repositories configured, then you'll need to specify the repository path containing the archive to restore. Here's an example: ```bash borgmatic restore --repository repo.borg --archive host-2019-... ``` ### Restore particular databases If you've backed up multiple databases into an archive, and you'd only like to restore one of them, use the `--database` flag to select one or more databases. 
For instance: ```bash borgmatic restore --archive host-2019-... --database users ``` ### Limitations There are a few important limitations with borgmatic's current database restoration feature that you should know about: 1. You must restore as the same Unix user that created the archive containing the database dump. That's because the user's home directory path is encoded into the path of the database dump within the archive. 2. As mentioned above, borgmatic can only restore a database that's defined in borgmatic's own configuration file. So include your configuration file in backups to avoid getting caught without a way to restore a database. 3. borgmatic does not currently support backing up or restoring multiple databases that share the exact same name on different hosts. ### Manual restoration If you prefer to restore a database without the help of borgmatic, first [extract](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) an archive containing a database dump, and then manually restore the dump file found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore` or `mysql` commands). ## Preparation and cleanup hooks If this database integration is too limited for needs, borgmatic also supports general-purpose [preparation and cleanup hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/). These hooks allows you to trigger arbitrary commands or scripts before and after backups. So if necessary, you can use these hooks to create database dumps with any database system. ## Troubleshooting ### MySQL table lock errors If you encounter table lock errors during a database dump with MySQL/MariaDB, you may need to [use a transaction](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_single-transaction). You can add any additional flags to the `options:` in your database configuration. 
Here's an example: ```yaml hooks: mysql_databases: - name: posts options: "--single-transaction --quick" ``` ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/) * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) * [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) borgmatic-1.5.1/docs/how-to/deal-with-very-large-backups.md000066400000000000000000000061501361605604600235440ustar00rootroot00000000000000--- title: How to deal with very large backups --- ## Biggish data Borg itself is great for efficiently de-duplicating data across successive backup archives, even when dealing with very large repositories. But you may find that while borgmatic's default mode of "prune, create, and check" works well on small repositories, it's not so great on larger ones. That's because running the default consistency checks takes a long time on large repositories. ### A la carte actions If you find yourself in this situation, you have some options. First, you can run borgmatic's pruning, creating, or checking actions separately. For instance, the the following optional actions are available: ```bash borgmatic prune borgmatic create borgmatic check ``` (No borgmatic `prune`, `create`, or `check` actions? Try the old-style `--prune`, `--create`, or `--check`. Or upgrade borgmatic!) You can run with only one of these actions provided, or you can mix and match any number of them in a single borgmatic run. This supports approaches like making backups with `create` on a frequent schedule, while only running expensive consistency checks with `check` on a much less frequent basis from a separate cron job. ### Consistency check configuration Another option is to customize your consistency checks. 
The default consistency checks run both full-repository checks and per-archive checks within each repository. But if you find that archive checks are too slow, for example, you can configure borgmatic to run repository checks only. Configure this in the `consistency` section of borgmatic configuration: ```yaml consistency: checks: - repository ``` If that's still too slow, you can disable consistency checks entirely, either for a single repository or for all repositories. Disabling all consistency checks looks like this: ```yaml consistency: checks: - disabled ``` Or, if you have multiple repositories in your borgmatic configuration file, you can keep running consistency checks, but only against a subset of the repositories: ```yaml consistency: check_repositories: - path/of/repository_to_check.borg ``` Finally, you can override your configuration file's consistency checks, and run particular checks via the command-line. For instance: ```bash borgmatic check --only data --only extract ``` This is useful for running slow consistency checks on an infrequent basis, separate from your regular checks. ## Troubleshooting ### Broken pipe with remote repository When running borgmatic on a large remote repository, you may receive errors like the following, particularly while "borg check" is validating backups for consistency: ```text Write failed: Broken pipe borg: Error: Connection closed by remote host ``` This error can be caused by an ssh timeout, which you can rectify by adding the following to the `~/.ssh/config` file on the client: ```text Host * ServerAliveInterval 120 ``` This should make the client keep the connection alive while validating backups. 
## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) borgmatic-1.5.1/docs/how-to/develop-on-borgmatic.md000066400000000000000000000111421361605604600221770ustar00rootroot00000000000000--- title: How to develop on borgmatic --- ## Source code To get set up to hack on borgmatic, first clone master via HTTPS or SSH: ```bash git clone https://projects.torsion.org/witten/borgmatic.git ``` Or: ```bash git clone ssh://git@projects.torsion.org:3022/witten/borgmatic.git ``` Then, install borgmatic "[editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs)" so that you can run borgmatic commands while you're hacking on them to make sure your changes work. ```bash cd borgmatic/ pip3 install --editable --user . ``` Note that this will typically install the borgmatic commands into `~/.local/bin`, which may or may not be on your PATH. There are other ways to install borgmatic editable as well, for instance into the system Python install (so without `--user`, as root), or even into a [virtualenv](https://virtualenv.pypa.io/en/stable/). How or where you install borgmatic is up to you, but generally an editable install makes development and testing easier. ## Automated tests Assuming you've cloned the borgmatic source code as described above, and you're in the `borgmatic/` working copy, install tox, which is used for setting up testing environments: ```bash pip3 install --user tox ``` Finally, to actually run tests, run: ```bash cd borgmatic tox ``` ### Code formatting If when running tests, you get an error from the [Black](https://black.readthedocs.io/en/stable/) code formatter about files that would be reformatted, you can ask Black to format them for you via the following: ```bash tox -e black ``` Note that Black requires at minimum Python 3.6. 
And if you get a complaint from the [isort](https://github.com/timothycrosley/isort) Python import orderer, you can ask isort to order your imports for you: ```bash tox -e isort ``` ### End-to-end tests borgmatic additionally includes some end-to-end tests that integration test with Borg and supported databases for a few representative scenarios. These tests don't run by default when running `tox`, because they're relatively slow and depend on Docker containers for runtime dependencies. These tests tests do run on the continuous integration (CI) server, and running them on your developer machine is the closest thing to CI test parity. If you would like to run the full test suite, first install Docker and [Docker Compose](https://docs.docker.com/compose/install/). Then run: ```bash scripts/run-full-dev-tests ``` Note that this scripts assumes you have permission to run Docker. If you don't, then you may need to run with `sudo`. ## Code style Start with [PEP 8](https://www.python.org/dev/peps/pep-0008/). But then, apply the following deviations from it: * For strings, prefer single quotes over double quotes. * Limit all lines to a maximum of 100 characters. * Use trailing commas within multiline values or argument lists. * For multiline constructs, put opening and closing delimeters on lines separate from their contents. * Within multiline constructs, use standard four-space indentation. Don't align indentation with an opening delimeter. borgmatic code uses the [Black](https://black.readthedocs.io/en/stable/) code formatter, the [Flake8](http://flake8.pycqa.org/en/latest/) code checker, and the [isort](https://github.com/timothycrosley/isort) import orderer, so certain code style requirements will be enforced when running automated tests. See the Black, Flake8, and isort documentation for more information. ## Continuous integration Each pull request triggers a continuous integration build which runs the test suite. 
You can view these builds on [build.torsion.org](https://build.torsion.org/witten/borgmatic), and they're also linked from the commits list on each pull request. ## Documentation development Updates to borgmatic's documentation are welcome. It's formatted in Markdown and located in the `docs/` directory in borgmatic's source, plus the `README.md` file at the root. To build and view a copy of the documentation with your local changes, run the following from the root of borgmatic's source code: ```bash sudo scripts/dev-docs ``` This requires Docker to be installed on your system. You may not need to use sudo if your non-root user has permissions to run Docker. After you run the script, you can point your web browser at http://localhost:8080 to view the documentation with your changes. To close the documentation server, ctrl-C the script. Note that it does not currently auto-reload, so you'll need to stop it and re-run it for any additional documentation changes to take effect. ## Related documentation * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) borgmatic-1.5.1/docs/how-to/extract-a-backup.md000066400000000000000000000105011361605604600213130ustar00rootroot00000000000000--- title: How to extract a backup --- ## Extract When the worst happens—or you want to test your backups—the first step is to figure out which archive to extract. A good way to do that is to use the `list` action: ```bash borgmatic list ``` (No borgmatic `list` action? Try the old-style `--list`, or upgrade borgmatic!) That should yield output looking something like: ```text host-2019-01-01T04:05:06.070809 Tue, 2019-01-01 04:05:06 [...] host-2019-01-02T04:06:07.080910 Wed, 2019-01-02 04:06:07 [...] ``` Assuming that you want to extract the archive with the most up-to-date files and therefore the latest timestamp, run a command like: ```bash borgmatic extract --archive host-2019-01-02T04:06:07.080910 ``` (No borgmatic `extract` action? 
Try the old-style `--extract`, or upgrade borgmatic!) With newer versions of borgmatic, you can simplify this to: ```bash borgmatic extract --archive latest ``` The `--archive` value is the name of the archive to extract. This extracts the entire contents of the archive to the current directory, so make sure you're in the right place before running the command. ## Repository selection If you have a single repository in your borgmatic configuration file(s), no problem: the `extract` action figures out which repository to use. But if you have multiple repositories configured, then you'll need to specify the repository path containing the archive to extract. Here's an example: ```bash borgmatic extract --repository repo.borg --archive host-2019-... ``` ## Extract particular files Sometimes, you want to extract a single deleted file, rather than extracting everything from an archive. To do that, tack on one or more `--path` values. For instance: ```bash borgmatic extract --archive host-2019-... --path path/1 path/2 ``` Note that the specified restore paths should not have a leading slash. Like a whole-archive extract, this also extracts into the current directory. So for example, if you happen to be in the directory `/var` and you run the `extract` command above, borgmatic will extract `/var/path/1` and `/var/path/2`. ## Extract to a particular destination By default, borgmatic extracts files into the current directory. To instead extract files to a particular destination directory, use the `--destination` flag: ```bash borgmatic extract --archive host-2019-... --destination /tmp ``` When using the `--destination` flag, be careful not to overwrite your system's files with extracted files unless that is your intent. ## Database restoration The `borgmatic extract` command only extracts files. To restore a database, please see the [documentation on database backups and restores](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/). 
borgmatic does not perform database restoration as part of `borgmatic extract` so that you can extract files from your archive without impacting your live databases. ## Mount a filesystem If instead of extracting files, you'd like to explore the files from an archive as a [FUSE](https://en.wikipedia.org/wiki/Filesystem_in_Userspace) filesystem, you can use the `borgmatic mount` action. Here's an example: ```bash borgmatic mount --archive host-2019-... --mount-point /mnt ``` This mounts the entire archive on the given mount point `/mnt`, so that you can look in there for your files. Omit the `--archive` flag to mount all archives (lazy-loaded): ```bash borgmatic mount --mount-point /mnt ``` Or use the "latest" value for the archive to mount the latest successful archive: ```bash borgmatic mount --archive latest --mount-point /mnt ``` If you'd like to restrict the mounted filesystem to only particular paths from your archive, use the `--path` flag, similar to the `extract` action above. For instance: ```bash borgmatic mount --archive host-2019-... --mount-point /mnt --path var/lib ``` When you're all done exploring your files, unmount your mount point. No `--archive` flag is needed: ```bash borgmatic umount --mount-point /mnt ``` ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) * [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/) borgmatic-1.5.1/docs/how-to/inspect-your-backups.md000066400000000000000000000064521361605604600222610ustar00rootroot00000000000000--- title: How to inspect your backups --- ## Backup progress By default, borgmatic runs proceed silently except in the case of errors. 
But if you'd like to get additional information about the progress of the backup as it proceeds, use the verbosity option:
So you may need to change either [the global rate limit](https://www.rootusers.com/how-to-change-log-rate-limiting-in-linux/) or [the per-service rate limit](https://www.freedesktop.org/software/systemd/man/journald.conf.html#RateLimitIntervalSec=) if you're finding that borgmatic logs are missing. Note that the [sample borgmatic systemd service file](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#systemd) already has this rate limit disabled for systemd's journal. ### Logging to file If you don't want to use syslog, and you'd rather borgmatic log to a plain file, use the `--log-file` flag: ```bash borgmatic --log-file /path/to/file.log ``` Note that if you use the `--log-file` flag, you are responsible for rotating the log file so it doesn't grow too large, for example with [logrotate](https://wiki.archlinux.org/index.php/Logrotate). Also, there is a `--log-file-verbosity` flag to customize the log file's log level. ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) * [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/) * [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) borgmatic-1.5.1/docs/how-to/make-per-application-backups.md000066400000000000000000000123101361605604600236100ustar00rootroot00000000000000--- title: How to make per-application backups --- ## Multiple backup configurations You may find yourself wanting to create different backup policies for different applications on your system. For instance, you may want one backup configuration for your database data directory, and a different configuration for your user home directories. 
The way to accomplish that is pretty simple: Create multiple separate configuration files and place each one in a `/etc/borgmatic.d/` directory. For instance: ```bash sudo mkdir /etc/borgmatic.d sudo generate-borgmatic-config --destination /etc/borgmatic.d/app1.yaml sudo generate-borgmatic-config --destination /etc/borgmatic.d/app2.yaml ``` When you set up multiple configuration files like this, borgmatic will run each one in turn from a single borgmatic invocation. This includes, by default, the traditional `/etc/borgmatic/config.yaml` as well. Each configuration file is interpreted independently, as if you ran borgmatic for each configuration file one at a time. In other words, borgmatic does not perform any merging of configuration files by default. If you'd like borgmatic to merge your configuration files, see below about configuration includes. Additionally, the `~/.config/borgmatic.d/` directory works the same way as `/etc/borgmatic.d`. If you need even more customizability, you can specify alternate configuration paths on the command-line with borgmatic's `--config` flag. See `borgmatic --help` for more information. ## Configuration includes Once you have multiple different configuration files, you might want to share common configuration options across these files with having to copy and paste them. To achieve this, you can put fragments of common configuration options into a file, and then include or inline that file into one or more borgmatic configuration files. Let's say that you want to include common retention configuration across all of your configuration files. You could do that in each configuration file with the following: ```yaml location: ... 
retention: !include /etc/borgmatic/common_retention.yaml ``` And then the contents of `common_retention.yaml` could be: ```yaml keep_hourly: 24 keep_daily: 7 ``` To prevent borgmatic from trying to load these configuration fragments by themselves and complaining that they are not valid configuration files, you should put them in a directory other than `/etc/borgmatic.d/`. (A subdirectory is fine.) Note that this form of include must be a YAML value rather than a key. For example, this will not work: ```yaml location: ... # Don't do this. It won't work! !include /etc/borgmatic/common_retention.yaml ``` But if you do want to merge in a YAML key and its values, keep reading! ## Include merging If you need to get even fancier and pull in common configuration options while potentially overriding individual options, you can perform a YAML merge of included configuration using the YAML `<<` key. For instance, here's an example of a main configuration file that pulls in two retention options via an include, and then overrides one of them locally: ```yaml location: ... retention: keep_daily: 5 <<: !include /etc/borgmatic/common_retention.yaml ``` This is what `common_retention.yaml` might look like: ```yaml keep_hourly: 24 keep_daily: 7 ``` Once this include gets merged in, the resulting configuration would have a `keep_hourly` value of `24` and an overridden `keep_daily` value of `5`. When there is a collision of an option between the local file and the merged include, the local file's option takes precedent. And note that this is a shallow merge rather than a deep merge, so the merging does not descend into nested values. Note that this `<<` include merging syntax is only for merging in mappings (keys/values). If you'd like to include other types like scalars or lists directly, please see the section above about standard includes. 
## Configuration overrides In more complex multi-application setups, you may want to override particular borgmatic configuration file options at the time you run borgmatic. For instance, you could reuse a common configuration file for multiple applications, but then set the repository for each application at runtime. Or you might want to try a variant of an option for testing purposes without actually touching your configuration file. Whatever the reason, you can override borgmatic configuration options at the command-line via the `--override` flag. Here's an example: ```bash borgmatic create --override location.remote_path=borg1 ``` What this does is load your configuration files, and for each one, disregard the configured value for the `remote_path` option in the `location` section, and use the value of `borg1` instead. Note that the value is parsed as an actual YAML string, so you can even set list values by using brackets. For instance: ```bash borgmatic create --override location.repositories=[test1.borg,test2.borg] ``` There is not currently a way to override a single element of a list without replacing the whole list. Be sure to quote your overrides if they contain spaces or other characters that your shell may interpret. ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) borgmatic-1.5.1/docs/how-to/monitor-your-backups.md000066400000000000000000000307061361605604600223020ustar00rootroot00000000000000--- title: How to monitor your backups --- ## Monitoring and alerting Having backups is great, but they won't do you a lot of good unless you have confidence that they're running on a regular basis. That's where monitoring and alerting comes in. There are several different ways you can monitor your backups and find out whether they're succeeding. Which of these you choose to do is up to you and your particular infrastructure: 1. 
**Job runner alerts**: The easiest place to start is with failure alerts from the [scheduled job runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot) (cron, systemd, etc.) that's running borgmatic. But note that if the job doesn't even get scheduled (e.g. due to the job runner not running), you probably won't get an alert at all! Still, this is a decent first line of defense, especially when combined with some of the other approaches below. 2. **borgmatic error hooks**: The `on_error` hook allows you to run an arbitrary command or script when borgmatic itself encounters an error running your backups. So for instance, you can run a script to send yourself a text message alert. But note that if borgmatic doesn't actually run, this alert won't fire. See [error hooks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks) below for how to configure this. 4. **borgmatic monitoring hooks**: This feature integrates with monitoring services like [Healthchecks](https://healthchecks.io/), [Cronitor](https://cronitor.io), [Cronhub](https://cronhub.io), and [PagerDuty](https://www.pagerduty.com/) and pings these services whenever borgmatic runs. That way, you'll receive an alert when something goes wrong or (for certain hooks) the service doesn't hear from borgmatic for a configured interval. See [Healthchecks hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook), [Cronitor hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook), [Cronhub hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook), and [PagerDuty hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook) below for how to configure this. 3. **Third-party monitoring software**: You can use traditional monitoring software to consume borgmatic JSON output and track when the last successful backup occurred. 
See [scripting borgmatic](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#scripting-borgmatic) below for how to configure this. 5. **Borg hosting providers**: Most [Borg hosting providers](https://torsion.org/borgmatic/#hosting-providers) include monitoring and alerting as part of their offering. This gives you a dashboard to check on all of your backups, and can alert you if the service doesn't hear from borgmatic for a configured interval. 6. **borgmatic consistency checks**: While not strictly part of monitoring, if you really want confidence that your backups are not only running but are restorable as well, you can configure particular [consistency checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration) or even script full [extract tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/). ## Error hooks When an error occurs during a `prune`, `create`, or `check` action, borgmatic can run configurable shell commands to fire off custom error notifications or take other actions, so you can get alerted as soon as something goes wrong. Here's a not-so-useful example: ```yaml hooks: on_error: - echo "Error while creating a backup or running a backup hook." ``` The `on_error` hook supports interpolating particular runtime variables into the hook command. Here's an example that assumes you provide a separate shell script to handle the alerting: ```yaml hooks: on_error: - send-text-message.sh "{configuration_filename}" "{repository}" ``` In this example, when the error occurs, borgmatic interpolates a few runtime values into the hook command: the borgmatic configuration filename, and the path of the repository. 
Here's the full set of supported variables you can use here: * `configuration_filename`: borgmatic configuration filename in which the error occurred * `repository`: path of the repository in which the error occurred (may be blank if the error occurs in a hook) * `error`: the error message itself * `output`: output of the command that failed (may be blank if an error occurred without running a command) Note that borgmatic runs the `on_error` hooks only for `prune`, `create`, or `check` actions or hooks in which an error occurs, and not other actions. borgmatic does not run `on_error` hooks if an error occurs within a `before_everything` or `after_everything` hook. For more about hooks, see the [borgmatic hooks documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/), especially the security information. ## Healthchecks hook [Healthchecks](https://healthchecks.io/) is a service that provides "instant alerts when your cron jobs fail silently", and borgmatic has built-in integration with it. Once you create a Healthchecks account and project on their site, all you need to do is configure borgmatic with the unique "Ping URL" for your project. Here's an example: ```yaml hooks: healthchecks: https://hc-ping.com/addffa72-da17-40ae-be9c-ff591afb942a ``` With this hook in place, borgmatic pings your Healthchecks project when a backup begins, ends, or errors. Specifically, before the `before_backup` hooks run, borgmatic lets Healthchecks know that it has started if any of the `prune`, `create`, or `check` actions are run. Then, if the actions complete successfully, borgmatic notifies Healthchecks of the success after the `after_backup` hooks run, and includes borgmatic logs in the payload data sent to Healthchecks. This means that borgmatic logs show up in the Healthchecks UI, although be aware that Healthchecks currently has a 10-kilobyte limit for the logs in each ping. 
If an error occurs during any action, borgmatic notifies Healthchecks after the `on_error` hooks run, also tacking on logs including the error itself. But the logs are only included for errors that occur when a `prune`, `create`, or `check` action is run. You can customize the verbosity of the logs that are sent to Healthchecks with borgmatic's `--monitoring-verbosity` flag. The `--files` and `--stats` flags may also be of use. See `borgmatic --help` for more information. You can configure Healthchecks to notify you by a [variety of mechanisms](https://healthchecks.io/#welcome-integrations) when backups fail or it doesn't hear from borgmatic for a certain period of time. ## Cronitor hook [Cronitor](https://cronitor.io/) provides "Cron monitoring and uptime healthchecks for websites, services and APIs", and borgmatic has built-in integration with it. Once you create a Cronitor account and cron job monitor on their site, all you need to do is configure borgmatic with the unique "Ping API URL" for your monitor. Here's an example: ```yaml hooks: cronitor: https://cronitor.link/d3x0c1 ``` With this hook in place, borgmatic pings your Cronitor monitor when a backup begins, ends, or errors. Specifically, before the `before_backup` hooks run, borgmatic lets Cronitor know that it has started if any of the `prune`, `create`, or `check` actions are run. Then, if the actions complete successfully, borgmatic notifies Cronitor of the success after the `after_backup` hooks run. And if an error occurs during any action, borgmatic notifies Cronitor after the `on_error` hooks run. You can configure Cronitor to notify you by a [variety of mechanisms](https://cronitor.io/docs/cron-job-notifications) when backups fail or it doesn't hear from borgmatic for a certain period of time. ## Cronhub hook [Cronhub](https://cronhub.io/) provides "instant alerts when any of your background jobs fail silently or run longer than expected", and borgmatic has built-in integration with it. 
Once you create a Cronhub account and monitor on their site, all you need to do is configure borgmatic with the unique "Ping URL" for your monitor. Here's an example: ```yaml hooks: cronhub: https://cronhub.io/start/1f5e3410-254c-11e8-b61d-55875966d031 ``` With this hook in place, borgmatic pings your Cronhub monitor when a backup begins, ends, or errors. Specifically, before the `before_backup` hooks run, borgmatic lets Cronhub know that it has started if any of the `prune`, `create`, or `check` actions are run. Then, if the actions complete successfully, borgmatic notifies Cronhub of the success after the `after_backup` hooks run. And if an error occurs during any action, borgmatic notifies Cronhub after the `on_error` hooks run. Note that even though you configure borgmatic with the "start" variant of the ping URL, borgmatic substitutes the correct state into the URL when pinging Cronhub ("start", "finish", or "fail"). You can configure Cronhub to notify you by a [variety of mechanisms](https://docs.cronhub.io/integrations.html) when backups fail or it doesn't hear from borgmatic for a certain period of time. ## PagerDuty hook [PagerDuty](https://www.pagerduty.com/) provides incident monitoring and alerting, and borgmatic has built-in integration with it. Once you create a PagerDuty account and service on their site, all you need to do is configure borgmatic with the unique "Integration Key" for your service. Here's an example: ```yaml hooks: pagerduty: a177cad45bd374409f78906a810a3074 ``` With this hook in place, borgmatic creates a PagerDuty event for your service whenever backups fail. Specifically, if an error occurs during a `create`, `prune`, or `check` action, borgmatic sends an event to PagerDuty after the `on_error` hooks run. Note that borgmatic does not contact PagerDuty when a backup starts or ends without error. You can configure PagerDuty to notify you by a [variety of mechanisms](https://support.pagerduty.com/docs/notifications) when backups fail. 
## Scripting borgmatic To consume the output of borgmatic in other software, you can include an optional `--json` flag with `create`, `list`, or `info` to get the output formatted as JSON. Note that when you specify the `--json` flag, Borg's other non-JSON output is suppressed so as not to interfere with the captured JSON. Also note that JSON output only shows up at the console, and not in syslog. ### Successful backups `borgmatic list` includes support for a `--successful` flag that only lists successful (non-checkpoint) backups. This flag works via a basic heuristic: It assumes that non-checkpoint archive names end with a digit (e.g. from a timestamp), while checkpoint archive names do not. This means that if you're using custom archive names that do not end in a digit, the `--successful` flag will not work as expected. Combined with a built-in Borg flag like `--last`, you can list the last successful backup for use in your monitoring scripts. Here's an example combined with `--json`: ```bash borgmatic list --successful --last 1 --json ``` Note that this particular combination will only work if you've got a single backup "series" in your repository. If you're instead backing up, say, from multiple different hosts into a single repository, then you'll need to get fancier with your archive listing. See `borg list --help` for more flags. ### Latest backups All borgmatic actions that accept an "--archive" flag allow you to specify an archive name of "latest". This lets you get the latest successful archive without having to first run "borgmatic list" manually, which can be handy in automated scripts. 
Here's an example: ```bash borgmatic info --archive latest ``` ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) * [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/) * [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) * [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) borgmatic-1.5.1/docs/how-to/restore-a-backup.md000066400000000000000000000001741361605604600213310ustar00rootroot00000000000000 borgmatic-1.5.1/docs/how-to/run-preparation-steps-before-backups.md000066400000000000000000000002301361605604600253260ustar00rootroot00000000000000 borgmatic-1.5.1/docs/how-to/set-up-backups.md000066400000000000000000000244461361605604600210400ustar00rootroot00000000000000--- title: How to set up backups with borgmatic --- ## Installation First, [install Borg](https://borgbackup.readthedocs.io/en/stable/installation.html), at least version 1.1. Then, download and install borgmatic by running the following command: ```bash sudo pip3 install --user --upgrade borgmatic ``` This is a [recommended user site installation](https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site). You will need to ensure that `/root/.local/bin` is available on your `$PATH` so that the borgmatic executable is available. For instance, adding this to root's `~/.profile` or `~/.bash_profile` may do the trick: ```bash export PATH="$PATH:~/.local/bin" ``` Note that your pip binary may have a different name than "pip3". Make sure you're using Python 3, as borgmatic does not support Python 2. 
### Other ways to install Along with the above process, you have several other options for installing borgmatic: * [Docker image with scheduled backups](https://hub.docker.com/r/b3vis/borgmatic/) * [Docker base image](https://hub.docker.com/r/monachus/borgmatic/) * [Debian](https://tracker.debian.org/pkg/borgmatic) * [Ubuntu](https://launchpad.net/ubuntu/+source/borgmatic) * [Fedora official](https://bodhi.fedoraproject.org/updates/?search=borgmatic) * [Fedora unofficial](https://copr.fedorainfracloud.org/coprs/heffer/borgmatic/) * [Arch Linux](https://www.archlinux.org/packages/community/any/borgmatic/) * [OpenBSD](http://ports.su/sysutils/borgmatic) * [openSUSE](https://software.opensuse.org/package/borgmatic) * [stand-alone binary](https://github.com/cmarquardt/borgmatic-binary) * [virtualenv](https://virtualenv.pypa.io/en/stable/) ## Hosting providers Need somewhere to store your encrypted offsite backups? The following hosting providers include specific support for Borg/borgmatic. Using these links and services helps support borgmatic development and hosting. (These are referral links, but without any tracking scripts or cookies.)
  • rsync.net: Cloud Storage provider with full support for borg and any other SSH/SFTP tool
  • BorgBase: Borg hosting service with support for monitoring, 2FA, and append-only repos
## Configuration After you install borgmatic, generate a sample configuration file: ```bash sudo generate-borgmatic-config ``` If that command is not found, then it may be installed in a location that's not in your system `PATH` (see above). Try looking in `~/.local/bin/`. This generates a sample configuration file at `/etc/borgmatic/config.yaml` by default. If you'd like to use another path, use the `--destination` flag, for instance: `--destination ~/.config/borgmatic/config.yaml`. You should edit the configuration file to suit your needs, as the generated values are only representative. All options are optional except where indicated, so feel free to ignore anything you don't need. Note that the configuration file is organized into distinct sections, each with a section name like `location:` or `storage:`. So take care that if you uncomment a particular option, also uncomment its containing section name, or else borgmatic won't recognize the option. Also be sure to use spaces rather than tabs for indentation; YAML does not allow tabs. You can get the same sample configuration file from the [configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/), the authoritative set of all configuration options. This is handy if borgmatic has added new options since you originally created your configuration file. Also check out how to [upgrade your configuration](https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-your-configuration). ### Encryption Note that if you plan to run borgmatic on a schedule with cron, and you encrypt your Borg repository with a passphrase instead of a key file, you'll either need to set the borgmatic `encryption_passphrase` configuration variable or set the `BORG_PASSPHRASE` environment variable. See the [repository encryption section](https://borgbackup.readthedocs.io/en/stable/quickstart.html#repository-encryption) of the Borg Quick Start for more info. 
Alternatively, you can specify the passphrase programmatically by setting either the borgmatic `encryption_passcommand` configuration variable or the `BORG_PASSCOMMAND` environment variable.
If the repository is on a remote host, make sure that your local user has key-based SSH access to the desired user account on the remote host. ## Backups Now that you've configured borgmatic and initialized a repository, it's a good idea to test that borgmatic is working. So to run borgmatic and start a backup, you can invoke it like this: ```bash borgmatic --verbosity 1 ``` By default, this will also prune any old backups as per the configured retention policy, and check backups for consistency problems due to things like file damage. The verbosity flag makes borgmatic list the files that it's archiving, which are those that are new or changed since the last backup. Eyeball the list and see if it matches your expectations based on the configuration. If you'd like to specify an alternate configuration file path, use the `--config` flag. See `borgmatic --help` for more information. ## Autopilot Running backups manually is good for validating your configuration, but I'm guessing that you want to run borgmatic automatically, say once a day. To do that, you can configure a separate job runner to invoke it periodically. ### cron If you're using cron, download the [sample cron file](https://projects.torsion.org/witten/borgmatic/src/master/sample/cron/borgmatic). Then, from the directory where you downloaded it: ```bash sudo mv borgmatic /etc/cron.d/borgmatic sudo chmod +x /etc/cron.d/borgmatic ``` You can modify the cron file if you'd like to run borgmatic more or less frequently. ### systemd If you're using systemd instead of cron to run jobs, download the [sample systemd service file](https://projects.torsion.org/witten/borgmatic/raw/branch/master/sample/systemd/borgmatic.service) and the [sample systemd timer file](https://projects.torsion.org/witten/borgmatic/raw/branch/master/sample/systemd/borgmatic.timer). 
Then, from the directory where you downloaded them: ```bash sudo mv borgmatic.service borgmatic.timer /etc/systemd/system/ sudo systemctl enable --now borgmatic.timer ``` Feel free to modify the timer file based on how frequently you'd like borgmatic to run. ## Colored output Borgmatic produces colored terminal output by default. It is disabled when a non-interactive terminal is detected (like a cron job), or when you use the `--json` flag. Otherwise, you can disable it by passing the `--no-color` flag, setting the environment variable `PY_COLORS=False`, or setting the `color` option to `false` in the `output` section of configuration. ## Troubleshooting ### "found character that cannot start any token" error If you run borgmatic and see an error looking something like this, it probably means you've used tabs instead of spaces: ``` test.yaml: Error parsing configuration file An error occurred while parsing a configuration file at config.yaml: while scanning for the next token found character that cannot start any token in "config.yaml", line 230, column 1 ``` YAML does not allow tabs. So to fix this, replace any tabs in your configuration file with the requisite number of spaces. ### libyaml compilation errors borgmatic depends on a Python YAML library (ruamel.yaml) that will optionally use a C YAML library (libyaml) if present. But if it's not installed, then when installing or upgrading borgmatic, you may see errors about compiling the YAML library. If so, not to worry. borgmatic should install and function correctly even without the C YAML library. And borgmatic won't be any faster with the C library present, so you don't need to go out of your way to install it. 
## Related documentation * [Make per-application backups](https://torsion.org/borgmatic/docs/how-to/make-per-application-backups/) * [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/) * [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/) * [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/) * [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/) * [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/) * [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/) borgmatic-1.5.1/docs/how-to/upgrade.md000066400000000000000000000100601361605604600176070ustar00rootroot00000000000000--- title: How to upgrade borgmatic --- ## Upgrading In general, all you should need to do to upgrade borgmatic is run the following: ```bash sudo pip3 install --user --upgrade borgmatic ``` See below about special cases with old versions of borgmatic. Additionally, if you installed borgmatic [without using `pip3 install --user`](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#other-ways-to-install), then your upgrade process may be different. ### Upgrading your configuration The borgmatic configuration file format is almost always backwards-compatible from release to release without any changes, but you may still want to update your configuration file when you upgrade to take advantage of new configuration options. This is completely optional. If you prefer, you can add new configuration options manually. If you do want to upgrade your configuration file to include new options, use the `generate-borgmatic-config` script with its optional `--source` flag that takes the path to your original configuration file. 
If provided with this path, `generate-borgmatic-config` merges your original configuration into the generated configuration file, so you get all the newest options and comments. Here's an example: ```bash generate-borgmatic-config --source config.yaml --destination config-new.yaml ``` New options start as commented out, so you can edit the file and decide whether you want to use each one. There are a few caveats to this process. First, when generating the new configuration file, `generate-borgmatic-config` replaces any comments you've written in your original configuration file with the newest generated comments. Second, the script adds back any options you had originally deleted, although it does so with the options commented out. And finally, any YAML includes you've used in the source configuration get flattened out into a single generated file. As a safety measure, `generate-borgmatic-config` refuses to modify configuration files in-place. So it's up to you to review the generated file and, if desired, replace your original configuration file with it. ### Upgrading from borgmatic 1.0.x borgmatic changed its configuration file format in version 1.1.0 from INI-style to YAML. This better supports validation, and has a more natural way to express lists of values. To upgrade your existing configuration, first upgrade to the new version of borgmatic. As of version 1.1.0, borgmatic no longer supports Python 2. If you were already running borgmatic with Python 3, then you can upgrade borgmatic in-place: ```bash sudo pip3 install --user --upgrade borgmatic ``` But if you were running borgmatic with Python 2, uninstall and reinstall instead: ```bash sudo pip uninstall borgmatic sudo pip3 install --user borgmatic ``` The pip binary names for different versions of Python can differ, so the above commands may need some tweaking to work on your machine. 
Once borgmatic is upgraded, run: ```bash sudo upgrade-borgmatic-config ``` That will generate a new YAML configuration file at /etc/borgmatic/config.yaml (by default) using the values from both your existing configuration and excludes files. The new version of borgmatic will consume the YAML configuration file instead of the old one. ### Upgrading from atticmatic You can ignore this section if you're not an atticmatic user (the former name of borgmatic). borgmatic only supports Borg now and no longer supports Attic. So if you're an Attic user, consider switching to Borg. See the [Borg upgrade command](https://borgbackup.readthedocs.io/en/stable/usage.html#borg-upgrade) for more information. Then, follow the instructions above about setting up your borgmatic configuration files. If you were already using Borg with atticmatic, then you can upgrade from atticmatic to borgmatic by running the following commands: ```bash sudo pip3 uninstall atticmatic sudo pip3 install --user borgmatic ``` That's it! borgmatic will continue using your /etc/borgmatic configuration files. ## Related documentation * [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/) borgmatic-1.5.1/docs/reference/000077500000000000000000000000001361605604600163625ustar00rootroot00000000000000borgmatic-1.5.1/docs/reference/command-line.md000066400000000000000000000007241361605604600212520ustar00rootroot00000000000000--- title: borgmatic command-line reference --- ## borgmatic options Here are all of the available borgmatic command-line options. 
This includes the separate options for each action sub-command: ``` {% include borgmatic/command-line.txt %} ``` ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [borgmatic configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/) borgmatic-1.5.1/docs/reference/configuration.md000066400000000000000000000010601361605604600215500ustar00rootroot00000000000000--- title: borgmatic configuration reference --- ## Configuration file Here is a full sample borgmatic configuration file including all available options: ```yaml {% include borgmatic/config.yaml %} ``` Note that you can also [download this configuration file](https://torsion.org/borgmatic/docs/reference/config.yaml) for use locally. ## Related documentation * [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/) * [borgmatic command-line reference](https://torsion.org/borgmatic/docs/reference/command-line/) borgmatic-1.5.1/docs/static/000077500000000000000000000000001361605604600157135ustar00rootroot00000000000000borgmatic-1.5.1/docs/static/borgbase.png000066400000000000000000000132761361605604600202160ustar00rootroot00000000000000PNG  IHDR%b/R@iCCPICC profile(}=H@_SU38dNDE EjVL.& IZpc⬫ ~:)HK -b=8ǻ{wP+1tñάJW""fs@u_'ď\W}~XJRfK'"hABggTa{2i#E,AQ()6k2Trȱ2t(~wk&'p |qݏ ԫ} \M|^mj#njp >xRgM`^{kHQWSzwwF? 
r-$D pHYs%&tIME &B !tEXtCommentCreated with GIMPWIDATxy<ǟ0c{"B,V%ERD тnڴȍ67J RiED.T :O=fꊹ?>>ۑqyPO{3egiδcNAxyDETUfϜ3}su!0ЛVwp*-y.[[,ع}nȨں^# RHPp?N+<D4t=n*ִOx'Bo;~SXsiXyQ?FӦZr[۩>*x́>|l""?pdBmmm>>}y~zw7eqh*9HIm{ 5473i(FmjUVU ;1Feu5D*HFhW8`^^^3iiK i4ZyeՓ§Wn|շ4'7Rb_tF+vvv~)IIk s#=ъ$:+ѵoJJDl>Kp8].omk$%$2FvKxyy|BBBBJ f%o :-͙??u'o9B{G\".Fp]<,<;6ٜޞr#ߤRDE4얤MĪ"nWzeU $1vbK#Cŗؘ|'MzBO'I#dCôMmlKyDmlיeke)#-5cTuUNh5JR[emRƆ]%P};CVcI/d=̭rŲI:ڨwcgϘSY}yY9)bFSV;;JKIO4?;]oQ[7sjdYKk` .&6Q{2k#RV^U+tkҷNkpy3VS[gFŞ)8{=.|ZU]:::vfr;6{I\^K,AxX vKs .]z/Ŀb8zޖ ;(1*fsfg3RJ߽ɎPi,:-M@v`9wusn'#8yF)wR3^նC>ɺQ'F~BL4հo`0E`nKF-gʝb/2s/_aJحFm }3yHHoh|`>۠AYukruMƢi験$-͉ >Fqa;JS"ν~;8|RX:TOfύXkimmi$ _ϚWUQbTdj?GyYAfXh~#-Y9#W4Y("DZZZ1L:%*+ Q͌ a:`_tf6`IL✼ vwwgd=(*.qzUT-+#F j7"z T ]/:)G )`ԝbOaNlߒ~># 9wvC\&83 ;~8;}anM^N+g}N!,zGwp#$t:йq;icr&$]Rָ8 #-iq7y?GMVgu9.>fyyyǏ/f9;$]aK&Z۾dl%\嚺:ˍeu֮tX뭢zCXxx5qbw]+!6wy]+Lqpt5Z$G`A,43\una>*zm,H,g hAъ1BAXyfo!AA4&;0we Da(WV]iVPHP# rz[6zmp_sk^Aܛxw~d0ĸ+IOc,zs6&v7*%!>J$%EӍ0A')+8uo #LE7RUQNNy.":n%cߎWT~99:}TԋHI=Omlmke񯶺N#\vc 4^q7)¥^9gdIEEOM7z-s jH#0[l=\$1=s3~e%A^OL]YwOݥVc vOo/0x Mkxy3 ۿiˣ: fݴDȡ޴nq&B_L@q4}Q(׮{7N#th?**֙3lPTgFsZ:JlvRr$ӛmmn^C;F8ػ8[_Y~8`XLn'ys{===7o]h}9DƦ&٬/tttԠU'kXR{{G`hة9f)7ljPL:phiߕ/Z~1nWW٘K^f3WU9E%YԷw _[R~~F&f){l~ńj q1eK4z{{[[ۨMMoJJ ޸utK榳 (==uO;iw3Dž,?o*J FWge_V]Skp>vpۿ\<1#ܺgkea<@yh!Aweg$$]G3CcJi)ɾ~um]F'"̙ab0e}\K~nk[۞sMgMA/@-*.ιz3y(~ a0U~6g$ūsf/^lh8qUQsIQo\*'oOwncFC1,h 9ߏ(~;>.. )9~sSv'IyywddySR:(+p{LEi4Kk7yέ<Qn#;BF:,̅f0LJ.Af~?yiFGe&%E;tt^^I$()ZYܵey\w@ph;`KxA&՛)\Za#&|l(yv{{]ۿMH c6zQI?"~ttt\Lǩʞ7RlwowB012eb,-%O$Rޗ$Lppx0 @o7@oz7z7   ڭIENDB`borgmatic-1.5.1/docs/static/borgmatic.png000066400000000000000000000070111361605604600203670ustar00rootroot00000000000000PNG  IHDR<qbKGD pHYs B(xtIME Dw IDATxyˑ]$ /"@AԠ J%xĨ`Ru FK,( l ,,{#.߷;oOL|Tu%tLO{B@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@( "h;Z@CM V[ 迃݁߮mv3h>>jm)t48 oX LHLOxq p5"T Ti>-+Q"j[_szTVM$x&JݪxjtW$09x`]Xcp~xYe({gs |OƟ6Z~>;b( M(w+c50xe;;ܞ huZEo⛖Ud1 x%#B.Շ ?ɦ[\Az:rꋲ5S)^3"jx6pSJAnW^5n/FySfErI0OhA: [>HX^0T e8ȘTOLByE90r.oݞky? 
,mThV@tmQШNP_K7eK-۝b#U es_&fheR\Z -+j?My02]L٘z et-%~^ty"K]i?wLP9#4;鷵?eׄTk1 LZ@n]|'̣ETLc@n6+0lSf~{-$|v83#E'9G;ruewj=*P"*& x w Y%6da)s ]o H5Sxr`r. S?qLڃޟLf_2HcQBOQ!v,poDWVEij@ro95_k=8[ < [AkX= z="|C)O9n>Hqg6oנ >ؘ)V32&U *6Y0$aGXڬ'z#sCp7B^GzN NX=.x*UtDxL, Y6=0Ik 5$+֗iK19uCQ>`O%qH}Œ097y=p3rKR8JN>aOWobIMF(_ {8fOX35G'jOq*QD戮}&*EJ,SrbOQi6$M뇵=&oaMoƢuLX 9x$%M#O>ʣ+K/u.f;\f'E_Hju,*}.CyuN'=\aGuR;`078fM4T1 ³Q1\גP *Xۧo!>E,/oh^Ga `]RL7ak .j$'Rxh@7U&3"ȢV-˶p[esEqI]$T3 B%X=b(JTcI[! [I0D9_$@[+~ejRp汛1 ļl71E mK<:di C-ˆX9[4'I2j;ZF=</8][A6_fIlϟj|^<$TcPR? BL-̪$*,~"z'܏ 7U<vKXa.av0k\uRi|l||]xHp#¦*[qĘn"P")&9~J(WPzѾ Cx5g'G0`dЮY+h}{Brm+$Kک`+VnPFe8exos?NLU5_o;lG>(}_UQBj c't!oC5/tη<9 rO F`X&noTL( 5(ӏ|${|d<eT_ޢ=&U3QyWT8cbw\Qb[M0qqNU!T13 Y3Wڣȵ!F2YhU(f6;.Ӿ1淔 '19 3;I(GV8ɶ?*//'b],k"Zfkя lx"2gO`ät`^JX,_lz/ L٤1U>I(_b͋P|<"U9*ue@K(!=;)_gXL4*?1ƉO 8 sı*(nɱ^ar@|FT{pPW| 4DtG4m ]Q1"^mo ]9%: wg kz,)^ y! W MP#ch"0䩒fQⰘ胼ͧXJXXf5yaPc^(;t~Oj8t\w;Nһ&) [ ,n;@{lv;jz P>j0ϪROYUPӜF? wD>ef;fjıU78cpMikb91ɵ x8 12T%6 ;]A|ԭ(̈́ {E%Y 'c'F}Sg Y 1\\Db߻[93;Yf;YUi ԼR~ wU 9zs+?W=5v!z,+CABA >Qz6TX$C"qi&U|{}Js,YR9f*R@ UXLtVCF*Xv O6r7b=G*K^ *Dʧk!PS~gDIENDB`borgmatic-1.5.1/docs/static/borgmatic.svg000066400000000000000000000032121361605604600204010ustar00rootroot00000000000000borgmatic-1.5.1/docs/static/cronhub.png000066400000000000000000000557601361605604600200760ustar00rootroot00000000000000PNG  IHDRD4zTXtRaw profile type exifxY$ Dy $.!AL7=#3%SU.\#*6j(zFe'z|{y}ۧxsQ.z49?.hs|}=}韁>?|P|̇Yg F~?|=C|?$yL*d5=s$>G_>mE!?m[8}'}(qE)N?~Dzo!5~~yw7 8~6m'N\S*Ɵ=w#𛭮_FDv[2Xɍ-sK#ts dcK'WFZ33N2н<ʎ/W"!~bO|SxOA}bi-y,QB>1]RI4[-%d/24hެ2Jrzܬ= *P.$-z@h-ZiסJ-Uk:O&4mhK/]{>y4aFcɤLƚ?9UkiNJUkmyˆ®Ǟ'tSO;3/Xr[o;gկY9s:k铵$k?־ NsFrIdytŞJɞ9YYfVcﵶ(2 JNyD*{P(}<zukR9=!{.Dԩ-QL\k֤i6Zɦn_({[#eD]q8O'X@-D4g@b2I5DsX+sr d6VKuw+$?c+ (Je%?ʺ*`<[ ό8 `fXY+T.hLe6# {hg<q,0]FL֠W+vhtBֹLV(1K<@Vx> I<⎞SaCc:FKj_Ĩ8۬k6_:%!5pWVt_&b`d֓Q_k]wLSw'ZcqZW> XK&u,qVBQ=ЙGCʒJǖ$!g]WjᘦB$0h䧯=ЍWq5 qr(H]Ա@m?2?[}D= 6вV 9k1c$tF' {ZGÜ\X$dLu_K UTp PشF k|ƂuWi:Yp>29cGg Ėβ|()&K9%pz̙oFvxb[ؿ#Œc:OCHxSyRVBpR@:QTy=ܷbxXX&eJ 
Eæu)2?B0%ܰ2N%tAyF3oݘt$- 8m|kac1)̃{ Qg'2Xw.9u` Є b}¡C욀0J݇Lg ѥS%%' L5ژ dK#P߭:n̔-LuhR8 M.V(_p_EQ 946[)y]+e2o*.5&Q#9E_AI:vkQ~B{q~ruCHhqW dec@cpt"B! []=.lCSZ"n~3*^sY}hrQV<~;>p&}Esu-TݠcɃ]Q频}u Ӈ4`&R= (!Uю"JnW];ˠ}Ymޱ+H,E \zG?c>K0Xz, k}<ü"ܤ̑ 'Hr?~陠Y<1~efEЦ?-Ay q+=tݻF~h$M;=">N6*s#34KH@fx` 4yt m]w験:3K6#dYLĈ*,'¹a-l$=X.hqXz' ɍ9Fp-V_tة|nYE?=_XVC`wrU1U[jloC:nB,!XcȠa&Tۂ| Ã؃ψ~~@@#bfzTXtRaw profile type iptcx=A@'%7e[{ M]%oC`@j3~s'r> +iTXtXML:com.adobe.xmp J=&> pHYs  tIME / IDATx{|\Wy/Glj%@!i ~ȱ-dpIuRyK@{i:oiҗC{ R %04HXۺM  [[z?ޣ,HAy^k?RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ-Pjql$qmlGCA q XkCɮ5 ltdUc=IrMQV[Y^)V J)QJI D)V J)uChR'V@RJiRJ+RZ(C'ѕRJD'ѕR`j*@R"*@RJiRJiԪs J+RZ(RZ(*RJ]ϟ;TZEu_VTpb@6:H/yyP=)Ndy;ҽMZy\Ɨ/vJB+jU}*mGTB˨OOOfyQ2djZiӟaSR1N'UUn%R2rVTmSYIS 7߬JҜs3[mg|P!en2+ qz۩cDZZ揑bԔ1 8R<#־hÆ $;B>H;)N8Q;\!.c^+yB>GwB-XOD&exx\SĚ ўjIyNB}}}v ?ŤrQ I!SB Bh? B'%nno>ZDCǃD粓>&<7F$=Y/ !~@O&x֩, J*+\.`" =sn`$$H;YvcI<O$ET_%Z@$ BK]|>ZLLͥ|i{FXccFҩEAϨT*\< mZ>,r"R?F`zz[:̳_›X "$A@EY ޿n&S{GFFfjs\uB럮ղ4uΫ6e4 MdZr>?^ lj#ft* jE뮻RTzZ+s>E9x:_X5"LPT[L"A~qy 03L[rj_T-4SSU"xefK }*s-` "> {ș7c^{gn5Ŏo@,b~BJ[wm]C: _槽0 fZy@xvcփspi΍ey$"Ad}239M_*(I)"[+Z D| M!#4똨2~/`f8{B0eR?AQ#km|bWm?VZڎPJjYt6g(<,d1 ?_67fL)Rc".Os9C6ƘS1͒fi>Woi^B㭽 $ST ") /3B_J%i"s-0FЍ[kϗ>S+V>1ffRW/ 2{DĹ4MI~Z#wýE$̼!k peǶ*~0a}s>Ӧ JB\ꌱR9~#s+X87'B@WgatygѢUM9ZiP_43#7diwI Q01izB _"ryxj/$+˗/߰9 !kt+iTCB-/Dt6|{Zy4 'T|=oFǩ|GwN(ß.9C%"L|Nb+t_B @>?tYZ +H%4KiELHeBǦR"ө cVvygml޼XX2iLin޼8I5@'}ƘSM :{ '>Q*{X B]X=VGSvA[جgF8|Zۓ(k$D!Ykz$q}}} sn1Gk p D ݞN`$4{ⵎbTcǎ.xQwȖFrl"⭵]μ۷f~` ~+3(9/ӽCq7w x[X!&g౱C l{&[5ЉxüNnf_ˆes捙 |Ul}Q^:Tgo)N3"d{ݻ`O;7o_ BA:X$d]zN@9O1gĊ:!!Oˀ쌉a ,` C 5!Kԍ1 oܱcGW86Zm߾f3CWyffc1Zc5Y^BFcm7sCX+[vu0/^"/!Ck 3?݊i(KQ.^Z+夛|92K: ǡĭTmBKgn_ޘ782GlQG=M>T`r 8Н_bC'K@+suΥ#λ[%c i-7{؎"늠U$y|R8 /Oyޡ5Y8>:ޥ1W$`ѫ} YlfK :٭ +L4c\QO02xZ41Y`H/"a#ٳ,ŮHE2 Dװ>Vf1xG?=Mw]yS9xuYmPȆ~0햗CNw U s%3]Љއcf}"YE߿"k!c̦Lv^ !8k޹W|"qhclvA~ lF\YoSqq\'`U8O_4.@'\ EƘ󂈕l-`\._066v-q 
;1!x`7c7M]wu`h\R5F'Ӎ󮿨@Q MAv5{O ڟ FLDH~)X erlG01qT7SƘv)6C4LŞyCVT!錹k1=#&6N;Zff:nrΞle<,@rcs6g~EpF3޷o }*<?96zqW'{m6f]Zdރ<6*f#!?σ[ ^ſqȧnҲ,5p___{H2M[ QWmp ewHʩ1Bn`{N$/kN3fa׹t/`f_ޓPAنP>T*|c/!ne^^d<v-w [nQ?kCgM-6Zd=3N9pN!D&Ir|_+Yp )Py6!Ϭv 7g<W #DFo0M 2 !"H=> ےgÆ # LOO=3==]8&6 P6[m>gr,ŤybQ'"_^%RuO<{) ٰE3UyQj(zQ~ܽk\~Zf>CA*oh /^]I>Bxdd$=ZTlҕ$/RWwgE$b6}z),ֈ$ ye[Oy{?83}+]K>X;R/tddccv3f?G|f6i6$Xk9Mݗ'Dz# gU{qdd|p藌1opI{Q\4҄bwW&wgDZ\{-3olՒe&vQzmϊVxs ]TZ$9Z""+ />3ӟ c9IWT2,9ky4StDc udOMNN@8s'/6¹i Mf%|n5xEJ`ΝNevq3;5t`);{[y/< .[-\T&'{c(GY鼧go M2/孍 [ߞs.B6Щ瘊 Fƒ[`dSEo9m|::.qrttj)}/ {;=wfE''ձFv0Hs-E~$ND>\zEmēc^$<"{ EK˻ds17w6S! aÆ)vv<| މ_cӞ1|6ey @5Z^\x!GrO6$ԑ-k"`m-<rO>H8m&_ F;S|}14jh@D$OiäMa!v>D}9a[~E?n~[;dfC:6-MAnm.9Bo}X{%BD$laLVZYeg s,l,W-[h4LNN 2P;CXi'Zj1>%{h`wn5e B333f9,Ɔm;eۋ{b/[/B"lpO ~!PWtZ#C;4mcZ?:k [a,j*,pf @y)P'k\ [\Ȧ|(u?+ IDAT3`n̥*m=h7 ~hT'E%R*"R#~q*C+!U{S;͕N >/˒G۹bᡇ:%[_4[uj,U.(F qxG usp"݁n{/NC'lIk62Eδ7R66t5Mk$ jCj`]y"#{]C+1dU|;I:DݽY hd Kp&a"9.:fvgthiE:Z}*ZxסZ:s.H{/{X}0c5̟[)xt6 md1IȆvR\~n!9ؑGɁx@(1$:ؽ:F;DNpZyh}ă!3pcnHvQtM?dim;#zsZV⧚gsqȷM:S:A;9{dM:R-Y#z 3XKJGX)M@Gt֮'/*k\^侼mg~E1B"1O^w!74( UPȚțUvT r8TJB1uo馹@L,so[U̮gR0jd[\ޭ|zyn{[[} g5ӏTnE^O۪86@yo{@XE<.ʇl_; @[ؽ{{5 "G"HYc;B!H83sZ-V!x!O>ZbC!.inXHG =P>޻N:K"@OK$&/#mq$ɽ}}}Q}}W> $YGD$BdI ÄU58*{AG|iDKރ ?=~퍦"Chiy!A6 ʖD!y>5d#V\V!+J@JS%vFy$oQTriIh%6;mn>f>;lyƘ^**Z8ڶT.u?0ɜb}CѥYa'BߘuZL>V*7== z5-V du6B켜gLLLZme8l^_*oHZ=VvQVo{sϴ $Ifq-Hi@/ޔ7:hWR1}}}vdddv c#|YL$"ya)qy%r DZ㘧-q,"1Mou^j55wZX)cZ{s8_6222GʼʃGV8~.@dZ)CiYoW,"0-HH>dQf>-Pt1[yз jorb Ttw/D&[;皮F3XkxW2Rq"F/$ozFWWby|>}g1{[=Di]U@+B0Dp:1=#{Qդ ]1y;r&@BD {p;38*O$'{c<r_߻kO :~>53ƜݔѭqChCo}0p y$.Z}0{g9{O6yx\=cSs6?s-PfٓU䭾ܸ{===' '@} ݴM^l%<Mv T.f:Yyɖ {c"b\< &ރK4C.`/fc.*j^Ȏ'FBӵZ}kID@d[~/Xk" ýq"O$31gJv2q2"3OyZ(%DW0t-cSRJOa?o:84EȘ(σ,ݝ=Y) }˭86Β#{w6![Ieؘ_`_v$_le_8{/;qi,?5W@nt0"*1&f>N8^jӅl%}Ebˋ$I~OBY/h BٺwA mftQ0=眀9{KdzV޹a_D/[_|y@z#KS/zZqꓭ8;@nG˱b btisxP)! 
!}I+{:~AV_`λ[5Œ)2mu'V;X]IUxZxদJb&GGoѩgq~ay^H u|{keıo>ʗ#I rm Vy[#4a"p^iP<ٽ{xDZ{B{0S ْNCz@Utzzֽ2>佟2Rt;ԍK/WVho8}Hn ރ wGZ'` $kw?4E/M~ O[˙bZi߮5CMo?&3spV |3G*o xV-'^X6: "rg򚏯Q#(xC}}}QI#Wjf'\=zs锵[ݙN1: ?|ݣBCjL7z"`c,316 a?$D/vZس"s8Vv c`jjA01ʒUD(yA$/Irw1aVkb dBV|N Q)bB:μGkDT8ݒsgps9ֹ(X뜻RFPań迃u޻G5%iZg5d>eEƉO-Y˅ʺ4ME,[MAT\"Sק{q'vyC$fdsGI_V8OYԙ;ޜ$ɽ m`GjI^Z[ʗ QA<>{@cjO%I:u\9k-BP_$XE&M1{p____-WmY;8Q||s7JSX7E8S`lt'oWSrQ+RV*^pFQAx"="xr{0d0OF_ iwDZ7%ocuW4c+NT*~+MӻldK! e^Gw=CWJkw3\PAZS !x\muQC*Jٽ{/yAZO5`nH =u0Q)*y8LJw+p6""f;5?F+HQT !p޿gbw+7<ƇVGwH$T*fϮ]wP׸ڨ tG4R"HT2!q>AT\!|=<`|S"(JQK~nd= dRBvbb&F_]OlE2Ӣ"ex@ulMwݻ>]$Xk 4W"cgX6_B6~E+Wy }Pw.hry:ƴRtV JŌﺅap.nDL0NFsd5x "ݶ50|%al>v=E cK+KMd5ΥxPZ7X@&ym>BxFQd)΍HzYBWy d#޹޿muWLNU*srMs#}%k@ooﲷ&$qVRO}5H^|aKQUG-2J6Jr{-œoG~x"{MDpPX%Gʏ簿 ӫ@|Q~?ݍA^QzϑL><8,OFe[d_XO5̌"^b](f8"2Q)*wٺ^yȣpdPzݢ}qA~%LXssn||& 4hA6݈~yw`fEQ$?cKyrrYRV;6˟_˘B6&+>(tY+o spsKI h9zJ눩[ bvf.iT}jj*͛nJW4}۬ ۭ417j~,1HY\\}npgW*uo[ QTŒKOmܦbv tZT*iZng (۩q X3HX_Z REplh%6J(-4 YN^sZcD#Ɵx}?KHlf6Q!k 23\t.G ]4Ǻl5#A$tz?fGJ]%8}͢%"]^7٘0qqPfl_;u1 %m[|V"JC 1&ilT1;o A/|>>991.)ކeyȮ+YJȔx}Ɋg}(Ijjm0%s5H`j-N@lZ$4|!|@B$7>sKqZZ$IG6}v9ćaO7٪,)䝼Q,s4=@3(J%LMMAW~M\E!iΟ[Jr |s{s8<%c='kiq_>$9F1o9{W/z}s"a>pVy@#ne6ߺyH {aǎ]###|K!Mc,c;d5h|vF<~Xrg$`B,tӟr]^V6 nE%*ځM16{!L1Uձ.kA$ _ Eg%DTp:'`=@b w^aSd|Ԕ===2۟*EK@t` g|Z(@?v&>01==]A !D!ȷ#4~)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)RJ)Rk~E SIENDB`borgmatic-1.5.1/docs/static/cronitor.png000066400000000000000000000241741361605604600202700ustar00rootroot00000000000000PNG  IHDRT24zTXtRaw profile type exifx[# D //r|_33?]T` 㧌Cm)~~>d^=u&Bw ǿ.T w+mt{?i7S-皣j9V%o̵͒Bw:5SJ|'(|-Oe7sWZ~O)RǓ|->vwFU>{Y料zxhlvcK'x4R7tL7&Ě-+9sSStRS: @p4%=6:!ĩ9ϞxHsqeGQDSORI?^BIsg3YbIS‰맗wRĥ`hbKER#"YS"MB4ԢI$̵Fqaw4=fɟÐaHiE Hj𣵃)E4Q2dҼZ:hUѦ=K]z{}< +~}1'<9W^e%-]}>nm{y)>>giɀUkmؼ@[v5~ǝ?wPV-?G8Dr:5QpjT `{bOf,LWH&HR~\ O[}jy?'5Ꝺkw'/ךkYH9-w1tijC66UB"A 
RZy搨Rm*ݭx*A=萓Nz*"*۩ڥ}l&5v{fLk7kYFH)`eA6(6r))$Cz; `x棝4gQPڴzi=;M8;D,BZUzݛ]2 qW LOaa(=Oo ܈*{aNTm!V F-h!r7Mj ;{@},bjcRmRXX4dž5Wi{7B=n,2![e85AP/|; R:6Kl$67\rv.gɨ &)mm# fޛbYlRЀM*:-4Ü`ַ>PB , yhkؿ2l0BPd%4Ij$YSM?[cdÚt _u(*|N"s_Z+-oՐ0K ؠ ~55"Uwî*FXP)Daxt?8;"<Бa/iFH4X y\05#H%d\Fԓ}r ℣3 ؙOZ|ve@zrGY;O gb@(iCd*U$gdiPdA: }XchF+ '-ׯs[TL5ㅳDCߩhېw̛9rFT\e Pu!5CC\bϞ[;0oే qEJG<.J3vqia -D)f g1$h5<G&vϙG$ 1LM.;XjeCBS)p8t72 *w<@{kNW*S+hR p~"trt)d'x(`S3Lثzm"D8 V&`bV?l  D|? UZ1!L`GAwd!fS~ ۥ79mN㇀h ` ޔmv;0X\x'zht`Pח㗦#p^PS'dg=~w hFU˗.a LplӯNF\B_-JbP f FQk)uiF/ң怍qaT{Tpx~1H;+L-ɢaSxI*"t;v0%kxM}ztM/.ya;j7D/KmZw#0(P}[l\ݿF9mmy0G 0 3PLA\K9N`uDa⒋,e"m an6` `nv6Ă `XVӹ TB^ t\ٚ6P+j~+B!'̸1h9EK w4@pu$n!xQ i bOqOm?BK \Fɤ-l c5Ol圄5aFOKb(3GNc@6:N"-RM;&.-Q<$a&|Yi:kv~ǨL5| #Lohe~4ȅ6 Ij |b=lڎBIy _FȤ.e&4g=_FZQ"Yy&JE tխA{ R|3r~#陭LFi:gw>h> *h(*5{giy >VFcd5` gRٳ f6uհt%DZpb FO'34V$޼Ndܱq~ϔ6C<B6r4lo9 #lH $qDw̶e0 b}4RZ2'DZI1&S A` XhT//WL"><"0gzTXtRaw profile type iptcx=A @ }Ohs]{112u?öwdtʿ2cxPH8-2*Yt^b n;M{dھEiCCPICC profilex}=H@_[ ␡:Y*U(BP+`r4iHR\ׂUg]\AIEJ_Rhq?{ܽ2S͎ @,# |E0$fswQ}ѫLYs'tAG.q.8a#' 6ۘ x83.+8*kޓ0VNs ,b "Ȩ2,DiH1U#*P!9~?ݭI8b@phlض'@ZJ$"G@6pq=rz%CrM>7e[{ M]%oC`@j3~s'r> DiTXtXML:com.adobe.xmp 3 pHYs%&tIME ,,KtEXtCommentCreated with GIMPWIDATx=PgVZV_B^⋑= dB1E8&¤ⰋӘPk&G2'EC B "KZؕJp$yr+f?}Y>ԯ\)" )"RD!ERD!EB(BHQH" )" )"ERD!ERD(B(BHQuYZN$V9O 2dt !$;8:2nFJ<ȴ(]Ge=k''^`_f4v7nF?FdD1{Ao޹j,v,{:[ӟj5uu(.#ҋyN؄MS\L U)X>Y^&5Suu~_ZIM$B:5<|_ c8K7y/5X+inLKO Y~j!sFc:#!Ӫiwd-fo*'Ȍd/ҩ5 v*'C6SF^?3 Kuc) pGqè~ЎU%|Wg_xKnǧW)-NPn}xEB,mfldmY ،/ݻ[[U:GU.i ?٬0<ɽR_5vbl^,\?=k+?=vQeTW<*eGak箃w2/B_FamSJdۼ|x-/Zb&U(xWT d UehR<͋tx" LxVzη4Q_,/Å/ ۱TGMd3(oc{eZ笳ïZ5P(F[-R['.tZ-$R<~QCڛg}P-sayq*TZ4HЃ([rYjū@*m-Ȧn.ėZػLpj9S˿bQCG:Ug-$4[ U)dI~fpߔ[1 3r}!e?) F&MjZ6!-O<w@UKb5 B$m;]ozGR}ESo`Nk[ c `O+T-ZP >"W/LwY;\I@_N%I5g8HfUǑO)L9`ykq> K)C/ 8 _e~p;&Xsd5VDٍOWtRNSI3 w0Ug)'" `f 6",F]€% IbHܐ!  
D>" ȥ|73" R9ZW;fw$#켻>ߝgp)y[שh;=c&o]v 1cնIf>}ȐtyȡCrOOz|Ռ'5޳lӂV7n9؈MhUfLtunXqI9-qu،zuuWk 꾡В2SozFY1" WOi8G ~Wfn f,zû-Hb$svA"܃6g_ZnΞD H:E_^]G\^o{jfvmmoFs$t@1kk׋{]t@cfT^AMUW: ,uuVދ>ޠh{uv: 0kijzw킍+T@Yi"93 Q7J1z!f3uY$Ŀ "9:G_b|g)Sbw[k+Oi_g3$ i+ 5J_qʰ[m^YH l(` 42 ںɌ%_ 1 ]$xJ]/7 #M ,h٠ F'׭ T[`mr٫.2) !vz?d5~?8LʴrȁTl1YobE u4iêRѢgUW ]љӲP1q (6k~g e6R~gh\r @:,Z G +d@Ha sŤBk)_H+`M6(ŽT5HY`^LR -3zB/@ʈlNzQ#WDPYbtbyr Ű?4^-Ӈzm[耬wLj\KY#Ol̜ l U̳e@pIx>H {0Oxd';][R$p搘2ݩy/L sf>?Z4#z|Tܖ% )O ݙp -ĕ9};@fj!'CEi<~v?0} *#Er*^vB+Jn#sʉ|ٸ] i%z97ہ_:ǵ{ݝ"V( *sB AYt@bO ANG0ѾxFnp!:@"!3w ȎU5mLrXDL9NyT@t(Wz e$kv  aχ˅ڬBxj8Q `ZHv ZN5 |C@桍52uBr_N]>],TJ/- >_?CRD U`8&URYxPj9t(Nym;sp:CM#*吭Mx QbJdX,r`Ag] p=ln)@ ^mWaJo89wvvI^bc`B)w|Q<ޣZf. ;a1c[b@OԖ40_jz~id,ԲKL uv9U"2?I5y] }[̆y(u2t9G4^rqxF  qĕgB8b5 *^<7Q́E$)~rJ⒩H 6)bVh2:UQ\Ŝ*U8 j@.zO101K^)܈|C3$$x%ϜrdhoGTR 6^|1to,KNdˁpN#s4xaㅱzf,aUغg \qܐ:Lș/,:) s;/򆂖\^m*@ '<z!ev)y@BYV65g4TB w(;SXMaQIRfvP8$c'HZ89+xOhdk~m̱ȁĤR|hy.&TbjeU9āo]sLƀod{_3e@td;@|u4fvm4<"<^("^xrfvmx,(u}Ax|yz#38Y2B8^qxLyƧd

#^T|W@WktLIۀtK+@%2MfIƿ^Ǭ9pHr[Iugޔx.zfvmͰ- 'wF2h% iޛuh ȋkܙ:d ~IfuE[Gh^_tk^^0ft= f|i+2jVogIos(շA':89~Pdj4 ~z=C{hxѹ)OU k@}bdh XPMX.4DdZG^P6MuaLorv<:Y5bV65IQ G(RY1'uJdjKKK<-1oKK('䴦-SRg'O L:D^Me(Դ"RIENDB`borgmatic-1.5.1/docs/static/mariadb.png000066400000000000000000000224021361605604600200200ustar00rootroot00000000000000PNG  IHDRx'b zTXtRaw profile type exifxڵi >Ip=A>?({zf[fU)K)q=7^"?WW]u͇~{s._m}}]8#_#t|8'.[ѩ__;];aM}x ÅJ佭6>xϴ5]S'\$5'xaXK9EsSFYH,5ː%W9D>R%w'3̥%3X?}?uO"F9G0I}$C0J2+o Cb 7̓ !߰%/򸑵2wc1YHAYZ9RFU$AE@n\YH'717^[ZTO.,$~F`h7ih.{=jnus:s9ܫ,$kڛ9w}vܽbo-*ZiסS|N=3<[zwyݖ (=VYaӖmk.^yӗY꿼寬7Sq5ΎC9#cf2>"D̵\,BU"[洟Ia\-sXUʯd2WYا #I>eC>q}&>jTBnVM)rhLAٓu葻GS99)4flG~ z)R.f$@vw;y0%rmOo52>o z1EXX׺\iCN_VY$IA%<aV7UZ\u 07.GC=MUfI:V'\S&@WEȑ47GO& ge$}d0`7{b\_1[S.,HMGϤRn>;'#] F̽fE{W- 06c+/.wSA }>ד^]GbRg'[Ct6-uzb[\Xkqo$!gzC 8!Q'>NvH_,X^(?\ⷷM,IZtX(Y cфdWVN·EL`ֶ̄x}\`Оk3imk)T2@!%OR< GOnjiudrW,eYi{a ]Ƈ`u2Ѭ#@4 [+ ʡ%,C/S)f7oHj:Uژz7Ff `(wLyAJb.b5@HLځɣ&X(fߎy1<xJ{CB- ]Q@aeQ-wJSO&vSwp5ryN':d`$bPȨIP$&i0lSI.aX';;zcP?%0GAAlIA78J!~F*p.I*Mٔpq:rMAqJ5s"vD[ԉa,:6ڪ1D%A D aV ۖy><&%Ǹ-W*~j(PwnAdF)>2rIwa}k=U܆m |AiQ.qohļO2ƔyfŦپDļp_PF"̉e@5i P]DGE5!֒5O [xB+O]@1e+~QYwPխRo$ZE`z0X H\$|j{M/~0@flp*@^`muxr@'W=Ճy><pAipȌZop21ß2Ӽ]p(9$1QH)8`i *h;1V"G/:=S'=]j/H.&Q"C`{:׸ !m(2핂F5hpWAC0ub3`FpQ<;7EBi`k!05z^-hvخ5+{3BGyRǣ_s[߄U>w-q^QŊ?hg|;$dvC[e+ßܮ<1AOTՀLӂo^|Cq* (,QOx4 ,EWEmo +hR"#xl/ۅ탳>ʣH3a/&4MXD:`ķ(OK<xKc|9J@b/ ,O&,2r!X`z%( NqS`SH V4o5ĽguLg7Zr#Hp)_.6:5RTksl )OQ H@dx75Bb,:BԢcq -kHqb@Codz tDJ\]@@6mn+3.(ԓE7}SIX$j%jp"Hp3j{a@(i'Ǝqa@bx]oPyV>)9p pQGFq+DQ"OtFQ *:Zld쥋Ft=GzdT?P wY |(=>$1IaHmLz򱋟蠂7@㟛ވ;zF5B0}l2Go;"7pPʤhQA*^yxű+EZj;N;=!B Jͳ'FN;˃l0ja<*l V bphgJBKq&P7#j;6.s2uQV"}nL G;k`fYӠ x\>\ ( ,=0c62L֍TDJ1u i,rr{$&Ax) ̠` 7j -J!̉04PNMR^|S :+OCi O?hu !80"+ hJ" B:A]Ai\g'h2F :<pW:;qx YpiCCPICC profilex}=H@_ӊU;dNDE EjVL.& IZpc⬫ ~:)HK -b=8ǻ{wP-2 nXTLWWt cPf1'Iq_.³Zst>xMArB PLTE0_# )4_-$$-=i39^7*';Js==^@/+GB^IW}J5/QF]T:2Wd[J]^@6eO]eqhE9pS]rK=s~zW\{PA\\UD`\[He[`LbMi[fOlXìkSm[vcnqVrZz;vZĝʧбպļ*bKGDH pHYs.#.#x?vIDATxnJqpȘdpxŃ1b!$$$K]0Yz]]%^\U]]= |@B !B !B 
!B !B !B !^= @  Rw;Z*A .@d%% ?BTO S/Q^ ez tq>w,S+&qWAچڻli^{˧Z8鳵bz[ጟ5=^~t ow_+y(-W[3>ro;!jb;a}޿Sh9cE߱߾~.5#Y:B%n<y%u) 왿R3o~wo {oV>.3oo{mHC~+O~20O_/A`Zm@{ߞ=J,B˻V`_2+G>v{$&P}!z8 yx~3?7Yٓק`6'mIm AnP#kyF>9๼#{'cB O:y/يd56JGi~wHo$8q;@rbk6#2>nC9&YK WdJ_d~@xdǿj`k Rx4!@سLdUu6-M.i@-3E Tw,Cr 䱴`UXrek_HC5=0Gk)Y%i9jlUFi@Gž=F$!F{V;TcgG} RWy$nJZ`<ܟe0vmn>F'k,bRS^i J,9V݋![*n@&ҜHX47?/ٮ5.ia]5 U,1M(3K=iaAvo$Jp:^$`# -IM?lq{:i2&VK:V*|,A̎DY8rttfB^V\i^wRC[0z "PWyQ۟`?tTX͹,f9@ ܌Ao y8Q'5oܷ0C f:)#hQ Ώw%6Se!0'0F}7!=@Ț"_7Hk.oe)1|-6E\9/@Kxi#Nɝc{u|W)˻oSi䵲 1KHȯT=0%IxH1pRmF)0K EtB:<]\K8 `dޒMVG_:kյDAY.~fc5ä.Ȯ@h+dm_YUl ^ͤ,pup;Wj7*z%D`z LE+ǾXO@`27ՀnćͶ*h BS Į&}úNe{:NnUf&hJH~Y%V|O;LQ(0I\s;U? @va;~wP8U)ͭQNPH-Zܥ. `u `N"Zq1x5LE7kOMpBZ]&M#n+L\@&uЍ'mwmYY@Aqx&tU:Һ+@{$e7J9x ;J~v?̃8&T39&H ˯;Хw|?ޑɴZ6gS0-6$LԺ8*1e_+_;ZupY2LX~¯;OJ| :/u!iIs<{P bvP/MX&xTTT@wLi(`>l_їM @0Շk@2M%{ 4?~^mޔ/4IXtX{6*&:g;~\ej|U[W AujkrUE7+]vNy5w4+Y}QeyuSc`QzD[d7^2ax-NL?R 6V0_ta,nn] jРxzт?m@tʚ5E @Ǹv>ȊetVg1i_ra|Rlye$A"{~~*ײַT}á9<.GAwPyU6SG8s;LjA/k閺Z/yz8;4xd1Tp^XP`3LqfThT&d©zǯ@wgBT0TU9k*U`%μrz fo J.kqKc 'nmEM`(7`@K' (D"]4 f "HN7t:/ Qa@n_4n u6%RNr#ځIUo?z!^ bbND[-lW1EN"ޜߺ/b}t28\?s Sҟ&-] rɹ zBԜU## ~K9*ڛfw*~6M$SXIM-4P ~_9/XM.,p @PbkɕqusE۷bi6 `d)٪Eպԡ,YZDȜ~8<ZtMv^sG69Dfzfw3vrotBtUEaߡʆf0]h5瑺W $eϭn|f=@f1RF[j.xض)j\u;XeJ8Ej {bK|p3-:E}Wf,5 `Z㛬JY/~;?xfi6>1m4&^G/ c֙oEA%ONlL7Q풳Av:ɘm+IoF ! 
ˏ /q$+@Psؼ>/XNwNu뽻{Psj90P&-ܠL&mǶ)w~ߐ G!Ŭ[A7% ?BSH0?MKbIyB LhB_/b 0z⺎%tXdفsu,[L }, 7=L T BIi4-yD`BU?͕E!np2PY * ʜ UUq&nNr{r0_"Df1M*~i^LB|ga`bY^Ӄ}S_SźU8b͉\QqXTSjD o.r&$XJܟ.éoS=Ҕ7M0o- dJEbƠ &N0CP㎍U-Jw\)UH՚MsЍ0*Էh-,<5D SK Ey4 ZV!Y\#H }7a!-/2Ԭgf37fS;D~rqs6c#S٘tUDɓNrok9 IӴIpA|O m :}lD;&!ąGBT&_%XtExqWe@aNaY69#dMq.T0IUBNHH jGTOy]egN78'$I<fS"08#$u+vW* @FmCef7,/L("Ԓ'5 ӟMRщ3O8>h d - ɛ rS=xQfLX-XoS@S@\ 1NE^xc껉Sa(0'={,ts5&Bd4/P*#Dlލ@d%.YJb"pO7ߣSHxILj;T:nL*eE9ZHXgjE  7T[Gc0|<+;)(jV$y.jP/Ra`4i|| .F,5Ϳ%#mBd_ws$Ӑv8xe;akH><e/իp<N>MOeUOiFɻb F.+R)f-P`AY_-+ߝ2A 6a?'I Ynt>kL\Z`Ae쉤Zs&:!hzUZ`p7k$_tg<0-D-zvR>M„lml\U,/DrLBຈhkD+߮Mz$ B5˚JEw{^5}h?=MqC|Z^T^*ơ(zH6t^٪лf`% XP]1ΆDm.%-E Mz @ӦK K jl ;0!@%Jrx1WxGx^(#Nabm{RZ=CGoaMZo!C3#%F*źu=EVrȮq=]*E9 \ЎQFj>ЍԭP%=c?NF͢BwZ3'֪_ A6Ux)9 y*w檈-"jF&M,v ^c@2 0 D3x(SDJMhbUTÐw *(vjNՋlD΁hIh.9@ 52f؅V A:ėWF% 86 $]U `Moa<]iTFV6BmEJ 1?`̠Y0A}֫y9TG~qi󭮔a:M`dnnH&cXxh# KP_o8ܠdm$J`cb#<$O"lfGc8}K <*HSEuSnd Bn FyU5- Cvu|*^t3$TQ9+C5j16+VaMU{X2%m#e_Y؇*_XNkVҸfb/n^9*[`vvԪgJt)Y;Tҵ!y귺C#{*/di(?uiι.QH>>km-v*>¹ sބkx%U6-!.e fS>t{Wxګa6f( ˜}k"z iY6۔꺬>p=:3AQ,aأ*/Q~&|עw{ Z%^+$o;fM_.4؈Eѭne uτ5<)7fEX"A"umx¸y[ yL=ǧwIs#d&6_Y Miu^`&uvCEI ðjsEaOaڅM "ogHt}"/Fo@x BGT\؂YIENDB`borgmatic-1.5.1/docs/static/pagerduty.png000066400000000000000000000472131361605604600204340ustar00rootroot00000000000000PNG  IHDRNhiCCPICC profile(}=HPOSEEE2T' "Z"TB&/& IZpg⬫ 89:)H%1^xy޻j%Ym〦f2әU }29IJ.ʳn5k1 2ô77m>qdx̤?r]segTr8L,[XiaV05)∪/=V9oqJ8'a(,s0X$PPA%؈_'Bc>A/K!W (Cl䄗/1t|;N>Wz_3WZ.\Olʮ%r=Sּ5qhVSϽ;[oOc~?#Ir=A pHYs%&tIME y_tEXtCommentCreated with GIMPW IDATxw\TXzҥ {Qc ĒDXr$Fc,5&zc4WcĎ5bDD/eܓ-gE@9sf>33S( .c F#   j4 j4  F#F#   j4   F#F#  j4 j4   F#   j4 j4  F#F#   j4   F#F#  j4 j4   F#   j4 j4  F#F#   j4   F#F#  j4 j4   F#   j4 j4   F#   j4 j4  F#F#   j4 !KPxe2z/IF5ZP={K$>r99r9W"H;'O۪N}A&8::ZYY:::v\$&J%GnF äƃ 9rMMMaB뇍i&Bill #('O55uɒ% 8;;/ZhРAĚ~z%Si.Wk2Q(NJMM]nݤI? 
K ߿ĉ'L ``ĀF_z>\nݺ(X6xavyyyoT*}E"4@/8p+WnݺX0@ U` ly]χ'{ݑ6k֬d2C:FLii-[NzuvoVj켗׎xBa333rT*#:E0Ǎw޽`)g5u B$+**؋1:ɫL&IIIϟ߿?1xFw}ܸqfffP(ASSSUUU]]]ee'O4MI&ݺuZ8"+iP(111W4O>P^^^UUUZZZPPpR\.:tLJ'_T° :99z?s۶muBM Q dffO-"s:lHh//={r\./+++((-(({n|||MM G& Ǐ?rȸq㘿oC^:#Hb1L8ۛa#G7ܹsqqq^4c?=zL&9+H`F*j:s^y7SRRsz 7Q8qSFn \$"? d'{]\\bbb-[_E|>0wft!s֩q:<1r}z{РAfڿ={LMM_tѣG_|YehlAgmJ8q";9ydaaaKB!Ht'dsy@@k_Zia6nܘ 2J-8@G 62 ]ri,ƪQ("cɒ%laFkܬ6{Ox`̘1ϧ>))ai vl6Tv´Ͱjd aD"Q~222&Ou*95~95r''~uԉd2YVV֓'OtZf48 v>|}/ػ9VIbڃ43_uP /Q٭$@&_~\LO0WGEz< {VTTD,//ɉT(p" IHXIWMFBH$  Ðy+9& _(̝daZp֟{dH3R]`cc ~ ~)0;w3CSrԯG_)H5@q>[ t>^.]J-P榦owER%%%-nU#ZDfdd@rrrHb1̀rhsRihhF'O!Kӻ166@&LÝVGF?Ԑ*jL}˗R-|p5D0l0/DEEq|d%/=h"j裏fFDѣ~~~;9r.sΌ3(n5\UUUaaa\ZKffr(idU>iĈ]t܎-ѣGr?zСC+++n4[r9} }O=zAg=p1..>شiN>;բ%f۶m ,ؾ};2 CnkrQk֬Yp?Os|W~W_][[ J(G$h]INNN5k޽{ 7|+III0ހ7/SdlslzڵkZ/~a?afe/[IR<~۷|x~eK@uDv ӧ/_޽{J yΝy-Z#4uA=(xٳg:p!%%eӧO/**'Ep"{}ݎxW^cǎ|/Jվ#5ІUԪfܹ3DJ̙3D )O?!!!`v#;ISU[$1|;w""" )gkO 8~"nS\zz: c6L@'MOP!۶m7oA~fssѣG_~U:xǏ=|kny;;e˖1jv i&&&0O?ȡC`FQ6J? 2 k#655A5de<~8I.\rڅMF!!:?~Υ:t,++sF뷈#߿?{줤$̴N) '۷o߲eK~Zñ =C-c@```FF[[.]WH52O Tۤ-fYzY"(""BKJJRSScoLJJ"E,/^xǎvvv-9NبVٟB___H"!Hz{6377Y+o߾Mt Q655mhhH$ȨNQ~[[[766J$hА|>UJkmm=lذ^u֭X\Clwޅ8Y#v_ .qqqjMMM%%%عs'_>}ؘv ꐐ~z.|kΜ9cǎܹqbqee̙3:믿2 !ƚ@ qرc?55 ]hQn<<<5...(arС ?g컰UVٙ^UUebbPwmǁ!!!D||X,(˗a<;& Ϟ=gQ~a'N411QyB yѣ՞cb7o2DkXOw!֫W/h:CbΝ <ƆJ?#Cu]O>`Щ=fjllЧO3gjZփO/^BdBUXXH6)Pɓ}}}t?**js̡$/޽;""b$~@_2djǃۡF5i4yފ ++ X,/xyy[լ:u1b&&~!CD`J \)NϞ=6|b'OHsss(:III7oޤhBغu41 uʔ)dw{?Ѥ\ ۷o4Y1tPgggbV(zxx3ܹssᒢ;ؾ}{bb"ė9r䣏> S\LسO񹸸L>bӭo݅VꚘ8{젠 ;;;A)0)ӎç) Ν;kP5W.堜QͻvJI >}ӧ:'7ndddhRGEEYZZ3ۤI)RӧUOJǠA˝;w=zDQ%WW?b0SP餲ɸqkMnٲE*.J֭[~-Ä`ҥ|󍝝;{ӳK.֭[dI˧;wFo+"ӷo߇j-նmݻǴ8+ٿtt0W{l}JRqv6pRBh=ێ?&NH/Rvvvbb"2<==cbb 4%$'O[SL'233(^ySk#>ngee5jԨ1cP VqF K&}ׯkZv"%gge˖͜98}%$$aqM%wqqYvɘӭI$؀8 L&Z8_umǑV]=C&nnnpOϟ?0#>z 3p@ځ)S.1677w…oj'={6!$C &;g]C22_>rY\P 2/022oj|Qxbr?`") L28Xt:"On$$ɀ P }0o1x`v;C⺥!cBE"vF k!=`S.{7TmO}Zեmܸ2ȃ]ewwwBzj0 3hРݻXw5;wQ }K pW_}eaa_;}!!!޶!(`CCÝ;wRSS5R( 裏);Y\.3gN=5v9aJx<>}ҡ̓‹D"ʮs;> =$$k׮ ۣGCʄ>bJCcV umMMMO<ÍRi]]]~~~ZZٳgw .2O>f".P;9 
eԹs={Raccc]r.ٰaIvfi;k$ӣG`>scccȷ|0FE)ڵkyKKK=@)ݻGFFrWLMM!6z?Kmr$**СCZ`AAAjjNmx#sss_ib m޼y:B]3g>|(V&Zjs0 lݺښb^ؘK:,4r*}}} ԐNNNC7r( -@䀣CS2eJtt48 C vk:yM"$$1Dֱ : dG3 3|p߿?33300"pKvv Ė6l@mmZ#SGDD"(HDzwlMWy3 sEFۑř3gt!ctttpppjj3ܲ2SZyAa0YY[M>ї.]0`;Ov҅a%_v5 2֭[^@* }J3,zzjW^}#G*I㣚U-IDT666j5| `Q4uPGZѐk5*?9}4}"~Cv IDAT~BP~ر?H'H3 DZh%0 3iҤ////lll$g8:H;M}jɚ^%_G =n;vx7/*ˈ&j!O6$IrOi k: S)..]'<} 슃쭅gQ9.91 sׯ'&&nE$^Vى\O~jll4335 宾>7/Wk$ۇ. `{_YTTDqtH$#&Lj ,-- BuHoq /^zwN=$zk:d9)ơdfxKrY>{n4J E[r%DBBBdd$D!W2p{.I(VUJM)DUaT*ݲeKll _ӯ+}BG^vL3=TomIrz_\tiBB{L[2www''';;;N:ǔ݂t$. )5y^8ZЯqww۷/\e]]RV}V ^BS EwU1ZS,2đKۥK rZGMXX )NGhˋKHHi ԑu߾}}||}||mll4_Jaa!DI4E ﬤΰhѣGNHa/-E{e[[[:o%hMo| .1:Y"255ڵ=;wJNP5 EPPЀnG@:Ff߾}w 4СCG 6G8fmmM7o3=}XPm:Bm799Y ۰;FL&ve(-Z$`ީ~֖Љjjj My<3;wn$*uVV/T@P TFa|݃L@@&>wRS Ro͚5aaa>>>DV [pk@@Tٹ$炡d7Get̐~pH¢B؟G% sa/^qjCQ ؒnKR'O& k׮}ɸqzڡ٪vIo=iɒ%۔)SufffFlv$ͺnFFFNbbbQQa=02+v'\͎8>>LSVK.z˅U'oa{#$$dذaȑ#΃ʷo.--Um}􉊊*7n+ix - cbbBܻw/** t AÇӪ_~%V;M2HD͖4HR`H,]v =w9???\TTKW + E:4g82$Z*._ޮ~n!>M?ȧM< \Kt>**ۛ2\pw PabM<  \(Cٴi5 Ԙ4Ҝ ;,X$IKqz$xSNjz*D52iv*M|!yv06lp1ih⼼T_i$T9faJJJ u/ \q+nNj XEiiiR)W{WJ+v?yUy)鮡B#""\]]@vMMM02tW=,/'= % OwbqРAylHlf(QmmmffffffVVVVVǏ?~|CݴCX$/hѢf>w`MT9ŏ xdKC7oPlx Ì;`^Ӥ$=u8f]55M!zÃH$j|7##RQ122ZlJ0~oܸs ɟ9Z"5K#g577$1b;/H$DS(L^ӧOwi!CFM2Ψe{{{&wUbG8FYPpXGFR ݑ^__): }\'H)؇)W tWVV&%%Q ىT?~xx~HKR##̘1ݻD_J݃$1kjjN>p“'OD41kZzǎcϻ[2#|?>C^*J/ެT*MNNްa{ァU`9w޽;ezׯڗ7oބHa@m bcc)$&&<}TrlG<>O뙌<>CE2.V^Ms TVVuBaLLL~mYbM?/]Կ={ܿ6-7ի۶m7n͛7rqtt{, ,v.У@ LvSٳa) q/b޽{v9s&֣gx ~ uM-[S{Ÿyպ:{`t@ܼo߾Xp1 6P3c_zBJ;5kJPxcǎUVVUCח.%Ʒ#200pZYWW7w˗o޼ӏ?Jnjjjd2Ễ>[e2 ü$;Q{u] C̳5[H#}ҥ͛7ͻjKUad{MMML@>>>d§iҡ؉O83-)/by@ϲ_bǏÞWMNDQǏiVSBh:q755m۶mϞ=\޴B'.Ff͚5kp/TM9E9;;kI T*U([nߍͥc58r\"455y'O*fF[\+R?^{uuujk>dggܽWHb[[XX3QT`0h":::--M`F>ƛ7oKRTJ6}+K.]r_~\rR2'OTСCz[EFFƫJe ,kMMMUUUŷn_7n[oEEEU\WUtIRtಌ |EEEpL", ;w 2D'|2@W(gΜSׯ_WNZ">h}@zv4P(ӧltرc`)-|>_(/X0lr֑M<'r{Ŋuuu#|ٿOݺuꫯ9J[har'>WOֲev矏3F$鹪CWXX?YaE ͛{Ng>]/^\d ᲭK>_WW1iԆ'*GQ.{yy]|ع\4hѣG :t(111>>>""V ; SNe },ujvttJ_tkVX={~bGPٳgkCYbE~~~CC/bdd? 
!&; L[ $11Q*Ν;W;ZrS :a6lpں:8Z___WWWSSSXXx…KrSk2ډݒk|groc :ܷz/))!oǏ믐E;? ݎ& P_e-tO>HKR%z-[ yv4kmm=k֬]vQ2b}_~ʕ+G gϞꫯ&{њ666uqe2Y```\\\Ν׮]<%b'\2""c3Mc@ JZyyO7^UPgQdddppMEEE}}}JJJRRAGw҅؉|rlޚ^oGG%K[`sttkN.2dĈ%/wϞ=@6lmcccEEEީj8p(&>4wǃj]lr7CPteƌ;w֭ҜamUJGziGٵ{ȑ#!4F~j* E4WPϘ1СCdLaz- BLfnn~޽C!/UTjHmذѣGXkBznvJS]r%**PN$;;kqJ=PwW(jBJ9MGֵk~a Z}얤VAHΝ yI:+HRpUs.)f̘a؅+o&55ucƌquue1S(Z|Bz.^y7n@3ze2: ^-JOO޽;^&'Hm۶Ei[ꎽ… ߘD"њ#?6`iGFFB 5A411 tqqR=zt֭%ݘaiɓ'׮] +~d6 Lgg9H+HS .Ǎ5lhdd$""",YRQQ +** H۩S'[[[@@F/M9)v4ζy<^ddѣGwرb XJҚ{Í><|p8/ `#d ,8qbxx9Yr,{We[ 7XXZ2JL^5볰1cFYY٦M]6oܝr9N= Ѫ V-- W_}uժUE$CHӷ0aŠ+ ~ IQ655D"](GSN:uCi.jEptt[T+P˗/0a–-[I)%<̙3gΜ 8 \%gg) zSSSkkݻʊ]l6ţP(n8duݺusε3nԓm999\~˖-] - AڤSxx֢v f- d166$jseIRr :[.^xĉeeei5H ~@ɱW?R]]]RR~ro;j}ҝӬ ]IDATobtb^}ҥK㓒?~ )9"!itt!̱F\(:;;(L["p|XMM[ZX! BH$ pssS5ziNr죏>|_XXXRRCL@@Gn"""LMMgpu5HUUUiiiTjkk%Y;MMM% '~Ā=ܽ{֭[J_trr rww󋎎ڵX___)cK|.]9vUݻwSRR򊋋kkkvVܹs =O>+KKK5^;"uGꢱӧ555bX"+@mmm"#8Dۯt,d- =:qDJ&O>dzySSSiiiyy9X422277wtttrr"[C˖V):3sZg%fʦ& [{{N:g0`j“*͍****++# jii A8p`ڴijҥ7oV;]c?v9Nٛ:ZP();͛7ݻwЪc >3///$@_h*7׻IhӓY\ܓ{B¥rZ86PUo 0=3%򘦦 TZm%U!%oCCCeǶ&<r믿k;6zh2$dp}6}A#**ꫯׯ_gaK$v a& UkӴvVbׯ_=7nC$ -Lk5n O2~tqq!ZߓaL?j2^z||⨹(1Ïtp Z Ia[ud2Χtp=e>qę3gM|>رc /]xx8>Ɨ ˗/_jtk7z̙3SSSrD4")?ٔdpNKfee_oN*6jt48.:w>>$X(a9!{ڵkۧR7s{||vU>l' pPH1q>C'g'îcGm;ȴ/ 'bG˓'O/Xm;QH$%%%N֭޽{)nvZWWW8 ATa*Ŭ/,,_SRR &:u*3d!_ȽwgxK.=r%T o3f666}GgbqeeeeeeFFF||6 Rڵkwi)oYYR/--KOOg88!n00!jh .>G_sXXȑ#ͅB!onnnnn*//իW/F[-AݻPfǎ[f mmllԩS7mDFw0{ƪP!uJJ d#C˽Kߠ;wׯ5N =3f̨ךUmF?[[w>(r<88ƍ'1pڄ :8iSDѨѭnJO2l'N%1hZx'O޷o_޽a#ǔpep.ógφCd3ϗJcǎvo +xcb^GGG_ d1"S`Gݙ4iҍ7&N(J[?9WBL&ҥw}wԩgJxU3bFA]{RPAM8166qL"1kstҤI#ƍ'NwQ]A䊆ي /=dȐW_}_~@kՄ-:⼼;w\~$Mttt޽}||\\\<<<`H7My;޽{z-e!!!Ǐׯ);dkw4kZ,744TTTWWW2KX[[;;;D"+++tJ7[Ο?Y;BiiiiiiiccÎܺjt_@3"l EK7jO|P( UA?4fEy v7ϳOlyZİִ3| 5dTdi6۶F#_ j4   F#F#  j4 j4   F#   j4 j4  F#F#   j4   F#F#  j4 j4   F#   j4 j4  F#F#   j4   F#F#  j4 j4   F#   j4 j4  F#F#   j4   F#XIENDB`borgmatic-1.5.1/docs/static/postgresql.png000066400000000000000000000754501361605604600206370ustar00rootroot00000000000000PNG  IHDRxi=zTXtRaw 
profile type exifxڭi$7r:6rf!&g(SYKVd[uſ[qj+Ͽso| >¯M_r>_u__wu}Ӿ' f=QgQt;^o뷟s8_.JU9пWbJ|Z?]0_#r|N :}_{UB'oZ{w|ndp ?n{\zo+|T7mnu:?Hoan8 %xbk+ZK5^S>эXt.rc-]z\q82NFpD !Z uN9&:h̍bZ93/ X6sB XC V*cNМumS;6Z K%UZøЬ-,Y6b՚u%\RxjTsZj:ZjY+z=zcpё?xaƙf6ˬ>>+/[eV_cǝ6vu8riqM7_n㏮}k~ܿZv-Fk\Sщgt,@ǫ:z[9s1,JSsvP`>! ge}9tΩu_7]mz RS21~n뫴)nim;7AH}0'KN3[vu2 yf~asu)ʖ.$8.#uk4*AcyZRbck9F3O֎M]Ӆ4!m)O:nROj>W?XniA mnufa֝+@zF.WH6 WkOj.Sl9IJK\\jkxm0;t@xr=?pODX=i1..Puռg0PY=t+\,RX0#U@#x,}}i"87rИ-*[v VCGwQ?;'ĸmnbɥ @g4ƪQ܊nSm<{0*3]3VH=u]cb{ sep{~@ܹnJ,6Ix bCJ1 o3P͚w +Ka]PyA0E:cu.}:G=gZ],J .,z"5డAF6GC}Nd>l=JoCweX7 7KF%U*MS%])8-DP )Van4.\0a?0h Ӗ<d V)v@ 0B!.P;iK "枀p( P%miմ挧tz*Ǯ; RⶓFaJV7kNQd8٘L'\rf.u\ Q䭱, .X6p1f̗6*r5o"egpoFG-D( hV ]c(0:0mEꩽO˼ٸJhFٛ۽+\z7TR  I~BBr#FQzt;@? 7`g* Mm3<7EǃL+;2*Lx~a`?=d5`:YcHecߘKckn,cwvk<甮iHAubDI@m6vύ"afjL%pmo6 S,qF7L+YR~tz)Ps7d9-Z-*%lfdC@p@yq1G9c+ C8@gq0Y؝<wĐ@e0@m3A';BQqp^& K! f7[זΪƷ;s}C ITwEmQ+_MIv+`ERD>FLix3e qHf 79&} Q;@(l|6Y4 x|5{ y4yNtrQPI(fSL␃͹5 A{nÕBD;b51F4 24]r/$ _; M4,j0\$=ETwe ?|"ݢ+݀\?e sѮ}=HFu@oQEFü, yEmUQ 0䯓2!{] g)CP.3;B;-\sVFњd9 `b"q):M"69fQ!M/?循[;*LSdT$RH0Ŵ5 SL_@4`ch^0 0L"@mNfquCبzn=''Ƃ_^DT53܀SǚB't$P+RRJ QC;B3Saz$zbĔѐghASHnȭB^ucpD3RA1`(ӮDH G(CläkSƸci j&mݶ6 vj6 dbiע c﬑֦f Z6]) HgH`wopEA+Hc mIWȢ !=[2|\ðiO'sіAz500 X1*r hgaS "X<ץFedXp2#6ȑ`)82'~[(d$87mb0 0Dz^Qykcԓj *LK,@ꎓQ$&@UdUYF[gDYPHT[oax*X*X,^vtn4NaO .dLfy㚽<&ɿ_oQb0$+"`L3.ɱhFAW;`G? /c#@\H(1Jo/q -mٌohbӡ) \>xyY)/-h"P^sHw6C('1R9E۵"`X&c*eR'b;a)K֥ɿ2wL3.aƽ6qܶTx@|H:Yѓ~y6DZtWK[t3( vo`p'ܴ1jN2ͦ&8M 1g$Xjq'="4‹z7tfN1V<\LZ9ֹw*:1 XDRI}'F`BMsKUW- t0m'LE-.FZNPExl=>4zBKa}s$$KM<˒l89 ГƝiqNOO)9m 2rSdhFl<=vDt=A'R%@YM<;Ď& P1m/׭h䝂'i=[X"܃fC(:*f\nOQF=700&jqә֛b)H#8.gB)!4fU'\p|QnAXb1(śb,^9Ugy0~)룍Q2.5aDU&o Cbq;rro!McӃƚ'baђH^¦6?1fVk+x(1P*=%jV¼FہmSpw]R>$AVN#^Yhy5p1٨1ڌ'tH[z^M޹LB)9"lGH*3K#? |ͦ?j2U9mIsRMmn=d<5(.%n4B>.1`Jtɺ iv|<7p&7 fUak$ANN%!LׯRm,=m kxD']8v &U<e.qvM~i="ENrOJ *"+Q ɥk洈׸Nfd> YI$@qF9ٺ"ț\S¯gy(`;+~Z{Pʪ=:c8K *KEOtc,l`_|u)}CyVDPBezTXtRaw profile type iptcx=K@C#1,ܹ!6)rOҊMpGР[c{¡0uF"d nY{UrYO#iCCPICC profilex}=H@_S"vqP,8jP! 
:\!4iHR\ׂUg]\AIEJ_Rhq?{ܽjiV鶙tfE =CB2YIJwQܟ#f-Df6:Ԧmp'tAG+qλ,̰J|+M F W% pHYs'0tIME ;bg![ IDATx]wXT]wzwP{Ekl$31Eb^)S^1cĂWP@A@@: L/LaPx͝r̹u>{MPEPըFEEJKKQYY Bz֢ yyyP(8p OOO @ `: DGF\.L&5=p8RŨJR4=<==\. ˑt߿wnvy{{ǰa ___f42`AW!4Z7o"##HMMűc #F 22[ ˑk׮سgOKbb"uN3gX,ft2`AgZqI]UUU$$%%!22)B^^<BQQQ<<<wwwxxx@(B*uuu5=rrr$ɒ%Xt)"""ʀMhj'NUpf?+PR Bar1o< 0=z@vv6>slڴwBBB777 PTAuu5jjjPUU|Faĉ2dbcc6NJ8q9G봟OϏ 0`ބf0p|ᇴDqaܸq@PP<<5/^ի࣏>BRRR0`Aheeexcknnnx饗 /thpa޽jkk4!W^ػC7@@B(U,u)Bi/Eȿ]py6b{a`D A`aL,YΝz²e ˀ-% ,Xז,Y>δɻ]̛]S5rH?wB7O׻Z2FgΜi\Qo# 2:іhlJswBT/B|7b_Ǻ՝,npmtx!JMϟ?~)̀ڵke:r|6%Srq!5re%Ы7zz!"1~tvJ ~?3P[jStrER0+zY 6#ؘv&Ҋ ̘1.\06l0 cF1 ܍d2fΜi%ϝϾRy3q=з»yڼ_TV܀)C"XG8(?$BrؘOM;Z-?lz-22Cpp030`=B#I+WĻ 1ŋ6}Faņ8Ui / QVSU}o7 5.3GYFgg3Ww}\'">||MSHRR~gxyy14;toݻ/6Y'N4VсVYSDzc͋3lL͟)4FsnxFIYS m.HR}rºG www$'' P^^$|fD3`!4Kcرt9sXWv$/GS5: g}}Lc^H+k>HqQ!eРAh48{,ƈHFѣG) gк[<`\rPSHtۉL|.щbCpzl+2,(v2x2si7n^s #}oUյ`06j@7o߾lٲL5  0vΝ3z{{c٦ԼNm ӓ j>4wTIX.$u=]XtxOtJn Vn>lrr\,[ƍnjj 0f4n:E6 4y/wAT%;(0Ne><] m[9}̙1߿ 0`-;;TbY%OZ֩cXX4lT˰bnP#IzZ|t`vJxxꩧL[dfMh&MBPPx穬{nHFf/oEbarJRrMdzf2=ohh0i\2`Kh)))9g5r%NfvjÚZgU2$7?PP^gzѱ.?{[7kRɌl >'Urs2qReEh  0xMVZg[GAV)u: uV [g9 J330`XhM E}֙BCSH8g$->UVVZČl A4JKMDBl(z ;~NxdDz-2ݻ78lvK:u&M\'1`?ЦOopU̜9µѐB|s9IH:>tUͯ?b*WPWLBX/W1}a zt:(fT3`&ӋW^5=pU"v`Gœ~. 2 QP^zTIv#bCg6~|lNf k[ͲZ|^@gccYUVZff&t:8$$ 0x r"ʲ.êsYh* ~</硼6_ڠF*컐{a㉦2-uńa\p[X~x:EwƨMF[vMl6B̄fnfT3`&4ˉ0??Z֤ Y$ +琜r&Y,[`dl(XiSo`cez_wv@@b\sFw7NF{}}LJBs ˕&Ǫ0`!4h4"''$Rܳт|r5{F+;!PܜxxD4Bb~R?p9l蘾cP?nZX-ޮxj\^Coɻ6Iٙ͵ƎۮMPC`X #CE7nVzT׫!q,],3\\ߎ\U ~Uյ+q j=:\6$|.=*;ƒ|!p۵ U`( pPu!&&`e"ChM_vjnTHjG@k+FDAY-e |G4ADie2)v _γZ-ٟjeI暎; ,<4$(@ARPH sŘ>27mQUHBIn8ù(˶?)P'\b O w1vb46gə j&Ꙣ( $rF0^=~.`~F<=BF>ݱF&Md"4"4:)@L  Y'2 ƼKM!*;,<.jgVu7*˱Y,J{z?22]łO+Wm{/blxFc~#3d > T? 
=ļ~ ,P$ x7F L{-A^i ~2KŎ ls8q&vcnLw<7+=yD9V/ dqM&Ẋj >@H1潛esGvQeRk2c9{.bɢltU8֯qaV%͙3]f'<6o6 I;Ҋ0%.-AF ,Dž['2hG t[yB !ݮÖX4mx,DO.aᆣßzˊf -/ǖ-(\&1:]|u )tAz{;UIQ/W$|ܝc:2nd8}ڜ_w`WS׊1ퟰ9bXX$fݴ4X0%,3 oד~xedF;p6؉]p1߼<6Vtn/))\n.ٳ܎&֘ޖ Bc\dؠu&4G-d1qqq ENwAzDE~:x]Wk^#IA5JtE*=5h $X,LѢvn.敃j~t$j)uHU萭ԡR`Ds"E\9%XBmvadyQEu/O]|%V$Hbu{5-ݍFK A"c6s\ :ڰ{.kl9!.ٸv NvGLށpQנFT%8~u 5M5x;A"9܎:SnBȱb4${bDT0z{YćJGI biىW`Y~:$wМc۳:M]YV[oz{*ͺ">pz9{L*( `0 99tpBp87j$r׽&)*cY)lCtYkxY+SNfAƈ7p.:-0?hYϕ߹ƻ P#WyxcTטX}搕WOdBe.eZŅ`xDM`Ƹy^\C粰|_:"<4]9l|@T-`CˣG,->c'Mq E;D=p) w,?,rM 6#N< Lf:8p`tzhQ.|0~X60UOe13Kk208[XCwlCftta@P,>g'`xôd#WO_pz2k^X:w >jJpp|.lW/lYa#&Էc RvB)̕]; Ng0Z)'R§'4eu O7nhz޽{w6aPogjU;Tz5 +VT%ո]muX5P@T*9{6VnUɐUP5\d#5-) 9RۼXވâZ=NUwBZ4z|D2U'.zE.Mh63iϞfaK.AP FƇw;-Z{XhI<.]} Lژ,`pd d|`grmzx9a;w4k v 4@qԣAעs4:HJ$_?/c*@,@O##ѿww8KD`)4(5+R.6 ,:H:#cz`<2}! ͫTitH-KOL&<6X<1&9~*+v<.v/Iz^Nxr@8 wx`xz,$GbOEk\{26w6 Hz ze3JYI@^?(Tؐ$Ų䡁tugjրX5 ,sq j$C7BsXb>< "vM ޫ9s &LHh{:1ua 2Nh)%kPݩXoM,Og.bF[EnٲZ6]{/Lb>e59ݰgjUxasMB"z fCbp'5 !28@\xOVrlxqxt\+Foݧp85Jh6YܒjdY(4Qw%g)ȪEEGLb"q 33N%)7üR60W J@襐t 坥OVTm=sj#xY])70;^~B#*85ur)N;ӆsWq 4Z .9\<ӦǏ;اCDp7S$E^iv$#ynǞͺf]<8<כϘ1Eܕѫ;ނp80Jom'ΡV M/C`(oV"ͭh*Ɩjx d*-lI) \6t:RKxg)մdf>_hN}Ao0bQXTm:ŃBXZ3oǡbp=xxŊ:7 bv{.re}m~?۟&(ՂߤF3>Zl Rok$)ФۂLKa!@c=!kP5s. ľWZXs #3g ''th"G4GOԝn.,?JI+%<|/)0MkPCK'Xtx;vus:t I \d;nU;t=|y'uOCshLx'E @` ÉkLFoꄡȕi(X{""4K 4כCûu ڃں)rp1t<'!";]jyIȎ` ֭zK5[ IDATv*&ԓ~2Yfr_;F lB|>S4cI  lv"a}:=OmZ:5RkTHQi^Tk$*4~V*^$RhW,sh?(xIĜ*{Sk&Oo$E lwe-k寳v'sl#Ecھ)IlX27M-BJ%ڥ>hĩSC!CCSia+;37&_ƠFWY#P*g pvfXXg2 xpssoxf֑hFK3U‡E5rqX@6n[1v*xhd*z8| .;. 
!!Z%RTHI) ۬6I.'i'6Fٙlmצ6D^NB`,FFY,@mnb_POClEaLd7쟗!"y:>;ipX㝠V^v, wĻ '.Q=jWo^gۿ3ޏg ތb𛸇Y;CW ò9p U0 |j(<6c YE7Y0Kxw#cC&Wuhg(EfA9bH76/JDdu: HgJ?jsI2 &>.OGSQԂn^1`0<3y <]$"P'W}w ~FYQZb,l+7PĄF)4:JpgbӅ|%ϝo>f`Y,~ 7mYCyc7wtynWRSH,Hcv`nb,fۇ䴛V)AhnsZh\XfjBb|]L1*44l'쒙Z<}$EގƠ&cJN?s)0{qĻhI4\Q>EkH#l( ˘P_eD/쒄fy`fp3aZkUޡ?&sܝEV{`d_8iM$M5e """p?6Gfԑ$^׬j#d4l0^<=U,0\=]082>{ =jkz~$4+00*nȷDCfr _ uW x\\lZWE5Z`\6al]3ٴul^iBfMK螺DrVq9B<`yIEIC+z*˦'Wޘ ?`6@d?_?.͵sqpBnw.3:;{l._b{0qjÁOP+k YpquȽp '>ZK7v r-##RYixtp'Z\%BY$$;*Zf5EYW_w'Lb&_7n#~"D5`{pj1f6ak84Z#3 '!W<1MԩƟs.t=/,(UStzZbꙃLfvoF}ZݯwCo៦6`ۛztxb?h .Juށ3,x|\ȥX=VE[E6TfAsYlE@7Nhb$Ib~rEbd{²@"Fa9}S,o0o9 q6.^eJp|#IxMcԨQvA; *-TZԫ)Ԩ)pJe<%CCfFĨnpw?$) ӮÕ<|1:<,>JcRB^nbiCu;(Yt4JN{9KVK.k=#r}X#EaÑtZ/'«I% {5?ki_ g+D7hLݐ)5؝ZW7bvEQ6*&:<;$:%ǼݞDXbx׮]-Juh\ -xInqJ;L>wRoU ..o7ղcP{tX} 澵 sڄGڌ9o+#csf_Q3-RkS@KtC$jYPл.о><*4V\G Sr6i ୮35k4z$ jw o@L3j $F:s0M,r=<{|Y:Ǩ Ļpm~o^rUj3AtXh [͗rcǎ5=?pUPҮnm*JEsg]f+M&jbK Mkaś3{YYgǏZEB U -jU:4h0Ip#I~4f #Jն1`p\x,0?£J,  hnt^/`mNqg>};lS $@7\@;TF*C iĠI+v=Q!f1%L{h_-FJʶw?Q*R2XX̀hQp̘1@=ڵ1%V$㊢ iΕgn<ߩ{G1_8mX$ICqeJBkt9z1}ysN+W g.DOժd hFDv\Ȥ] NYPiwd@y6;ډZ9^xz%j Nn5p`o Np3{Mz#f o}.^vus]-r,[&gsœlJx׶p,B̛[qn\ n&Z倢(kHu#i?M+1lBswwNj/Hv RoB+Rp0FŘty$UfaOg8̫;+M!MF]BRHi 1{deux${zACw5vU1E"p!n~_'ҡi?28f"8xy[!etjGdF aX ٰ}؅IqyL,HA]A\OI;IPQ%皻&N8|޽[jocRSȖV"">»3w7%+el'bB-Gz3f0[233M\]]AQ+;=I[ǘ ~ ^WCtn4G.BOkP]EX04:yy.x xSpy Ok`ڀfnW@3`$A FZY_b+s?mT 4;a.xaNt?/M _'P @ZU{N7'hiƑ`Af$76fQ޻ƚd2;fIS۵Aۏg(ݜ>, h9cQWiۼ_"&mJIX2uUyVa9jJ|&( 3ݺuZXtV*ipgb`=$u4ZK .MPeUbdJ,#e+," Çۭs'OW Bf3rH#\%ǁJgD~uT$Iaj|ĢiB%&O5~³Sb^R\zsMFcPaX}*6޶ W_/W4H, uhKHp( $|OBEaÆFkgnEU*Y0_^:SCq!inswˆsg{(J8yI9=#.DBV}?#\b:^jUIhv,q^tKPnCNHXDcRn׋U$y-7\U EWO!ߣӮES'IpP&UI 7MI5j tٚr}ڦkG,à\%&-JŬcR>B`x߰vo[n}rwڴi&B?fX,DcNZK-a_M.Mz^^d=͇RhwWiq :66Z]]^{5qdd$&Oܥ Z|N]%B6֫T „A5Lr܍bq` C?7oS 7t6$v з~du/NǼտs9,*EZutMxD6V>N^!DT""8J:͎:[/tf$i-^nNxw84k$N`k~}ݬǺ=l'gsp9c ,Y -*I8! /UJYM$E67ېр˹%߻4oUɐRZx7 »l:ٳM[n5=sq>~Uf7XL+@X9`*D1 ˭Z )))V<.\h heT娭\{o7 hۭ:穫hk lM"Qqy8lV'wwQQ(Ǖ ɌʣQ9:\ 3gZ~_m 761^{?MU qLF&::}q^ .۷-K 9sc~!M 0V k3dDy.b+6,3] 
F/ًo?n_=Jxɒ%VyΜ9~:PSSJ#;;s'NĬY>W hy"Dm6)y0e)$Ro"f/)wm ϞX-qX(]PڱwTx(.BqbB"ˍ $E9/,kuJGg$)dn3]|=*BAB,^϶zl:C'ѣMhJM{=C# GQ\)Ó73%" x,1 Bv W +k?]lPR-;NGRCZZ-d̙3}=|D8tB߬BDޅ2ohE,_?A$؄jm3GVSkP%Sber&⽬=Bђ)eg2z@O&Û"۩,PzUZZrcI{ٷmkAomŭ5ϡWtg;Rl~(_hp{zz>30)B};[j\Rϖ<?S_,6i*}*nUZHR+̶`Ν60DެSD _vy~v_<] IJ%@祹h6Y@i+"ٚw/p1,2kpjvͲv9*T4 ^nJX4 W4 !/˾ݦ} V8˴X.=>Z4Nj6JJC Rh3W6j5Ȟ6x X5/ΰJ `(_$|<\\\bܹx ŨQ0o<,_k֬={:aV۳g yZۉ"0}ȽUy6콀Ec]gPZYrFk/؜ʂMVJ60<6E)PԘ&X=Zzo<>9Zvy9x)hb@)LYv_p_C)9+Z0_U׀U;iJTz=DX7odN8y #^ۈ'j?Ti(?4)4VyjaDҥKM&g"_<ڥa{uK,,zG1uTL0ގsu#Vo;?^'7Ჹ0''zR^^^x/_}鸺m#=.colH̗,xcQ_ N4y@7PfMfHkP"}{!v8b}8(s!5﩯*'ZUej=n|Sx=/#Ia0u'D…zl?6g#Fyg6X!*F9:n٫aMЩ j]V+$)Axq^<+߮C.Bv zyJ'ifEk`(yvB߈` 뉘Pfsy6ƵLbPwZ翃/E) H9#6~Xq$_+UiiC -&fBu6R&DxwV :}ŋ<| 0c[ِXDV=شiS@߾}G}=X Xd,Ggbp|ZQPJ~FƄˊVZw{BkPi1m&2FFH(ULv >><0 xxp8x¾>ۛakYT5 !b\d!.."p9l $nUJq0k@H/L ̗N,Wz)&ԫp^̃H"峧lSeWwi( J:Ӈ"$kMG_I WjTQ&@^uq6@tAh-&/w!4*zvw 7pPӠj4 4BKRLĆ9&}S 1a_(ks# ΀[5F2%b\&Wو2Qp:B#h'ߨӀ Z#ԯ_??|嗦0~g$ĆXVTo7 z~6dш)SXP*Nd`€^cr67?nY B-Gp,&$_,AnF㎤(իP%UJ>AVA%V?N">6/Nʮ|@PՍMw?_qbnb އZ~=)öWiWtdFࣧ u:#ZNS8e(|1A=/Sܳq#g_{ \bu-F< 8h~<[6$IabLb'~a<.xرYxF: yղc4.:2 [_yz4+ͻuYUln۷NN/wOw\7ˡPkF+Ww/ŕαҺyXCfYlYw{MwALJUReJI.bM"3\իWwtr7fb誝̷"udGQ}VVM--<9Df\|[zXD*ٳ=c߾}cWLd76dE:17ufW˰T~9éyȼYP0'!]`"J1vX^{饗OP\yovC) x|t{A9ԝǚy =CߙqmߏUWzp0Ѡq1{l<><͉!Zm0/!$[EqL>XiZMhxM_}Uӓ"-A+&!ޢpŋ1w\Z?l6'||j2  u`7̿\fMVu-,!m+ò*] 11/_66x`1DC'pl R9PHꬡ8SsPiCÃZFZߢ HRH :3O8z?|x  GjcM$LBaTrRo7?*HUH[ѓEfN=c;0}p8RT!͡FGji&lx5jKsHSctLv|iw:5֕,rs⍹#p0aP}7G8(oTVV"&&UU~QFĉ]ˡgСCM5k1|pZ%* 7I)5z|%416oٲ7r TxlL^m(0dTUUa֬YX`r| Ra㫥0bSĕxeNxW.Ƀ{[#JȔmPIȇF_6l qZFyG`Xp ͪx3@h0(S1AA޵MK@R"z(q7YX~?ܰ'2 8B-_n?/ٞ3xˍ VFk6{ FF$@Qw~<."B] Fh4+%Yvx;~ ~-_4ifϞmx|Wݻ!(cjOOgSJ%&O Xׯ߿{-/9Mw,]7o-kEfht< vZVk톚2e 6l///3:MrC_ι~4ݺkx q&AW=n l9fseSnH(4k̭1** dg#^t [l'a•HЪp=du())vK8^Zar8X67>V10ϙ-_䠬 ?ܹszoǎؾ};T*dp!c>;^R:AoOmi!w:"{idɱ,b\~^mݺSN!99 l^сkע7`?FA.FٷpUM-w`х?#6+bBlr%޺u uuu?g3gU]7ZK,kaȣC‡D,3#PUw|$vULJA|.Xۡc>ؙ9iRvYMV ѣGdž lʦٳ8hp*--͛QXX&W>{/sK}[ ꫯF۷CA" |> \ 7<;s>v;3۽t'Ɩ6v/W:LX6kLT|?Ncc#-ZNE!!!2 8y$0%jkk",\qC0U"Lg 8_lr=RDxx8bbb2% 
v'\2˚aV3)26<*lØS>Pu^Lff()M>rTO睶["WWt{V<͔ hmmEvv6Fe˖!((Vۣm6,_j׿PVVǏgӧIaK+"w|-ҟ/`0Օ 7n"  !EE!>Qд`|BH\xs0τ>Az/P[ьf5N&Sw> }&l Ɵ+p'hLoPOOO 66zMrr2z-HRjٳ}p"Hj.  … ?GMix1}t:Cx˥ؽr6t>v׺pۇ^uņb6l>ܾnlǻaT7ŚF@>$DGG ΝCQQs,YTBѰ3w܉ӧ[-ى vHs$:f-o9ܨh!vpap!B2 z*|Ld*zvB]'cJ bBRARa޼y#ze !GC.bzgQ]Ӈֻhcb`63p89(@*q4D]"d1=4je@H$B,@; 4:dʙҩ&,r鄀vFˀ&0B6"n- 4777B6,:9c[&F\F=4B6bzzoײv+ooojyBm$mmmm /I Bm,Yie8T@KHHiBmdBBBlc\ hBP6U$ЁXB!F&D一)2P# h#}vaZFB(Jhh(e{yrv ZLL T*:B6r=ܼys蘵`R7!AO?NU !(Jŋ{n3,6\S-..Z!P@X㐌!dozܤRQ# h7PG믿f]lnJ333)#!P@sJKKЧ.yx9u)8R7`߸q#6B66ZjF`{a1bo Wnۻw/ZF!1[rsA$>>kL ƅo ^YUUUhT*EmmB!C@P֭[0 ۻ} ],¦sPF`6ˣ`F!Nz>P^^: t:Ѕ5 01 n݁hw2 V=!sb$hUxW\իٔ?<^xji`̐#П-ߟ1lڴ9Fk͸ց.|( }_Dww7 -- J!;r{fB!:Qﯲ JJJmWÇ!˩B4@RRΜ9n[brss3 ϣ6ըOOOj]Bͱڰ{nV###~zh4hhh@cc#._6\`rssǮE#BẺ~dggi?iiiXf t:ݩEB43LѣG7ڵ :QQQVD!&zTUUϠP(J???`֬Ydt!d?B@EoIENDB`borgmatic-1.5.1/docs/static/rsyncnet.png000066400000000000000000000165051361605604600202750ustar00rootroot00000000000000PNG  IHDR?:iCCPICC profile(}=H@_S"vqP,8jP! :\!4iHR\ׂUg]\AIEJ_Rhq?{ܽjiV鶙tfE =CB2YIJwQܟ#f-Df6:Ԧmp'tAG+qλ,̰J|+M FS%م W[Oq]A|ub^isrYΚ71ǍϺ/RW{XYc0#"=E|'ajGѭowU_-R/ү[B0-V$9|.` =ZYLEf%Hғpqd"v.M>~>a?*itEQ~=CyRנCM0^HY{bj֓6(t˺ԥ3C @nc%s07 bS? rIcwe{ \-Y>'̅eCM gU: o~m).4_UeK;]_Pve(r#\#ͳN/kdw r+.S%|oz[v5r:ׅҚ3ozL̽"'fS}R}Z9ȟ]Eҙ!0/cqD>`PQnn&/RbD/}׫ ?)#yyrW O^!r;#t= hDj(5Ft(Oꎂ]bO|Aʃs鉽^:TܰQ{^u}5VUѣj\xxpOlXXR+ /yra0 3:0?,ZU,[,P}VuJjzWݕ/'GLO!\0%P"y ;:V}vjYQآPh=tĮA݅#g+7ʍan]?e]45M]%mm==Z|)չa^إ˱ @ \<"&twٱ1#W@^9pjKJ,͏ `2n8:5O>r6oXksý%Nᗓ ;~j 4ڝ3w-sH9 !GZcbkV!򫃗je]fgB]/-cIJ{ȋZR{&u}O^}4l*h<^]cZ=1'%Z'10fEyu~|zScDkGO 1~լJi~F~/ey|ĔolkGSl 10ކd;Q7EnYq=RF8j:ehsgU{5ei;ߜD怗櫪2P? *2J=ښ! 
XLA-2dhwFqϮNf+Vnݚ d*7O 1U׉;o9HrrK!U[֥zp@tC #w־C%Ez˦WJ;ỏVu]?+&+V_ ,}_',tyY3_ysFoȶL \=?|γo]ӏ#V?{FzFG m:…ϼC~Q3ǑĀ3lZ;spr|Cw%:\ej}O 4^Sciz92#U6}` 6~rZիT7}iSCɧ;JC);aB1"r~NƦ+}A#D%z]s]LJ:P|з{kBސpE]{Yiq݇Tgm;`x.tw7FV6*^8oDrgxrsV~7PnV'bPĀS&6%9 hA[̹16M-հ[@o~9ш%Yn {pZ*;.KMib'Fp2-b[O i?YK.-GvjAZ Yw}n[t̝Q+ "oTvFXn1!j uV$N'rKd̅x6-fN'HgRJJJywmKǷ`r'&luG.<WĀ9Qz(<2tg2k ?Ab_x*7ؼtq3?{zkgtٹrXn"Ja}Su[kAW\UNQ.lKXޱ|N6$jY<`L[o7]ZrD-js[^vt⪂p2v`-.!`o^;i; 5}Rl@@ܩX\?^V7<4],rM^j#0Ôxu.GKiwW6*'eH^7}ϯIKwI:tOM7'H+5vldϬ'r)\2cQ{^ACzW FPibs/W8(w貘byoR]P~%%ڇ )bWf.PmǯrF'\_a,7jw ׅQ!E,&!ZBZ;DGv>z8o4*F6Au,ٴvR\`7e_^k,u*[ɎmC3 ]h˞?CVeu<֊9a;1vvܝ~EӂYLz_SJz3ܚrQB#ݥWvSP;iY1a{f鏇lQ;# `_AbfE 5_]X{aGC;A e5W =BGӑTJ\[2#7oIy[^ɮ2AKgXxtgd߆SYRgTnYG(Z~ e2jdd~{n;߂han_nvqM,Z7@@.I\Rn"cx=uA1M7v~׽y\' =?A5Y;AcieeҸŨӯu`jK0Ny:(TrRB|-vABwƥX:XC ` ;\\PkyFV's\ u=2! g"MEF& sWȺTԸYh `ĩm_9ӅxH?N4ǠM kDӦ/س(z HDQeU oPw^ =i4_<=7>9{ōcjsĈP ~<\I6$^\+w4~9Qc|1߽e}kɍ(7oKL+R1{bTҲi^ꄠ;Q,Lϩ"XNvsxpf]>; _kwO7u&=UxLb@ \X.m`$Bޤh6"m/: J釬Jߔht\ng[bԵEb#:=U~ͩ%]0%<`x?;SĹ&C>ac.l,5f7IӠeÐ_\#.+"tɾPf ]JMVi/ Rk5ebgMǪrl[RX E ͢o^;|z;B\RcEAxzL˚U\9C~pa'6K Dx˔RvgB)ZIñ|wô`Ӥ.G9U"olQ5=%ƧGK/ /֙ޗЬ/!~zc%޿ bI< Yl8F{4$(kޒ67=Mx}ޟfz \8BF1ofy֯zy2|~PWĀhSJ{OSV4* A0\f5@z2ye+O} գ뛻Wl}错鉾鉾Zb]{MSWKJ٫c2h<&JbO:׏DrUi鉦rшYfM ZYMa3xD])角VN0>s0Mڬ)Z-ׅ\k[va_ &`ƯO_oYTQ>S}Fyr[Ǟ:N-U Ե3 ZLgLQkt=^iQr{|;D ˋAEnJgH ʮ۷-<@YB__1',\i#؏+wgWݖ??-0%Z!m:LJsa߉bNiђe.,A 0ՔCmx́ܺ%!wgƅ0 #U,vS>jZw(~tۯ9u.tWFhb-$F_tPtɚN@lm@-v.M.&'_Gyą %ƞ7>fp].lZW4 CjX^Q~nۃbcSxFKO+NLSP:z/TV\)(2;#`ȭYENuϠLF+ ,M!< b, 7®%kbn 47wa@ =XL:F׷% `L7X=\Ѐ[O>p% 1cHD{JߦcS%r bƐFYh `, 1մ簪WbƒV'8ήc@ Z<#8#ٴ Z߱ ebrx#Moc)Xe۷{煯[K`ԢbFo^,moRj,8~rbKի .bI0a(b 1@ b 1@ b 1@ b 1@ b 1`5.IENDB`borgmatic-1.5.1/pyproject.toml000066400000000000000000000001001361605604600163770ustar00rootroot00000000000000[tool.black] line-length = 100 skip-string-normalization = true 
borgmatic-1.5.1/sample/000077500000000000000000000000001361605604600147555ustar00rootroot00000000000000borgmatic-1.5.1/sample/cron/000077500000000000000000000000001361605604600157165ustar00rootroot00000000000000borgmatic-1.5.1/sample/cron/borgmatic000066400000000000000000000002501361605604600176050ustar00rootroot00000000000000# You can drop this file into /etc/cron.d/ to run borgmatic nightly. 0 3 * * * root PATH=$PATH:/usr/bin:/usr/local/bin /root/.local/bin/borgmatic --syslog-verbosity 1 borgmatic-1.5.1/sample/systemd/000077500000000000000000000000001361605604600164455ustar00rootroot00000000000000borgmatic-1.5.1/sample/systemd/borgmatic.service000066400000000000000000000012711361605604600217770ustar00rootroot00000000000000[Unit] Description=borgmatic backup Wants=network-online.target After=network-online.target ConditionACPower=true [Service] Type=oneshot # Lower CPU and I/O priority. Nice=19 CPUSchedulingPolicy=batch IOSchedulingClass=best-effort IOSchedulingPriority=7 IOWeight=100 Restart=no # Prevent rate limiting of borgmatic log events. If you are using an older version of systemd that # doesn't support this (pre-240 or so), you may have to remove this option. LogRateLimitIntervalSec=0 # Delay start to prevent backups running during boot. ExecStartPre=sleep 1m ExecStart=systemd-inhibit --who="borgmatic" --why="Prevent interrupting scheduled backup" /root/.local/bin/borgmatic --syslog-verbosity 1 borgmatic-1.5.1/sample/systemd/borgmatic.timer000066400000000000000000000001641361605604600214570ustar00rootroot00000000000000[Unit] Description=Run borgmatic backup [Timer] OnCalendar=daily Persistent=true [Install] WantedBy=timers.target borgmatic-1.5.1/scripts/000077500000000000000000000000001361605604600151635ustar00rootroot00000000000000borgmatic-1.5.1/scripts/dev-docs000077500000000000000000000003611361605604600166150ustar00rootroot00000000000000#!/bin/bash set -e docker build --tag borgmatic-docs --build-arg ENVIRONMENT=dev --file docs/Dockerfile . 
echo echo "You can view dev docs at http://localhost:8080" echo docker run --interactive --tty --publish 8080:80 --rm borgmatic-docs borgmatic-1.5.1/scripts/find-unsupported-borg-options000077500000000000000000000057771361605604600230570ustar00rootroot00000000000000#!/bin/bash set -o nounset # For each Borg sub-command that borgmatic uses, print out the Borg flags that borgmatic does not # appear to support yet. This script isn't terribly robust. It's intended as a basic tool to ferret # out unsupported Borg options so that they can be considered for addition to borgmatic. # Generate a sample borgmatic configuration with all options set, and uncomment all options. generate-borgmatic-config --destination temp.yaml cat temp.yaml | sed -e 's/# \S.*$//' | sed -e 's/#//' > temp.yaml.uncommented mv temp.yaml.uncommented temp.yaml # For each sub-command (prune, create, and check), collect the Borg command-line flags that result # from running borgmatic with the generated configuration. Then, collect the full set of available # Borg flags as reported by "borg --help" for that sub-command. Finally, compare the two lists of # flags to determine which Borg flags borgmatic doesn't yet support. for sub_command in prune create check list info; do echo "********** borg $sub_command **********" for line in $(borgmatic --config temp.yaml $sub_command -v 2 2>&1 | grep "borg\w* $sub_command") ; do echo "$line" | grep '^-' >> borgmatic_borg_flags done sort borgmatic_borg_flags > borgmatic_borg_flags.sorted mv borgmatic_borg_flags.sorted borgmatic_borg_flags for word in $(borg $sub_command --help | grep '^ -') ; do # Exclude a bunch of flags that borgmatic actually supports, but don't get exercised by the # generated sample config, and also flags that don't make sense to support. 
echo "$word" | grep ^-- | sed -e 's/,$//' \ | grep -v '^--archives-only$' \ | grep -v '^--critical$' \ | grep -v '^--debug$' \ | grep -v '^--dry-run$' \ | grep -v '^--error$' \ | grep -v '^--help$' \ | grep -v '^--info$' \ | grep -v '^--json$' \ | grep -v '^--keep-last$' \ | grep -v '^--list$' \ | grep -v '^--nobsdflags$' \ | grep -v '^--pattern$' \ | grep -v '^--progress$' \ | grep -v '^--stats$' \ | grep -v '^--read-special$' \ | grep -v '^--repository-only$' \ | grep -v '^--show-rc$' \ | grep -v '^--stats$' \ | grep -v '^--verbose$' \ | grep -v '^--warning$' \ | grep -v '^--exclude' \ | grep -v '^--exclude-from' \ | grep -v '^--first' \ | grep -v '^--format' \ | grep -v '^--glob-archives' \ | grep -v '^--last' \ | grep -v '^--list-format' \ | grep -v '^--patterns-from' \ | grep -v '^--prefix' \ | grep -v '^--short' \ | grep -v '^--sort-by' \ | grep -v '^-h$' \ >> all_borg_flags done sort all_borg_flags > all_borg_flags.sorted mv all_borg_flags.sorted all_borg_flags comm -13 borgmatic_borg_flags all_borg_flags rm ./*_borg_flags done rm temp.yaml borgmatic-1.5.1/scripts/push000077500000000000000000000002111361605604600160620ustar00rootroot00000000000000#!/bin/bash set -e branch_name=$(git rev-parse --abbrev-ref HEAD) git push -u github "$branch_name" git push -u origin "$branch_name" borgmatic-1.5.1/scripts/release000077500000000000000000000025351361605604600165360ustar00rootroot00000000000000#!/bin/bash set -e projects_token=${1:-} github_token=${2:-} if [[ -z $github_token ]]; then echo "Usage: $0 [projects-token] [github-token]" exit 1 fi if [[ ! -f NEWS ]]; then echo "Missing NEWS file. Try running from root of repository." exit 1 fi version=$(head --lines=1 NEWS) git tag $version git push origin $version git push github $version # Build borgmatic and publish to pypi. 
rm -fr dist python3 setup.py bdist_wheel python3 setup.py sdist gpg --detach-sign --armor dist/* twine upload -r pypi dist/borgmatic-*.tar.gz twine upload -r pypi dist/borgmatic-*-py3-none-any.whl # Set release changelogs on projects.torsion.org and GitHub. release_changelog="$(cat NEWS | sed '/^$/q' | grep -v '^\S')" escaped_release_changelog="$(echo "$release_changelog" | sed -z 's/\n/\\n/g' | sed -z 's/\"/\\"/g')" curl --silent --request POST \ "https://projects.torsion.org/api/v1/repos/witten/borgmatic/releases?access_token=$projects_token" \ --header "Accept: application/json" \ --header "Content-Type: application/json" \ --data "{\"body\": \"$escaped_release_changelog\", \"draft\": false, \"name\": \"borgmatic $version\", \"prerelease\": false, \"tag_name\": \"$version\"}" github-release create --token="$github_token" --owner=witten --repo=borgmatic --tag="$version" \ --name="borgmatic $version" --body="$release_changelog" borgmatic-1.5.1/scripts/run-full-dev-tests000077500000000000000000000007601361605604600205740ustar00rootroot00000000000000#!/bin/sh # This script is for running all tests, including end-to-end tests, on a developer machine. It sets # up database containers to run tests against, runs the tests, and then tears down the containers. # # Run this script from the root directory of the borgmatic source. # # For more information, see: # https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/ set -e docker-compose --file tests/end-to-end/docker-compose.yaml up --force-recreate \ --abort-on-container-exit borgmatic-1.5.1/scripts/run-full-tests000077500000000000000000000012541361605604600200170ustar00rootroot00000000000000#!/bin/sh # This script installs test dependencies and runs all tests, including end-to-end tests. It # is designed to run inside a test container, and presumes that other test infrastructure like # databases are already running. Therefore, on a developer machine, you should not run this script # directly. 
Instead, run scripts/run-full-dev-tests # # For more information, see: # https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/ set -e python -m pip install --upgrade pip==20.0.2 pip install tox==3.14.3 export COVERAGE_FILE=/tmp/.coverage tox --workdir /tmp/.tox apk add --no-cache borgbackup postgresql-client mariadb-client tox --workdir /tmp/.tox -e end-to-end borgmatic-1.5.1/setup.cfg000066400000000000000000000006551361605604600153230ustar00rootroot00000000000000[metadata] description-file=README.md [tool:pytest] testpaths = tests addopts = --cov-report term-missing:skip-covered --cov=borgmatic --ignore=tests/end-to-end filterwarnings = ignore:Coverage disabled.*:pytest.PytestWarning [flake8] ignore = E501,W503 exclude = *.*/* [tool:isort] force_single_line = False include_trailing_comma = True known_first_party = borgmatic line_length = 100 multi_line_output = 3 skip = .tox borgmatic-1.5.1/setup.py000066400000000000000000000025141361605604600152100ustar00rootroot00000000000000from setuptools import find_packages, setup VERSION = '1.5.1' setup( name='borgmatic', version=VERSION, description='Simple, configuration-driven backup software for servers and workstations', author='Dan Helfman', author_email='witten@torsion.org', url='https://torsion.org/borgmatic', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Programming Language :: Python', 'Topic :: Security :: Cryptography', 'Topic :: System :: Archiving :: Backup', ], packages=find_packages(exclude=['tests*']), entry_points={ 'console_scripts': [ 'borgmatic = borgmatic.commands.borgmatic:main', 'upgrade-borgmatic-config = borgmatic.commands.convert_config:main', 'generate-borgmatic-config = borgmatic.commands.generate_config:main', 'validate-borgmatic-config = borgmatic.commands.validate_config:main', ] }, obsoletes=['atticmatic'], install_requires=( 
'pykwalify>=1.6.0,<14.06', 'requests', 'ruamel.yaml>0.15.0,<0.17.0', 'setuptools', 'colorama>=0.4.1,<0.5', ), include_package_data=True, ) borgmatic-1.5.1/test_requirements.txt000066400000000000000000000006551361605604600200250ustar00rootroot00000000000000appdirs==1.4.3 atomicwrites==1.3.0 attrs==19.3.0 black==19.3b0; python_version >= '3.6' click==7.0 colorama==0.4.1 coverage==4.5.4 docopt==0.6.2 flake8==3.7.9 flexmock==0.10.4 isort==4.3.21 mccabe==0.6.1 more-itertools==7.2.0 pluggy==0.13.0 py==1.8.0 pycodestyle==2.5.0 pyflakes==2.1.1 pykwalify==1.7.0 pytest==5.2.2 pytest-cov==2.8.1 python-dateutil==2.8.0 PyYAML==5.1.2 requests==2.22.0 ruamel.yaml>0.15.0,<0.17.0 toml==0.10.0 borgmatic-1.5.1/tests/000077500000000000000000000000001361605604600146365ustar00rootroot00000000000000borgmatic-1.5.1/tests/__init__.py000066400000000000000000000000001361605604600167350ustar00rootroot00000000000000borgmatic-1.5.1/tests/end-to-end/000077500000000000000000000000001361605604600165705ustar00rootroot00000000000000borgmatic-1.5.1/tests/end-to-end/__init__.py000066400000000000000000000000001361605604600206670ustar00rootroot00000000000000borgmatic-1.5.1/tests/end-to-end/docker-compose.yaml000066400000000000000000000007731361605604600223750ustar00rootroot00000000000000version: '3' services: postgresql: image: postgres:11.6-alpine environment: POSTGRES_PASSWORD: test POSTGRES_DB: test mysql: image: mariadb:10.4 environment: MYSQL_ROOT_PASSWORD: test MYSQL_DATABASE: test tests: image: python:3.7-alpine3.10 volumes: - "../..:/app:ro" tmpfs: - "/app/borgmatic.egg-info" tty: true working_dir: /app command: - /app/scripts/run-full-tests depends_on: - postgresql - mysql borgmatic-1.5.1/tests/end-to-end/test_borgmatic.py000066400000000000000000000056711361605604600221610ustar00rootroot00000000000000import json import os import shutil import subprocess import sys import tempfile def generate_configuration(config_path, repository_path): ''' Generate borgmatic configuration into a file at the 
config path, and update the defaults so as to work for testing (including injecting the given repository path and tacking on an encryption passphrase). ''' subprocess.check_call( 'generate-borgmatic-config --destination {}'.format(config_path).split(' ') ) config = ( open(config_path) .read() .replace('user@backupserver:sourcehostname.borg', repository_path) .replace('- /home', '- {}'.format(config_path)) .replace('- /etc', '') .replace('- /var/log/syslog*', '') + 'storage:\n encryption_passphrase: "test"' ) config_file = open(config_path, 'w') config_file.write(config) config_file.close() def test_borgmatic_command(): # Create a Borg repository. temporary_directory = tempfile.mkdtemp() repository_path = os.path.join(temporary_directory, 'test.borg') extract_path = os.path.join(temporary_directory, 'extract') original_working_directory = os.getcwd() os.mkdir(extract_path) os.chdir(extract_path) try: config_path = os.path.join(temporary_directory, 'test.yaml') generate_configuration(config_path, repository_path) subprocess.check_call( 'borgmatic -v 2 --config {} init --encryption repokey'.format(config_path).split(' ') ) # Run borgmatic to generate a backup archive, and then list it to make sure it exists. subprocess.check_call('borgmatic --config {}'.format(config_path).split(' ')) output = subprocess.check_output( 'borgmatic --config {} list --json'.format(config_path).split(' ') ).decode(sys.stdout.encoding) parsed_output = json.loads(output) assert len(parsed_output) == 1 assert len(parsed_output[0]['archives']) == 1 archive_name = parsed_output[0]['archives'][0]['archive'] # Extract the created archive into the current (temporary) directory, and confirm that the # extracted file looks right. 
output = subprocess.check_output( 'borgmatic --config {} extract --archive {}'.format(config_path, archive_name).split( ' ' ) ).decode(sys.stdout.encoding) extracted_config_path = os.path.join(extract_path, config_path) assert open(extracted_config_path).read() == open(config_path).read() # Exercise the info flag. output = subprocess.check_output( 'borgmatic --config {} info --json'.format(config_path).split(' ') ).decode(sys.stdout.encoding) parsed_output = json.loads(output) assert len(parsed_output) == 1 assert 'repository' in parsed_output[0] finally: os.chdir(original_working_directory) shutil.rmtree(temporary_directory) borgmatic-1.5.1/tests/end-to-end/test_database.py000066400000000000000000000050121361605604600217430ustar00rootroot00000000000000import json import os import shutil import subprocess import sys import tempfile def write_configuration(config_path, repository_path, borgmatic_source_directory): ''' Write out borgmatic configuration into a file at the config path. Set the options so as to work for testing. This includes injecting the given repository path, borgmatic source directory for storing database dumps, and encryption passphrase. ''' config = ''' location: source_directories: - {} repositories: - {} borgmatic_source_directory: {} storage: encryption_passphrase: "test" hooks: postgresql_databases: - name: test hostname: postgresql username: postgres password: test mysql_databases: - name: test hostname: mysql username: root password: test '''.format( config_path, repository_path, borgmatic_source_directory ) config_file = open(config_path, 'w') config_file.write(config) config_file.close() def test_database_dump_and_restore(): # Create a Borg repository. 
temporary_directory = tempfile.mkdtemp() repository_path = os.path.join(temporary_directory, 'test.borg') borgmatic_source_directory = os.path.join(temporary_directory, '.borgmatic') original_working_directory = os.getcwd() try: config_path = os.path.join(temporary_directory, 'test.yaml') write_configuration(config_path, repository_path, borgmatic_source_directory) subprocess.check_call( 'borgmatic -v 2 --config {} init --encryption repokey'.format(config_path).split(' ') ) # Run borgmatic to generate a backup archive including a database dump subprocess.check_call('borgmatic create --config {} -v 2'.format(config_path).split(' ')) # Get the created archive name. output = subprocess.check_output( 'borgmatic --config {} list --json'.format(config_path).split(' ') ).decode(sys.stdout.encoding) parsed_output = json.loads(output) assert len(parsed_output) == 1 assert len(parsed_output[0]['archives']) == 1 archive_name = parsed_output[0]['archives'][0]['archive'] # Restore the database from the archive. 
subprocess.check_call( 'borgmatic --config {} restore --archive {}'.format(config_path, archive_name).split( ' ' ) ) finally: os.chdir(original_working_directory) shutil.rmtree(temporary_directory) borgmatic-1.5.1/tests/end-to-end/test_validate_config.py000066400000000000000000000023131361605604600233160ustar00rootroot00000000000000import os import subprocess import tempfile def test_validate_config_command_with_valid_configuration_succeeds(): with tempfile.TemporaryDirectory() as temporary_directory: config_path = os.path.join(temporary_directory, 'test.yaml') subprocess.check_call( 'generate-borgmatic-config --destination {}'.format(config_path).split(' ') ) exit_code = subprocess.call( 'validate-borgmatic-config --config {}'.format(config_path).split(' ') ) assert exit_code == 0 def test_validate_config_command_with_invalid_configuration_fails(): with tempfile.TemporaryDirectory() as temporary_directory: config_path = os.path.join(temporary_directory, 'test.yaml') subprocess.check_call( 'generate-borgmatic-config --destination {}'.format(config_path).split(' ') ) config = open(config_path).read().replace('keep_daily: 7', 'keep_daily: "7"') config_file = open(config_path, 'w') config_file.write(config) config_file.close() exit_code = subprocess.call( 'validate-borgmatic-config --config {}'.format(config_path).split(' ') ) assert exit_code == 1 
borgmatic-1.5.1/tests/integration/000077500000000000000000000000001361605604600171615ustar00rootroot00000000000000borgmatic-1.5.1/tests/integration/__init__.py000066400000000000000000000000001361605604600212600ustar00rootroot00000000000000borgmatic-1.5.1/tests/integration/commands/000077500000000000000000000000001361605604600207625ustar00rootroot00000000000000borgmatic-1.5.1/tests/integration/commands/__init__.py000066400000000000000000000000001361605604600230610ustar00rootroot00000000000000borgmatic-1.5.1/tests/integration/commands/test_arguments.py000066400000000000000000000426501361605604600244070ustar00rootroot00000000000000import pytest from flexmock import flexmock from borgmatic.commands import arguments as module def test_parse_arguments_with_no_arguments_uses_defaults(): config_paths = ['default'] flexmock(module.collect).should_receive('get_default_config_paths').and_return(config_paths) arguments = module.parse_arguments() global_arguments = arguments['global'] assert global_arguments.config_paths == config_paths assert global_arguments.excludes_filename is None assert global_arguments.verbosity == 0 assert global_arguments.syslog_verbosity == 0 assert global_arguments.log_file_verbosity == 0 def test_parse_arguments_with_multiple_config_paths_parses_as_list(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = module.parse_arguments('--config', 'myconfig', 'otherconfig') global_arguments = arguments['global'] assert global_arguments.config_paths == ['myconfig', 'otherconfig'] assert global_arguments.verbosity == 0 assert global_arguments.syslog_verbosity == 0 assert global_arguments.log_file_verbosity == 0 def test_parse_arguments_with_verbosity_overrides_default(): config_paths = ['default'] flexmock(module.collect).should_receive('get_default_config_paths').and_return(config_paths) arguments = module.parse_arguments('--verbosity', '1') global_arguments = arguments['global'] assert 
global_arguments.config_paths == config_paths assert global_arguments.excludes_filename is None assert global_arguments.verbosity == 1 assert global_arguments.syslog_verbosity == 0 assert global_arguments.log_file_verbosity == 0 def test_parse_arguments_with_syslog_verbosity_overrides_default(): config_paths = ['default'] flexmock(module.collect).should_receive('get_default_config_paths').and_return(config_paths) arguments = module.parse_arguments('--syslog-verbosity', '2') global_arguments = arguments['global'] assert global_arguments.config_paths == config_paths assert global_arguments.excludes_filename is None assert global_arguments.verbosity == 0 assert global_arguments.syslog_verbosity == 2 def test_parse_arguments_with_log_file_verbosity_overrides_default(): config_paths = ['default'] flexmock(module.collect).should_receive('get_default_config_paths').and_return(config_paths) arguments = module.parse_arguments('--log-file-verbosity', '-1') global_arguments = arguments['global'] assert global_arguments.config_paths == config_paths assert global_arguments.excludes_filename is None assert global_arguments.verbosity == 0 assert global_arguments.syslog_verbosity == 0 assert global_arguments.log_file_verbosity == -1 def test_parse_arguments_with_list_json_overrides_default(): arguments = module.parse_arguments('list', '--json') assert 'list' in arguments assert arguments['list'].json is True def test_parse_arguments_with_dashed_list_json_overrides_default(): arguments = module.parse_arguments('--list', '--json') assert 'list' in arguments assert arguments['list'].json is True def test_parse_arguments_with_no_actions_defaults_to_all_actions_enabled(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = module.parse_arguments() assert 'prune' in arguments assert 'create' in arguments assert 'check' in arguments def test_parse_arguments_with_no_actions_passes_argument_to_relevant_actions(): 
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = module.parse_arguments('--stats', '--files') assert 'prune' in arguments assert arguments['prune'].stats assert arguments['prune'].files assert 'create' in arguments assert arguments['create'].stats assert arguments['create'].files assert 'check' in arguments def test_parse_arguments_with_help_and_no_actions_shows_global_help(capsys): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit) as exit: module.parse_arguments('--help') assert exit.value.code == 0 captured = capsys.readouterr() assert 'global arguments:' in captured.out assert 'actions:' in captured.out def test_parse_arguments_with_help_and_action_shows_action_help(capsys): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit) as exit: module.parse_arguments('create', '--help') assert exit.value.code == 0 captured = capsys.readouterr() assert 'global arguments:' not in captured.out assert 'actions:' not in captured.out assert 'create arguments:' in captured.out def test_parse_arguments_with_prune_action_leaves_other_actions_disabled(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = module.parse_arguments('prune') assert 'prune' in arguments assert 'create' not in arguments assert 'check' not in arguments def test_parse_arguments_with_dashed_prune_action_leaves_other_actions_disabled(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = module.parse_arguments('--prune') assert 'prune' in arguments assert 'create' not in arguments assert 'check' not in arguments def test_parse_arguments_with_multiple_actions_leaves_other_action_disabled(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = 
module.parse_arguments('create', 'check') assert 'prune' not in arguments assert 'create' in arguments assert 'check' in arguments def test_parse_arguments_with_multiple_dashed_actions_leaves_other_action_disabled(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) arguments = module.parse_arguments('--create', '--check') assert 'prune' not in arguments assert 'create' in arguments assert 'check' in arguments def test_parse_arguments_with_invalid_arguments_exits(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--posix-me-harder') def test_parse_arguments_disallows_deprecated_excludes_option(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(ValueError): module.parse_arguments('--config', 'myconfig', '--excludes', 'myexcludes') def test_parse_arguments_disallows_encryption_mode_without_init(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', '--encryption', 'repokey') def test_parse_arguments_allows_encryption_mode_with_init(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'init', '--encryption', 'repokey') def test_parse_arguments_allows_encryption_mode_with_dashed_init(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', '--init', '--encryption', 'repokey') def test_parse_arguments_requires_encryption_mode_with_init(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', 'init') def test_parse_arguments_disallows_append_only_without_init(): 
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', '--append-only') def test_parse_arguments_disallows_storage_quota_without_init(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', '--storage-quota', '5G') def test_parse_arguments_allows_init_and_prune(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'init', '--encryption', 'repokey', 'prune') def test_parse_arguments_allows_init_and_create(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'init', '--encryption', 'repokey', 'create') def test_parse_arguments_disallows_init_and_dry_run(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(ValueError): module.parse_arguments( '--config', 'myconfig', 'init', '--encryption', 'repokey', '--dry-run' ) def test_parse_arguments_disallows_glob_archives_with_successful(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(ValueError): module.parse_arguments( '--config', 'myconfig', 'list', '--glob-archives', '*glob*', '--successful' ) def test_parse_arguments_disallows_repository_unless_action_consumes_it(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', '--repository', 'test.borg') def test_parse_arguments_allows_repository_with_extract(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments( '--config', 'myconfig', 'extract', '--repository', 
'test.borg', '--archive', 'test' ) def test_parse_arguments_allows_repository_with_mount(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments( '--config', 'myconfig', 'mount', '--repository', 'test.borg', '--archive', 'test', '--mount-point', '/mnt', ) def test_parse_arguments_allows_repository_with_list(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'list', '--repository', 'test.borg') def test_parse_arguments_disallows_archive_unless_action_consumes_it(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', '--archive', 'test') def test_parse_arguments_disallows_paths_unless_action_consumes_it(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', '--path', 'test') def test_parse_arguments_allows_archive_with_extract(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'extract', '--archive', 'test') def test_parse_arguments_allows_archive_with_mount(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments( '--config', 'myconfig', 'mount', '--archive', 'test', '--mount-point', '/mnt' ) def test_parse_arguments_allows_archive_with_dashed_extract(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', '--extract', '--archive', 'test') def test_parse_arguments_allows_archive_with_restore(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'restore', 
'--archive', 'test') def test_parse_arguments_allows_archive_with_dashed_restore(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', '--restore', '--archive', 'test') def test_parse_arguments_allows_archive_with_list(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--config', 'myconfig', 'list', '--archive', 'test') def test_parse_arguments_requires_archive_with_extract(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', 'extract') def test_parse_arguments_requires_archive_with_restore(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', 'restore') def test_parse_arguments_requires_mount_point_with_mount(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', 'mount', '--archive', 'test') def test_parse_arguments_requires_mount_point_with_umount(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--config', 'myconfig', 'umount') def test_parse_arguments_allows_progress_before_create(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--progress', 'create', 'list') def test_parse_arguments_allows_progress_after_create(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('create', '--progress', 'list') def test_parse_arguments_allows_progress_and_extract(): 
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--progress', 'extract', '--archive', 'test', 'list') def test_parse_arguments_allows_progress_and_restore(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--progress', 'restore', '--archive', 'test', 'list') def test_parse_arguments_disallows_progress_without_create(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--progress', 'list') def test_parse_arguments_with_stats_and_create_flags_does_not_raise(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--stats', 'create', 'list') def test_parse_arguments_with_stats_and_prune_flags_does_not_raise(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--stats', 'prune', 'list') def test_parse_arguments_with_stats_flag_but_no_create_or_prune_flag_raises_value_error(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--stats', 'list') def test_parse_arguments_with_files_and_create_flags_does_not_raise(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--files', 'create', 'list') def test_parse_arguments_with_files_and_prune_flags_does_not_raise(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--files', 'prune', 'list') def test_parse_arguments_with_files_flag_but_no_create_or_prune_or_restore_flag_raises_value_error(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(SystemExit): module.parse_arguments('--files', 
'list') def test_parse_arguments_allows_json_with_list_or_info(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('list', '--json') module.parse_arguments('info', '--json') def test_parse_arguments_allows_json_with_dashed_info(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('--info', '--json') def test_parse_arguments_disallows_json_with_both_list_and_info(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) with pytest.raises(ValueError): module.parse_arguments('list', 'info', '--json') def test_parse_arguments_check_only_extract_does_not_raise_extract_subparser_error(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('check', '--only', 'extract') def test_parse_arguments_extract_archive_check_does_not_raise_check_subparser_error(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('extract', '--archive', 'check') def test_parse_arguments_extract_with_check_only_extract_does_not_raise(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) module.parse_arguments('extract', '--archive', 'name', 'check', '--only', 'extract') borgmatic-1.5.1/tests/integration/commands/test_borgmatic.py000066400000000000000000000006601361605604600243440ustar00rootroot00000000000000import subprocess from flexmock import flexmock from borgmatic.commands import borgmatic as module def test_borgmatic_version_matches_news_version(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) borgmatic_version = subprocess.check_output(('borgmatic', '--version')).decode('ascii') news_version = open('NEWS').readline() assert borgmatic_version == news_version 
borgmatic-1.5.1/tests/integration/commands/test_convert_config.py000066400000000000000000000032071361605604600254020ustar00rootroot00000000000000import os import pytest from flexmock import flexmock from borgmatic.commands import convert_config as module def test_parse_arguments_with_no_arguments_uses_defaults(): flexmock(os.path).should_receive('exists').and_return(True) parser = module.parse_arguments() assert parser.source_config_filename == module.DEFAULT_SOURCE_CONFIG_FILENAME assert parser.source_excludes_filename == module.DEFAULT_SOURCE_EXCLUDES_FILENAME assert parser.destination_config_filename == module.DEFAULT_DESTINATION_CONFIG_FILENAME def test_parse_arguments_with_filename_arguments_overrides_defaults(): flexmock(os.path).should_receive('exists').and_return(True) parser = module.parse_arguments( '--source-config', 'config', '--source-excludes', 'excludes', '--destination-config', 'config.yaml', ) assert parser.source_config_filename == 'config' assert parser.source_excludes_filename == 'excludes' assert parser.destination_config_filename == 'config.yaml' def test_parse_arguments_with_missing_default_excludes_file_sets_filename_to_none(): flexmock(os.path).should_receive('exists').and_return(False) parser = module.parse_arguments() assert parser.source_config_filename == module.DEFAULT_SOURCE_CONFIG_FILENAME assert parser.source_excludes_filename is None assert parser.destination_config_filename == module.DEFAULT_DESTINATION_CONFIG_FILENAME def test_parse_arguments_with_invalid_arguments_exits(): flexmock(os.path).should_receive('exists').and_return(True) with pytest.raises(SystemExit): module.parse_arguments('--posix-me-harder') borgmatic-1.5.1/tests/integration/commands/test_generate_config.py000066400000000000000000000006701361605604600255150ustar00rootroot00000000000000from borgmatic.commands import generate_config as module def test_parse_arguments_with_no_arguments_uses_defaults(): parser = module.parse_arguments() assert 
parser.destination_filename == module.DEFAULT_DESTINATION_CONFIG_FILENAME def test_parse_arguments_with_filename_argument_overrides_defaults(): parser = module.parse_arguments('--destination', 'config.yaml') assert parser.destination_filename == 'config.yaml' borgmatic-1.5.1/tests/integration/commands/test_validate_config.py000066400000000000000000000012401361605604600255060ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.commands import validate_config as module def test_parse_arguments_with_no_arguments_uses_defaults(): config_paths = ['default'] flexmock(module.collect).should_receive('get_default_config_paths').and_return(config_paths) parser = module.parse_arguments() assert parser.config_paths == config_paths def test_parse_arguments_with_multiple_config_paths_parses_as_list(): flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) parser = module.parse_arguments('--config', 'myconfig', 'otherconfig') assert parser.config_paths == ['myconfig', 'otherconfig'] borgmatic-1.5.1/tests/integration/config/000077500000000000000000000000001361605604600204265ustar00rootroot00000000000000borgmatic-1.5.1/tests/integration/config/__init__.py000066400000000000000000000000001361605604600225250ustar00rootroot00000000000000borgmatic-1.5.1/tests/integration/config/test_generate.py000066400000000000000000000163411361605604600236360ustar00rootroot00000000000000import os import sys from io import StringIO import pytest from flexmock import flexmock from borgmatic.config import generate as module def test_insert_newline_before_comment_does_not_raise(): field_name = 'foo' config = module.yaml.comments.CommentedMap([(field_name, 33)]) config.yaml_set_comment_before_after_key(key=field_name, before='Comment') module._insert_newline_before_comment(config, field_name) def test_comment_out_line_skips_blank_line(): line = ' \n' assert module._comment_out_line(line) == line def 
test_comment_out_line_skips_already_commented_out_line(): line = ' # foo' assert module._comment_out_line(line) == line def test_comment_out_line_comments_section_name(): line = 'figgy-pudding:' assert module._comment_out_line(line) == '# ' + line def test_comment_out_line_comments_indented_option(): line = ' enabled: true' assert module._comment_out_line(line) == ' # enabled: true' def test_comment_out_line_comments_twice_indented_option(): line = ' - item' assert module._comment_out_line(line) == ' # - item' def test_comment_out_optional_configuration_comments_optional_config_only(): # The "# COMMENT_OUT" comment is a sentinel used to express that the following key is optional. # It's stripped out of the final output. flexmock(module)._comment_out_line = lambda line: '# ' + line config = ''' # COMMENT_OUT foo: # COMMENT_OUT bar: - baz - quux location: repositories: - one - two # This comment should be kept. # COMMENT_OUT other: thing ''' # flake8: noqa expected_config = ''' # foo: # bar: # - baz # - quux location: repositories: - one - two # This comment should be kept. 
# other: thing ''' assert module._comment_out_optional_configuration(config.strip()) == expected_config.strip() def test_render_configuration_converts_configuration_to_yaml_string(): yaml_string = module._render_configuration({'foo': 'bar'}) assert yaml_string == 'foo: bar\n' def test_write_configuration_does_not_raise(): flexmock(os.path).should_receive('exists').and_return(False) flexmock(os).should_receive('makedirs') builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').and_return(StringIO()) flexmock(os).should_receive('chmod') module.write_configuration('config.yaml', 'config: yaml') def test_write_configuration_with_already_existing_file_raises(): flexmock(os.path).should_receive('exists').and_return(True) with pytest.raises(FileExistsError): module.write_configuration('config.yaml', 'config: yaml') def test_write_configuration_with_already_existing_directory_does_not_raise(): flexmock(os.path).should_receive('exists').and_return(False) flexmock(os).should_receive('makedirs').and_raise(FileExistsError) builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').and_return(StringIO()) flexmock(os).should_receive('chmod') module.write_configuration('config.yaml', 'config: yaml') def test_add_comments_to_configuration_sequence_of_strings_does_not_raise(): config = module.yaml.comments.CommentedSeq(['foo', 'bar']) schema = {'seq': [{'type': 'str'}]} module.add_comments_to_configuration_sequence(config, schema) def test_add_comments_to_configuration_sequence_of_maps_does_not_raise(): config = module.yaml.comments.CommentedSeq([module.yaml.comments.CommentedMap([('foo', 'yo')])]) schema = {'seq': [{'map': {'foo': {'desc': 'yo'}}}]} module.add_comments_to_configuration_sequence(config, schema) def test_add_comments_to_configuration_sequence_of_maps_without_description_does_not_raise(): config = module.yaml.comments.CommentedSeq([module.yaml.comments.CommentedMap([('foo', 'yo')])]) schema = {'seq': [{'map': {'foo': {}}}]} 
module.add_comments_to_configuration_sequence(config, schema) def test_add_comments_to_configuration_map_does_not_raise(): # Ensure that it can deal with fields both in the schema and missing from the schema. config = module.yaml.comments.CommentedMap([('foo', 33), ('bar', 44), ('baz', 55)]) schema = {'map': {'foo': {'desc': 'Foo'}, 'bar': {'desc': 'Bar'}}} module.add_comments_to_configuration_map(config, schema) def test_add_comments_to_configuration_map_with_skip_first_does_not_raise(): config = module.yaml.comments.CommentedMap([('foo', 33)]) schema = {'map': {'foo': {'desc': 'Foo'}}} module.add_comments_to_configuration_map(config, schema, skip_first=True) def test_remove_commented_out_sentinel_keeps_other_comments(): field_name = 'foo' config = module.yaml.comments.CommentedMap([(field_name, 33)]) config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.\nCOMMENT_OUT') module.remove_commented_out_sentinel(config, field_name) comments = config.ca.items[field_name][module.RUAMEL_YAML_COMMENTS_INDEX] assert len(comments) == 1 assert comments[0].value == '# Actual comment.\n' def test_remove_commented_out_sentinel_without_sentinel_keeps_other_comments(): field_name = 'foo' config = module.yaml.comments.CommentedMap([(field_name, 33)]) config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.') module.remove_commented_out_sentinel(config, field_name) comments = config.ca.items[field_name][module.RUAMEL_YAML_COMMENTS_INDEX] assert len(comments) == 1 assert comments[0].value == '# Actual comment.\n' def test_remove_commented_out_sentinel_on_unknown_field_does_not_raise(): field_name = 'foo' config = module.yaml.comments.CommentedMap([(field_name, 33)]) config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.') module.remove_commented_out_sentinel(config, 'unknown') def test_generate_sample_configuration_does_not_raise(): builtins = flexmock(sys.modules['builtins']) 
builtins.should_receive('open').with_args('schema.yaml').and_return('') flexmock(module.yaml).should_receive('round_trip_load') flexmock(module).should_receive('_schema_to_sample_configuration') flexmock(module).should_receive('merge_source_configuration_into_destination') flexmock(module).should_receive('_render_configuration') flexmock(module).should_receive('_comment_out_optional_configuration') flexmock(module).should_receive('write_configuration') module.generate_sample_configuration(None, 'dest.yaml', 'schema.yaml') def test_generate_sample_configuration_with_source_filename_does_not_raise(): builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('schema.yaml').and_return('') flexmock(module.yaml).should_receive('round_trip_load') flexmock(module.load).should_receive('load_configuration') flexmock(module).should_receive('_schema_to_sample_configuration') flexmock(module).should_receive('merge_source_configuration_into_destination') flexmock(module).should_receive('_render_configuration') flexmock(module).should_receive('_comment_out_optional_configuration') flexmock(module).should_receive('write_configuration') module.generate_sample_configuration('source.yaml', 'dest.yaml', 'schema.yaml') borgmatic-1.5.1/tests/integration/config/test_legacy.py000066400000000000000000000011021361605604600232750ustar00rootroot00000000000000import string from collections import OrderedDict from io import StringIO from borgmatic.config import legacy as module def test_parse_section_options_with_punctuation_should_return_section_options(): parser = module.RawConfigParser() parser.read_file(StringIO('[section]\nfoo: {}\n'.format(string.punctuation))) section_format = module.Section_format( 'section', (module.Config_option('foo', str, required=True),) ) config = module.parse_section_options(parser, section_format) assert config == OrderedDict((('foo', string.punctuation),)) 
borgmatic-1.5.1/tests/integration/config/test_load.py000066400000000000000000000033461361605604600227640ustar00rootroot00000000000000import sys import pytest import ruamel.yaml from flexmock import flexmock from borgmatic.config import load as module def test_load_configuration_parses_contents(): builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('config.yaml').and_return('key: value') assert module.load_configuration('config.yaml') == {'key': 'value'} def test_load_configuration_inlines_include(): builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('include.yaml').and_return('value') builtins.should_receive('open').with_args('config.yaml').and_return( 'key: !include include.yaml' ) assert module.load_configuration('config.yaml') == {'key': 'value'} def test_load_configuration_merges_include(): builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('include.yaml').and_return( ''' foo: bar baz: quux ''' ) builtins.should_receive('open').with_args('config.yaml').and_return( ''' foo: override <<: !include include.yaml ''' ) assert module.load_configuration('config.yaml') == {'foo': 'override', 'baz': 'quux'} def test_load_configuration_does_not_merge_include_list(): builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('include.yaml').and_return( ''' - one - two ''' ) builtins.should_receive('open').with_args('config.yaml').and_return( ''' foo: bar repositories: <<: !include include.yaml ''' ) with pytest.raises(ruamel.yaml.error.YAMLError): assert module.load_configuration('config.yaml') borgmatic-1.5.1/tests/integration/config/test_override.py000066400000000000000000000020171361605604600236560ustar00rootroot00000000000000import pytest from borgmatic.config import override as module @pytest.mark.parametrize( 'value,expected_result', ( ('thing', 'thing'), ('33', 33), ('33b', '33b'), ('true', True), ('false', False), ('[foo]', 
['foo']), ('[foo, bar]', ['foo', 'bar']), ), ) def test_convert_value_type_coerces_values(value, expected_result): assert module.convert_value_type(value) == expected_result def test_apply_overrides_updates_config(): raw_overrides = [ 'section.key=value1', 'other_section.thing=value2', 'section.nested.key=value3', 'new.foo=bar', ] config = { 'section': {'key': 'value', 'other': 'other_value'}, 'other_section': {'thing': 'thing_value'}, } module.apply_overrides(config, raw_overrides) assert config == { 'section': {'key': 'value1', 'other': 'other_value', 'nested': {'key': 'value3'}}, 'other_section': {'thing': 'value2'}, 'new': {'foo': 'bar'}, } borgmatic-1.5.1/tests/integration/config/test_validate.py000066400000000000000000000154231361605604600236350ustar00rootroot00000000000000import io import string import sys import pytest from flexmock import flexmock from borgmatic.config import validate as module def test_schema_filename_returns_plausable_path(): schema_path = module.schema_filename() assert schema_path.endswith('/schema.yaml') def mock_config_and_schema(config_yaml, schema_yaml=None): ''' Set up mocks for the given config config YAML string and the schema YAML string, or the default schema if no schema is provided. The idea is that that the code under test consumes these mocks when parsing the configuration. 
''' config_stream = io.StringIO(config_yaml) if schema_yaml is None: schema_stream = open(module.schema_filename()) else: schema_stream = io.StringIO(schema_yaml) builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('config.yaml').and_return(config_stream) builtins.should_receive('open').with_args('schema.yaml').and_return(schema_stream) def test_parse_configuration_transforms_file_into_mapping(): mock_config_and_schema( ''' location: source_directories: - /home - /etc repositories: - hostname.borg retention: keep_minutely: 60 keep_hourly: 24 keep_daily: 7 consistency: checks: - repository - archives ''' ) result = module.parse_configuration('config.yaml', 'schema.yaml') assert result == { 'location': {'source_directories': ['/home', '/etc'], 'repositories': ['hostname.borg']}, 'retention': {'keep_daily': 7, 'keep_hourly': 24, 'keep_minutely': 60}, 'consistency': {'checks': ['repository', 'archives']}, } def test_parse_configuration_passes_through_quoted_punctuation(): escaped_punctuation = string.punctuation.replace('\\', r'\\').replace('"', r'\"') mock_config_and_schema( ''' location: source_directories: - /home repositories: - "{}.borg" '''.format( escaped_punctuation ) ) result = module.parse_configuration('config.yaml', 'schema.yaml') assert result == { 'location': { 'source_directories': ['/home'], 'repositories': ['{}.borg'.format(string.punctuation)], } } def test_parse_configuration_with_schema_lacking_examples_does_not_raise(): mock_config_and_schema( ''' location: source_directories: - /home repositories: - hostname.borg ''', ''' map: location: required: true map: source_directories: required: true seq: - type: scalar repositories: required: true seq: - type: scalar ''', ) module.parse_configuration('config.yaml', 'schema.yaml') def test_parse_configuration_inlines_include(): mock_config_and_schema( ''' location: source_directories: - /home repositories: - hostname.borg retention: !include include.yaml ''' ) builtins = 
flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('include.yaml').and_return( ''' keep_daily: 7 keep_hourly: 24 ''' ) result = module.parse_configuration('config.yaml', 'schema.yaml') assert result == { 'location': {'source_directories': ['/home'], 'repositories': ['hostname.borg']}, 'retention': {'keep_daily': 7, 'keep_hourly': 24}, } def test_parse_configuration_merges_include(): mock_config_and_schema( ''' location: source_directories: - /home repositories: - hostname.borg retention: keep_daily: 1 <<: !include include.yaml ''' ) builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('include.yaml').and_return( ''' keep_daily: 7 keep_hourly: 24 ''' ) result = module.parse_configuration('config.yaml', 'schema.yaml') assert result == { 'location': {'source_directories': ['/home'], 'repositories': ['hostname.borg']}, 'retention': {'keep_daily': 1, 'keep_hourly': 24}, } def test_parse_configuration_raises_for_missing_config_file(): with pytest.raises(FileNotFoundError): module.parse_configuration('config.yaml', 'schema.yaml') def test_parse_configuration_raises_for_missing_schema_file(): mock_config_and_schema('') builtins = flexmock(sys.modules['builtins']) builtins.should_receive('open').with_args('schema.yaml').and_raise(FileNotFoundError) with pytest.raises(FileNotFoundError): module.parse_configuration('config.yaml', 'schema.yaml') def test_parse_configuration_raises_for_syntax_error(): mock_config_and_schema('foo:\nbar') with pytest.raises(ValueError): module.parse_configuration('config.yaml', 'schema.yaml') def test_parse_configuration_raises_for_validation_error(): mock_config_and_schema( ''' location: source_directories: yes repositories: - hostname.borg ''' ) with pytest.raises(module.Validation_error): module.parse_configuration('config.yaml', 'schema.yaml') def test_parse_configuration_applies_overrides(): mock_config_and_schema( ''' location: source_directories: - /home repositories: - 
hostname.borg local_path: borg1 ''' ) result = module.parse_configuration( 'config.yaml', 'schema.yaml', overrides=['location.local_path=borg2'] ) assert result == { 'location': { 'source_directories': ['/home'], 'repositories': ['hostname.borg'], 'local_path': 'borg2', } } def test_parse_configuration_applies_normalization(): mock_config_and_schema( ''' location: source_directories: - /home repositories: - hostname.borg exclude_if_present: .nobackup ''' ) result = module.parse_configuration('config.yaml', 'schema.yaml') assert result == { 'location': { 'source_directories': ['/home'], 'repositories': ['hostname.borg'], 'exclude_if_present': ['.nobackup'], } } borgmatic-1.5.1/tests/integration/test_execute.py000066400000000000000000000051061361605604600222360ustar00rootroot00000000000000import logging import subprocess import pytest from flexmock import flexmock from borgmatic import execute as module def test_log_output_logs_each_line_separately(): flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').once() flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once() flexmock(module).should_receive('exit_code_indicates_error').and_return(False) hi_process = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE) module.log_output( ['echo', 'hi'], hi_process, hi_process.stdout, output_log_level=logging.INFO, error_on_warnings=False, ) there_process = subprocess.Popen(['echo', 'there'], stdout=subprocess.PIPE) module.log_output( ['echo', 'there'], there_process, there_process.stdout, output_log_level=logging.INFO, error_on_warnings=False, ) def test_log_output_includes_error_output_in_exception(): flexmock(module.logger).should_receive('log') flexmock(module).should_receive('exit_code_indicates_error').and_return(True) process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) with pytest.raises(subprocess.CalledProcessError) as error: module.log_output( ['grep'], process, process.stdout, 
output_log_level=logging.INFO, error_on_warnings=False, ) assert error.value.returncode == 2 assert error.value.output def test_log_output_truncates_long_error_output(): flexmock(module).ERROR_OUTPUT_MAX_LINE_COUNT = 0 flexmock(module.logger).should_receive('log') flexmock(module).should_receive('exit_code_indicates_error').and_return(True) process = subprocess.Popen(['grep'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) with pytest.raises(subprocess.CalledProcessError) as error: module.log_output( ['grep'], process, process.stdout, output_log_level=logging.INFO, error_on_warnings=False, ) assert error.value.returncode == 2 assert error.value.output.startswith('...') def test_log_output_with_no_output_logs_nothing(): flexmock(module.logger).should_receive('log').never() flexmock(module).should_receive('exit_code_indicates_error').and_return(False) process = subprocess.Popen(['true'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) module.log_output( ['true'], process, process.stdout, output_log_level=logging.INFO, error_on_warnings=False ) borgmatic-1.5.1/tests/unit/000077500000000000000000000000001361605604600156155ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/__init__.py000066400000000000000000000000001361605604600177140ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/borg/000077500000000000000000000000001361605604600165465ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/borg/__init__.py000066400000000000000000000000001361605604600206450ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/borg/test_check.py000066400000000000000000000265031361605604600212420ustar00rootroot00000000000000import logging import pytest from flexmock import flexmock from borgmatic.borg import check as module from ..test_verbosity import insert_logging_mock def insert_execute_command_mock(command): flexmock(module).should_receive('execute_command').with_args( command, error_on_warnings=True ).once() def insert_execute_command_never(): 
flexmock(module).should_receive('execute_command').never() def test_parse_checks_returns_them_as_tuple(): checks = module._parse_checks({'checks': ['foo', 'disabled', 'bar']}) assert checks == ('foo', 'bar') def test_parse_checks_with_missing_value_returns_defaults(): checks = module._parse_checks({}) assert checks == module.DEFAULT_CHECKS def test_parse_checks_with_blank_value_returns_defaults(): checks = module._parse_checks({'checks': []}) assert checks == module.DEFAULT_CHECKS def test_parse_checks_with_none_value_returns_defaults(): checks = module._parse_checks({'checks': None}) assert checks == module.DEFAULT_CHECKS def test_parse_checks_with_disabled_returns_no_checks(): checks = module._parse_checks({'checks': ['disabled']}) assert checks == () def test_parse_checks_with_data_check_also_injects_archives(): checks = module._parse_checks({'checks': ['data']}) assert checks == ('data', 'archives') def test_parse_checks_with_data_check_passes_through_archives(): checks = module._parse_checks({'checks': ['data', 'archives']}) assert checks == ('data', 'archives') def test_parse_checks_prefers_override_checks_to_configured_checks(): checks = module._parse_checks({'checks': ['archives']}, only_checks=['repository', 'extract']) assert checks == ('repository', 'extract') def test_parse_checks_with_override_data_check_also_injects_archives(): checks = module._parse_checks({'checks': ['extract']}, only_checks=['data']) assert checks == ('data', 'archives') def test_make_check_flags_with_repository_check_returns_flag(): flags = module._make_check_flags(('repository',)) assert flags == ('--repository-only',) def test_make_check_flags_with_archives_check_returns_flag(): flags = module._make_check_flags(('archives',)) assert flags == ('--archives-only',) def test_make_check_flags_with_data_check_returns_flag(): flags = module._make_check_flags(('data',)) assert flags == ('--verify-data',) def test_make_check_flags_with_extract_omits_extract_flag(): flags = 
module._make_check_flags(('extract',)) assert flags == () def test_make_check_flags_with_default_checks_and_default_prefix_returns_default_flags(): flags = module._make_check_flags(module.DEFAULT_CHECKS, prefix=module.DEFAULT_PREFIX) assert flags == ('--prefix', module.DEFAULT_PREFIX) def test_make_check_flags_with_all_checks_and_default_prefix_returns_default_flags(): flags = module._make_check_flags( module.DEFAULT_CHECKS + ('extract',), prefix=module.DEFAULT_PREFIX ) assert flags == ('--prefix', module.DEFAULT_PREFIX) def test_make_check_flags_with_archives_check_and_last_includes_last_flag(): flags = module._make_check_flags(('archives',), check_last=3) assert flags == ('--archives-only', '--last', '3') def test_make_check_flags_with_repository_check_and_last_omits_last_flag(): flags = module._make_check_flags(('repository',), check_last=3) assert flags == ('--repository-only',) def test_make_check_flags_with_default_checks_and_last_includes_last_flag(): flags = module._make_check_flags(module.DEFAULT_CHECKS, check_last=3) assert flags == ('--last', '3') def test_make_check_flags_with_archives_check_and_prefix_includes_prefix_flag(): flags = module._make_check_flags(('archives',), prefix='foo-') assert flags == ('--archives-only', '--prefix', 'foo-') def test_make_check_flags_with_archives_check_and_empty_prefix_omits_prefix_flag(): flags = module._make_check_flags(('archives',), prefix='') assert flags == ('--archives-only',) def test_make_check_flags_with_archives_check_and_none_prefix_omits_prefix_flag(): flags = module._make_check_flags(('archives',), prefix=None) assert flags == ('--archives-only',) def test_make_check_flags_with_repository_check_and_prefix_omits_prefix_flag(): flags = module._make_check_flags(('repository',), prefix='foo-') assert flags == ('--repository-only',) def test_make_check_flags_with_default_checks_and_prefix_includes_prefix_flag(): flags = module._make_check_flags(module.DEFAULT_CHECKS, prefix='foo-') assert flags == 
('--prefix', 'foo-') def test_check_archives_with_progress_calls_borg_with_progress_parameter(): checks = ('repository',) consistency_config = {'check_last': None} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').and_return(()) flexmock(module).should_receive('execute_command').never() flexmock(module).should_receive('execute_command_without_capture').with_args( ('borg', 'check', '--progress', 'repo'), error_on_warnings=True ).once() module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config, progress=True ) def test_check_archives_with_repair_calls_borg_with_repair_parameter(): checks = ('repository',) consistency_config = {'check_last': None} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').and_return(()) flexmock(module).should_receive('execute_command').never() flexmock(module).should_receive('execute_command_without_capture').with_args( ('borg', 'check', '--repair', 'repo'), error_on_warnings=True ).once() module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config, repair=True ) @pytest.mark.parametrize( 'checks', ( ('repository',), ('archives',), ('repository', 'archives'), ('repository', 'archives', 'other'), ), ) def test_check_archives_calls_borg_with_parameters(checks): check_last = flexmock() consistency_config = {'check_last': check_last} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').with_args( checks, check_last, module.DEFAULT_PREFIX ).and_return(()) insert_execute_command_mock(('borg', 'check', 'repo')) module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config ) def test_check_archives_with_extract_check_calls_extract_only(): checks = ('extract',) check_last = flexmock() consistency_config = {'check_last': 
check_last} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').never() flexmock(module.extract).should_receive('extract_last_archive_dry_run').once() insert_execute_command_never() module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config ) def test_check_archives_with_log_info_calls_borg_with_info_parameter(): checks = ('repository',) consistency_config = {'check_last': None} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').and_return(()) insert_logging_mock(logging.INFO) insert_execute_command_mock(('borg', 'check', '--info', 'repo')) module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config ) def test_check_archives_with_log_debug_calls_borg_with_debug_parameter(): checks = ('repository',) consistency_config = {'check_last': None} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').and_return(()) insert_logging_mock(logging.DEBUG) insert_execute_command_mock(('borg', 'check', '--debug', '--show-rc', 'repo')) module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config ) def test_check_archives_without_any_checks_bails(): consistency_config = {'check_last': None} flexmock(module).should_receive('_parse_checks').and_return(()) insert_execute_command_never() module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config ) def test_check_archives_with_local_path_calls_borg_via_local_path(): checks = ('repository',) check_last = flexmock() consistency_config = {'check_last': check_last} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').with_args( checks, check_last, module.DEFAULT_PREFIX ).and_return(()) 
insert_execute_command_mock(('borg1', 'check', 'repo')) module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config, local_path='borg1', ) def test_check_archives_with_remote_path_calls_borg_with_remote_path_parameters(): checks = ('repository',) check_last = flexmock() consistency_config = {'check_last': check_last} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').with_args( checks, check_last, module.DEFAULT_PREFIX ).and_return(()) insert_execute_command_mock(('borg', 'check', '--remote-path', 'borg1', 'repo')) module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config, remote_path='borg1', ) def test_check_archives_with_lock_wait_calls_borg_with_lock_wait_parameters(): checks = ('repository',) check_last = flexmock() consistency_config = {'check_last': check_last} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').with_args( checks, check_last, module.DEFAULT_PREFIX ).and_return(()) insert_execute_command_mock(('borg', 'check', '--lock-wait', '5', 'repo')) module.check_archives( repository='repo', storage_config={'lock_wait': 5}, consistency_config=consistency_config ) def test_check_archives_with_retention_prefix(): checks = ('repository',) check_last = flexmock() prefix = 'foo-' consistency_config = {'check_last': check_last, 'prefix': prefix} flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').with_args( checks, check_last, prefix ).and_return(()) insert_execute_command_mock(('borg', 'check', 'repo')) module.check_archives( repository='repo', storage_config={}, consistency_config=consistency_config ) def test_check_archives_with_extra_borg_options_calls_borg_with_extra_options(): checks = ('repository',) consistency_config = {'check_last': None} 
flexmock(module).should_receive('_parse_checks').and_return(checks) flexmock(module).should_receive('_make_check_flags').and_return(()) insert_execute_command_mock(('borg', 'check', '--extra', '--options', 'repo')) module.check_archives( repository='repo', storage_config={'extra_borg_options': {'check': '--extra --options'}}, consistency_config=consistency_config, ) borgmatic-1.5.1/tests/unit/borg/test_create.py000066400000000000000000001355051361605604600214330ustar00rootroot00000000000000import logging import pytest from flexmock import flexmock from borgmatic.borg import create as module from ..test_verbosity import insert_logging_mock def test_expand_directory_with_basic_path_passes_it_through(): flexmock(module.os.path).should_receive('expanduser').and_return('foo') flexmock(module.glob).should_receive('glob').and_return([]) paths = module._expand_directory('foo') assert paths == ['foo'] def test_expand_directory_with_glob_expands(): flexmock(module.os.path).should_receive('expanduser').and_return('foo*') flexmock(module.glob).should_receive('glob').and_return(['foo', 'food']) paths = module._expand_directory('foo*') assert paths == ['foo', 'food'] def test_expand_directories_flattens_expanded_directories(): flexmock(module).should_receive('_expand_directory').with_args('~/foo').and_return( ['/root/foo'] ) flexmock(module).should_receive('_expand_directory').with_args('bar*').and_return( ['bar', 'barf'] ) paths = module._expand_directories(('~/foo', 'bar*')) assert paths == ('/root/foo', 'bar', 'barf') def test_expand_directories_considers_none_as_no_directories(): paths = module._expand_directories(None) assert paths == () def test_expand_home_directories_expands_tildes(): flexmock(module.os.path).should_receive('expanduser').with_args('~/bar').and_return('/foo/bar') flexmock(module.os.path).should_receive('expanduser').with_args('baz').and_return('baz') paths = module._expand_home_directories(('~/bar', 'baz')) assert paths == ('/foo/bar', 'baz') def 
test_expand_home_directories_considers_none_as_no_directories(): paths = module._expand_home_directories(None) assert paths == () def test_write_pattern_file_does_not_raise(): temporary_file = flexmock(name='filename', write=lambda mode: None, flush=lambda: None) flexmock(module.tempfile).should_receive('NamedTemporaryFile').and_return(temporary_file) module._write_pattern_file(['exclude']) def test_write_pattern_file_with_empty_exclude_patterns_does_not_raise(): module._write_pattern_file([]) def test_make_pattern_flags_includes_pattern_filename_when_given(): pattern_flags = module._make_pattern_flags( location_config={'patterns': ['R /', '- /var']}, pattern_filename='/tmp/patterns' ) assert pattern_flags == ('--patterns-from', '/tmp/patterns') def test_make_pattern_flags_includes_patterns_from_filenames_when_in_config(): pattern_flags = module._make_pattern_flags( location_config={'patterns_from': ['patterns', 'other']} ) assert pattern_flags == ('--patterns-from', 'patterns', '--patterns-from', 'other') def test_make_pattern_flags_includes_both_filenames_when_patterns_given_and_patterns_from_in_config(): pattern_flags = module._make_pattern_flags( location_config={'patterns_from': ['patterns']}, pattern_filename='/tmp/patterns' ) assert pattern_flags == ('--patterns-from', 'patterns', '--patterns-from', '/tmp/patterns') def test_make_pattern_flags_considers_none_patterns_from_filenames_as_empty(): pattern_flags = module._make_pattern_flags(location_config={'patterns_from': None}) assert pattern_flags == () def test_make_exclude_flags_includes_exclude_patterns_filename_when_given(): exclude_flags = module._make_exclude_flags( location_config={'exclude_patterns': ['*.pyc', '/var']}, exclude_filename='/tmp/excludes' ) assert exclude_flags == ('--exclude-from', '/tmp/excludes') def test_make_exclude_flags_includes_exclude_from_filenames_when_in_config(): exclude_flags = module._make_exclude_flags( location_config={'exclude_from': ['excludes', 'other']} ) assert 
exclude_flags == ('--exclude-from', 'excludes', '--exclude-from', 'other') def test_make_exclude_flags_includes_both_filenames_when_patterns_given_and_exclude_from_in_config(): exclude_flags = module._make_exclude_flags( location_config={'exclude_from': ['excludes']}, exclude_filename='/tmp/excludes' ) assert exclude_flags == ('--exclude-from', 'excludes', '--exclude-from', '/tmp/excludes') def test_make_exclude_flags_considers_none_exclude_from_filenames_as_empty(): exclude_flags = module._make_exclude_flags(location_config={'exclude_from': None}) assert exclude_flags == () def test_make_exclude_flags_includes_exclude_caches_when_true_in_config(): exclude_flags = module._make_exclude_flags(location_config={'exclude_caches': True}) assert exclude_flags == ('--exclude-caches',) def test_make_exclude_flags_does_not_include_exclude_caches_when_false_in_config(): exclude_flags = module._make_exclude_flags(location_config={'exclude_caches': False}) assert exclude_flags == () def test_make_exclude_flags_includes_exclude_if_present_when_in_config(): exclude_flags = module._make_exclude_flags( location_config={'exclude_if_present': ['exclude_me', 'also_me']} ) assert exclude_flags == ( '--exclude-if-present', 'exclude_me', '--exclude-if-present', 'also_me', ) def test_make_exclude_flags_includes_keep_exclude_tags_when_true_in_config(): exclude_flags = module._make_exclude_flags(location_config={'keep_exclude_tags': True}) assert exclude_flags == ('--keep-exclude-tags',) def test_make_exclude_flags_does_not_include_keep_exclude_tags_when_false_in_config(): exclude_flags = module._make_exclude_flags(location_config={'keep_exclude_tags': False}) assert exclude_flags == () def test_make_exclude_flags_includes_exclude_nodump_when_true_in_config(): exclude_flags = module._make_exclude_flags(location_config={'exclude_nodump': True}) assert exclude_flags == ('--exclude-nodump',) def test_make_exclude_flags_does_not_include_exclude_nodump_when_false_in_config(): exclude_flags = 
module._make_exclude_flags(location_config={'exclude_nodump': False}) assert exclude_flags == () def test_make_exclude_flags_is_empty_when_config_has_no_excludes(): exclude_flags = module._make_exclude_flags(location_config={}) assert exclude_flags == () def test_borgmatic_source_directories_set_when_directory_exists(): flexmock(module.os.path).should_receive('exists').and_return(True) flexmock(module.os.path).should_receive('expanduser') assert module.borgmatic_source_directories('/tmp') == ['/tmp'] def test_borgmatic_source_directories_empty_when_directory_does_not_exist(): flexmock(module.os.path).should_receive('exists').and_return(False) flexmock(module.os.path).should_receive('expanduser') assert module.borgmatic_source_directories('/tmp') == [] def test_borgmatic_source_directories_defaults_when_directory_not_given(): flexmock(module.os.path).should_receive('exists').and_return(True) flexmock(module.os.path).should_receive('expanduser') assert module.borgmatic_source_directories(None) == [module.DEFAULT_BORGMATIC_SOURCE_DIRECTORY] DEFAULT_ARCHIVE_NAME = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}' ARCHIVE_WITH_PATHS = ('repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'bar') def test_create_archive_calls_borg_with_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 
'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_patterns_calls_borg_with_patterns(): pattern_flags = ('--patterns-from', 'patterns') flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return( flexmock(name='/tmp/patterns') ).and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(pattern_flags) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create') + pattern_flags + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'patterns': ['pattern'], }, storage_config={}, ) def test_create_archive_with_exclude_patterns_calls_borg_with_excludes(): exclude_flags = ('--exclude-from', 'excludes') flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(('exclude',)) flexmock(module).should_receive('_write_pattern_file').and_return(None).and_return( flexmock(name='/tmp/excludes') ) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(exclude_flags) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create') + exclude_flags + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 
'exclude_patterns': ['exclude'], }, storage_config={}, ) def test_create_archive_with_log_info_calls_borg_with_info_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--info') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_log_info_and_json_suppresses_most_borg_output(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--json') + ARCHIVE_WITH_PATHS, output_log_level=None, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, 
}, storage_config={}, json=True, ) def test_create_archive_with_log_debug_calls_borg_with_debug_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--debug', '--show-rc') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) insert_logging_mock(logging.DEBUG) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_log_debug_and_json_suppresses_most_borg_output(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--json') + ARCHIVE_WITH_PATHS, output_log_level=None, error_on_warnings=False, ) insert_logging_mock(logging.DEBUG) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, json=True, ) def test_create_archive_with_dry_run_calls_borg_with_dry_run_parameter(): 
flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--dry-run') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=True, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_stats_and_dry_run_calls_borg_without_stats_parameter(): # --dry-run and --stats are mutually exclusive, see: # https://borgbackup.readthedocs.io/en/stable/usage/create.html#description flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--info', '--dry-run') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.create_archive( dry_run=True, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, 
storage_config={}, stats=True, ) def test_create_archive_with_checkpoint_interval_calls_borg_with_checkpoint_interval_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--checkpoint-interval', '600') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'checkpoint_interval': 600}, ) def test_create_archive_with_chunker_params_calls_borg_with_chunker_params_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--chunker-params', '1,2,3,4') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'chunker_params': '1,2,3,4'}, ) def 
test_create_archive_with_compression_calls_borg_with_compression_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--compression', 'rle') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'compression': 'rle'}, ) def test_create_archive_with_remote_rate_limit_calls_borg_with_remote_ratelimit_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--remote-ratelimit', '100') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'remote_rate_limit': 100}, ) def test_create_archive_with_one_file_system_calls_borg_with_one_file_system_parameter(): 
flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--one-file-system') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'one_file_system': True, 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_numeric_owner_calls_borg_with_numeric_owner_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--numeric-owner') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'numeric_owner': True, 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_read_special_calls_borg_with_read_special_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) 
flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--read-special') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'read_special': True, 'exclude_patterns': None, }, storage_config={}, ) @pytest.mark.parametrize('option_name', ('atime', 'ctime', 'birthtime', 'bsd_flags')) def test_create_archive_with_option_true_calls_borg_without_corresponding_parameter(option_name): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], option_name: True, 'exclude_patterns': None, }, storage_config={}, ) @pytest.mark.parametrize('option_name', ('atime', 'ctime', 'birthtime', 'bsd_flags')) def test_create_archive_with_option_false_calls_borg_with_corresponding_parameter(option_name): 
flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--no' + option_name.replace('_', '')) + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], option_name: False, 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_files_cache_calls_borg_with_files_cache_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--files-cache', 'ctime,size') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'files_cache': 'ctime,size', 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_local_path_calls_borg_via_local_path(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) 
flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg1', 'create') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, local_path='borg1', ) def test_create_archive_with_remote_path_calls_borg_with_remote_path_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--remote-path', 'borg1') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, remote_path='borg1', ) def test_create_archive_with_umask_calls_borg_with_umask_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) 
flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--umask', '740') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'umask': 740}, ) def test_create_archive_with_lock_wait_calls_borg_with_lock_wait_parameters(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--lock-wait', '5') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'lock_wait': 5}, ) def test_create_archive_with_stats_calls_borg_with_stats_parameter_and_warning_output_log_level(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) 
flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--stats') + ARCHIVE_WITH_PATHS, output_log_level=logging.WARNING, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, stats=True, ) def test_create_archive_with_stats_and_log_info_calls_borg_with_stats_parameter_and_info_output_log_level(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--info', '--stats') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, stats=True, ) def test_create_archive_with_files_calls_borg_with_list_parameter_and_warning_output_log_level(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) 
flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--list', '--filter', 'AME-') + ARCHIVE_WITH_PATHS, output_log_level=logging.WARNING, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, files=True, ) def test_create_archive_with_files_and_log_info_calls_borg_with_list_parameter_and_info_output_log_level(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--list', '--filter', 'AME-', '--info') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, files=True, ) def test_create_archive_with_progress_and_log_info_calls_borg_with_progress_parameter_and_no_list(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) 
flexmock(module).should_receive('execute_command_without_capture').with_args( ('borg', 'create', '--info', '--progress') + ARCHIVE_WITH_PATHS, error_on_warnings=False ) insert_logging_mock(logging.INFO) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, progress=True, ) def test_create_archive_with_progress_calls_borg_with_progress_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command_without_capture').with_args( ('borg', 'create', '--progress') + ARCHIVE_WITH_PATHS, error_on_warnings=False ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, progress=True, ) def test_create_archive_with_json_calls_borg_with_json_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--json') + ARCHIVE_WITH_PATHS, output_log_level=None, error_on_warnings=False, ).and_return('[]') 
json_output = module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, json=True, ) assert json_output == '[]' def test_create_archive_with_stats_and_json_calls_borg_without_stats_parameter(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--json') + ARCHIVE_WITH_PATHS, output_log_level=None, error_on_warnings=False, ).and_return('[]') json_output = module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, json=True, stats=True, ) assert json_output == '[]' def test_create_archive_with_source_directories_glob_expands(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'food')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'), output_log_level=logging.INFO, error_on_warnings=False, ) flexmock(module.glob).should_receive('glob').with_args('foo*').and_return(['foo', 
'food']) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo*'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_non_matching_source_directories_glob_passes_through(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo*',)) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo*'), output_log_level=logging.INFO, error_on_warnings=False, ) flexmock(module.glob).should_receive('glob').with_args('foo*').and_return([]) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo*'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_glob_calls_borg_with_expanded_directories(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'food')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', 'repo::{}'.format(DEFAULT_ARCHIVE_NAME), 'foo', 'food'), output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo*'], 
'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={}, ) def test_create_archive_with_archive_name_format_calls_borg_with_archive_name(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', 'repo::ARCHIVE_NAME', 'foo', 'bar'), output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'archive_name_format': 'ARCHIVE_NAME'}, ) def test_create_archive_with_archive_name_format_accepts_borg_placeholders(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', 'repo::Documents_{hostname}-{now}', 'foo', 'bar'), output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'archive_name_format': 'Documents_{hostname}-{now}'}, ) def 
test_create_archive_with_extra_borg_options_calls_borg_with_extra_options(): flexmock(module).should_receive('borgmatic_source_directories').and_return([]) flexmock(module).should_receive('_expand_directories').and_return(('foo', 'bar')) flexmock(module).should_receive('_expand_home_directories').and_return(()) flexmock(module).should_receive('_write_pattern_file').and_return(None) flexmock(module).should_receive('_make_pattern_flags').and_return(()) flexmock(module).should_receive('_make_exclude_flags').and_return(()) flexmock(module).should_receive('execute_command').with_args( ('borg', 'create', '--extra', '--options') + ARCHIVE_WITH_PATHS, output_log_level=logging.INFO, error_on_warnings=False, ) module.create_archive( dry_run=False, repository='repo', location_config={ 'source_directories': ['foo', 'bar'], 'repositories': ['repo'], 'exclude_patterns': None, }, storage_config={'extra_borg_options': {'create': '--extra --options'}}, ) borgmatic-1.5.1/tests/unit/borg/test_environment.py000066400000000000000000000032671361605604600225330ustar00rootroot00000000000000import os from borgmatic.borg import environment as module def test_initialize_with_passcommand_should_set_environment(): orig_environ = os.environ try: os.environ = {} module.initialize({'encryption_passcommand': 'command'}) assert os.environ.get('BORG_PASSCOMMAND') == 'command' finally: os.environ = orig_environ def test_initialize_with_passphrase_should_set_environment(): orig_environ = os.environ try: os.environ = {} module.initialize({'encryption_passphrase': 'pass'}) assert os.environ.get('BORG_PASSPHRASE') == 'pass' finally: os.environ = orig_environ def test_initialize_with_ssh_command_should_set_environment(): orig_environ = os.environ try: os.environ = {} module.initialize({'ssh_command': 'ssh -C'}) assert os.environ.get('BORG_RSH') == 'ssh -C' finally: os.environ = orig_environ def test_initialize_without_configuration_should_only_set_default_environment(): orig_environ = os.environ try: 
os.environ = {} module.initialize({}) assert {key: value for key, value in os.environ.items() if key.startswith('BORG_')} == { 'BORG_RELOCATED_REPO_ACCESS_IS_OK': 'no', 'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK': 'no', } finally: os.environ = orig_environ def test_initialize_with_relocated_repo_access_should_override_default(): orig_environ = os.environ try: os.environ = {} module.initialize({'relocated_repo_access_is_ok': True}) assert os.environ.get('BORG_RELOCATED_REPO_ACCESS_IS_OK') == 'yes' finally: os.environ = orig_environ borgmatic-1.5.1/tests/unit/borg/test_extract.py000066400000000000000000000203571361605604600216400ustar00rootroot00000000000000import logging from flexmock import flexmock from borgmatic.borg import extract as module from ..test_verbosity import insert_logging_mock def insert_execute_command_mock(command, working_directory=None, error_on_warnings=True): flexmock(module).should_receive('execute_command').with_args( command, working_directory=working_directory, error_on_warnings=error_on_warnings ).once() def insert_execute_command_output_mock(command, result): flexmock(module).should_receive('execute_command').with_args( command, output_log_level=None, error_on_warnings=False ).and_return(result).once() def test_extract_last_archive_dry_run_calls_borg_with_last_archive(): insert_execute_command_output_mock( ('borg', 'list', '--short', 'repo'), result='archive1\narchive2\n' ) insert_execute_command_mock(('borg', 'extract', '--dry-run', 'repo::archive2')) module.extract_last_archive_dry_run(repository='repo', lock_wait=None) def test_extract_last_archive_dry_run_without_any_archives_should_not_raise(): insert_execute_command_output_mock(('borg', 'list', '--short', 'repo'), result='\n') module.extract_last_archive_dry_run(repository='repo', lock_wait=None) def test_extract_last_archive_dry_run_with_log_info_calls_borg_with_info_parameter(): insert_execute_command_output_mock( ('borg', 'list', '--short', '--info', 'repo'), 
result='archive1\narchive2\n' ) insert_execute_command_mock(('borg', 'extract', '--dry-run', '--info', 'repo::archive2')) insert_logging_mock(logging.INFO) module.extract_last_archive_dry_run(repository='repo', lock_wait=None) def test_extract_last_archive_dry_run_with_log_debug_calls_borg_with_debug_parameter(): insert_execute_command_output_mock( ('borg', 'list', '--short', '--debug', '--show-rc', 'repo'), result='archive1\narchive2\n' ) insert_execute_command_mock( ('borg', 'extract', '--dry-run', '--debug', '--show-rc', '--list', 'repo::archive2') ) insert_logging_mock(logging.DEBUG) module.extract_last_archive_dry_run(repository='repo', lock_wait=None) def test_extract_last_archive_dry_run_calls_borg_via_local_path(): insert_execute_command_output_mock( ('borg1', 'list', '--short', 'repo'), result='archive1\narchive2\n' ) insert_execute_command_mock(('borg1', 'extract', '--dry-run', 'repo::archive2')) module.extract_last_archive_dry_run(repository='repo', lock_wait=None, local_path='borg1') def test_extract_last_archive_dry_run_calls_borg_with_remote_path_parameters(): insert_execute_command_output_mock( ('borg', 'list', '--short', '--remote-path', 'borg1', 'repo'), result='archive1\narchive2\n' ) insert_execute_command_mock( ('borg', 'extract', '--dry-run', '--remote-path', 'borg1', 'repo::archive2') ) module.extract_last_archive_dry_run(repository='repo', lock_wait=None, remote_path='borg1') def test_extract_last_archive_dry_run_calls_borg_with_lock_wait_parameters(): insert_execute_command_output_mock( ('borg', 'list', '--short', '--lock-wait', '5', 'repo'), result='archive1\narchive2\n' ) insert_execute_command_mock( ('borg', 'extract', '--dry-run', '--lock-wait', '5', 'repo::archive2') ) module.extract_last_archive_dry_run(repository='repo', lock_wait=5) def test_extract_archive_calls_borg_with_path_parameters(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', 'repo::archive', 'path1', 
'path2')) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=['path1', 'path2'], location_config={}, storage_config={}, ) def test_extract_archive_calls_borg_with_remote_path_parameters(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', '--remote-path', 'borg1', 'repo::archive')) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, storage_config={}, remote_path='borg1', ) def test_extract_archive_calls_borg_with_numeric_owner_parameter(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', '--numeric-owner', 'repo::archive')) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={'numeric_owner': True}, storage_config={}, ) def test_extract_archive_calls_borg_with_umask_parameters(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', '--umask', '0770', 'repo::archive')) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, storage_config={'umask': '0770'}, ) def test_extract_archive_calls_borg_with_lock_wait_parameters(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', '--lock-wait', '5', 'repo::archive')) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, storage_config={'lock_wait': '5'}, ) def test_extract_archive_with_log_info_calls_borg_with_info_parameter(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', '--info', 'repo::archive')) insert_logging_mock(logging.INFO) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, 
storage_config={}, ) def test_extract_archive_with_log_debug_calls_borg_with_debug_parameters(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock( ('borg', 'extract', '--debug', '--list', '--show-rc', 'repo::archive') ) insert_logging_mock(logging.DEBUG) module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, storage_config={}, ) def test_extract_archive_calls_borg_with_dry_run_parameter(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', '--dry-run', 'repo::archive')) module.extract_archive( dry_run=True, repository='repo', archive='archive', paths=None, location_config={}, storage_config={}, ) def test_extract_archive_calls_borg_with_destination_path(): flexmock(module.os.path).should_receive('abspath').and_return('repo') insert_execute_command_mock(('borg', 'extract', 'repo::archive'), working_directory='/dest') module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, storage_config={}, destination_path='/dest', ) def test_extract_archive_calls_borg_with_progress_parameter(): flexmock(module.os.path).should_receive('abspath').and_return('repo') flexmock(module).should_receive('execute_command_without_capture').with_args( ('borg', 'extract', '--progress', 'repo::archive'), working_directory=None, error_on_warnings=True, ).once() module.extract_archive( dry_run=False, repository='repo', archive='archive', paths=None, location_config={}, storage_config={}, progress=True, ) def test_extract_archive_skips_abspath_for_remote_repository(): flexmock(module.os.path).should_receive('abspath').never() flexmock(module).should_receive('execute_command').with_args( ('borg', 'extract', 'server:repo::archive'), working_directory=None, error_on_warnings=True ).once() module.extract_archive( dry_run=False, repository='server:repo', archive='archive', 
paths=None, location_config={}, storage_config={}, ) borgmatic-1.5.1/tests/unit/borg/test_flags.py000066400000000000000000000031741361605604600212600ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.borg import flags as module def test_make_flags_formats_string_value(): assert module.make_flags('foo', 'bar') == ('--foo', 'bar') def test_make_flags_formats_integer_value(): assert module.make_flags('foo', 3) == ('--foo', '3') def test_make_flags_formats_true_value(): assert module.make_flags('foo', True) == ('--foo',) def test_make_flags_omits_false_value(): assert module.make_flags('foo', False) == () def test_make_flags_formats_name_with_underscore(): assert module.make_flags('posix_me_harder', 'okay') == ('--posix-me-harder', 'okay') def test_make_flags_from_arguments_flattens_and_sorts_multiple_arguments(): flexmock(module).should_receive('make_flags').with_args('foo', 'bar').and_return(('foo', 'bar')) flexmock(module).should_receive('make_flags').with_args('baz', 'quux').and_return( ('baz', 'quux') ) arguments = flexmock(foo='bar', baz='quux') assert module.make_flags_from_arguments(arguments) == ('baz', 'quux', 'foo', 'bar') def test_make_flags_from_arguments_excludes_underscored_argument_names(): flexmock(module).should_receive('make_flags').with_args('foo', 'bar').and_return(('foo', 'bar')) arguments = flexmock(foo='bar', _baz='quux') assert module.make_flags_from_arguments(arguments) == ('foo', 'bar') def test_make_flags_from_arguments_omits_excludes(): flexmock(module).should_receive('make_flags').with_args('foo', 'bar').and_return(('foo', 'bar')) arguments = flexmock(foo='bar', baz='quux') assert module.make_flags_from_arguments(arguments, excludes=('baz', 'other')) == ('foo', 'bar') borgmatic-1.5.1/tests/unit/borg/test_info.py000066400000000000000000000121611361605604600211130ustar00rootroot00000000000000import logging import pytest from flexmock import flexmock from borgmatic.borg import info as module from ..test_verbosity import 
insert_logging_mock def test_display_archives_info_calls_borg_with_parameters(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False ) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=False) ) def test_display_archives_info_with_log_info_calls_borg_with_info_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--info', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=False) ) def test_display_archives_info_with_log_info_and_json_suppresses_most_borg_output(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--json', 'repo'), output_log_level=None, error_on_warnings=False ).and_return('[]') insert_logging_mock(logging.INFO) json_output = module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=True) ) assert json_output == '[]' def test_display_archives_info_with_log_debug_calls_borg_with_debug_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--debug', '--show-rc', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) insert_logging_mock(logging.DEBUG) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=False) ) def test_display_archives_info_with_log_debug_and_json_suppresses_most_borg_output(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--json', 'repo'), output_log_level=None, error_on_warnings=False ).and_return('[]') insert_logging_mock(logging.DEBUG) json_output = module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, 
json=True) ) assert json_output == '[]' def test_display_archives_info_with_json_calls_borg_with_json_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--json', 'repo'), output_log_level=None, error_on_warnings=False ).and_return('[]') json_output = module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=True) ) assert json_output == '[]' def test_display_archives_info_with_archive_calls_borg_with_archive_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', 'repo::archive'), output_log_level=logging.WARNING, error_on_warnings=False ) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive='archive', json=False) ) def test_display_archives_info_with_local_path_calls_borg_via_local_path(): flexmock(module).should_receive('execute_command').with_args( ('borg1', 'info', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False ) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=False), local_path='borg1', ) def test_display_archives_info_with_remote_path_calls_borg_with_remote_path_parameters(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--remote-path', 'borg1', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=False), remote_path='borg1', ) def test_display_archives_info_with_lock_wait_calls_borg_with_lock_wait_parameters(): storage_config = {'lock_wait': 5} flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--lock-wait', '5', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) module.display_archives_info( repository='repo', storage_config=storage_config, info_arguments=flexmock(archive=None, json=False), ) 
@pytest.mark.parametrize('argument_name', ('prefix', 'glob_archives', 'sort_by', 'first', 'last')) def test_display_archives_info_passes_through_arguments_to_borg(argument_name): flexmock(module).should_receive('execute_command').with_args( ('borg', 'info', '--' + argument_name.replace('_', '-'), 'value', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) module.display_archives_info( repository='repo', storage_config={}, info_arguments=flexmock(archive=None, json=False, **{argument_name: 'value'}), ) borgmatic-1.5.1/tests/unit/borg/test_init.py000066400000000000000000000106771361605604600211350ustar00rootroot00000000000000import logging import subprocess import pytest from flexmock import flexmock from borgmatic.borg import init as module from ..test_verbosity import insert_logging_mock INFO_SOME_UNKNOWN_EXIT_CODE = -999 INIT_COMMAND = ('borg', 'init', '--encryption', 'repokey') def insert_info_command_found_mock(): flexmock(module).should_receive('execute_command') def insert_info_command_not_found_mock(): flexmock(module).should_receive('execute_command').and_raise( subprocess.CalledProcessError(module.INFO_REPOSITORY_NOT_FOUND_EXIT_CODE, []) ) def insert_init_command_mock(init_command, **kwargs): flexmock(module).should_receive('execute_command_without_capture').with_args( init_command, error_on_warnings=False ).once() def test_initialize_repository_calls_borg_with_parameters(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('repo',)) module.initialize_repository(repository='repo', storage_config={}, encryption_mode='repokey') def test_initialize_repository_raises_for_borg_init_error(): insert_info_command_not_found_mock() flexmock(module).should_receive('execute_command_without_capture').and_raise( module.subprocess.CalledProcessError(2, 'borg init') ) with pytest.raises(subprocess.CalledProcessError): module.initialize_repository( repository='repo', storage_config={}, encryption_mode='repokey' ) def 
test_initialize_repository_skips_initialization_when_repository_already_exists(): insert_info_command_found_mock() flexmock(module).should_receive('execute_command_without_capture').never() module.initialize_repository(repository='repo', storage_config={}, encryption_mode='repokey') def test_initialize_repository_raises_for_unknown_info_command_error(): flexmock(module).should_receive('execute_command').and_raise( subprocess.CalledProcessError(INFO_SOME_UNKNOWN_EXIT_CODE, []) ) with pytest.raises(subprocess.CalledProcessError): module.initialize_repository( repository='repo', storage_config={}, encryption_mode='repokey' ) def test_initialize_repository_with_append_only_calls_borg_with_append_only_parameter(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('--append-only', 'repo')) module.initialize_repository( repository='repo', storage_config={}, encryption_mode='repokey', append_only=True ) def test_initialize_repository_with_storage_quota_calls_borg_with_storage_quota_parameter(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('--storage-quota', '5G', 'repo')) module.initialize_repository( repository='repo', storage_config={}, encryption_mode='repokey', storage_quota='5G' ) def test_initialize_repository_with_log_info_calls_borg_with_info_parameter(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('--info', 'repo')) insert_logging_mock(logging.INFO) module.initialize_repository(repository='repo', storage_config={}, encryption_mode='repokey') def test_initialize_repository_with_log_debug_calls_borg_with_debug_parameter(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('--debug', 'repo')) insert_logging_mock(logging.DEBUG) module.initialize_repository(repository='repo', storage_config={}, encryption_mode='repokey') def test_initialize_repository_with_local_path_calls_borg_via_local_path(): insert_info_command_not_found_mock() 
insert_init_command_mock(('borg1',) + INIT_COMMAND[1:] + ('repo',)) module.initialize_repository( repository='repo', storage_config={}, encryption_mode='repokey', local_path='borg1' ) def test_initialize_repository_with_remote_path_calls_borg_with_remote_path_parameter(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('--remote-path', 'borg1', 'repo')) module.initialize_repository( repository='repo', storage_config={}, encryption_mode='repokey', remote_path='borg1' ) def test_initialize_repository_with_extra_borg_options_calls_borg_with_extra_options(): insert_info_command_not_found_mock() insert_init_command_mock(INIT_COMMAND + ('--extra', '--options', 'repo')) module.initialize_repository( repository='repo', storage_config={'extra_borg_options': {'init': '--extra --options'}}, encryption_mode='repokey', ) borgmatic-1.5.1/tests/unit/borg/test_list.py000066400000000000000000000247771361605604600211530ustar00rootroot00000000000000import logging import pytest from flexmock import flexmock from borgmatic.borg import list as module from ..test_verbosity import insert_logging_mock BORG_LIST_LATEST_ARGUMENTS = ( '--glob-archives', module.BORG_EXCLUDE_CHECKPOINTS_GLOB, '--last', '1', '--short', 'repo', ) def test_resolve_archive_name_passes_through_non_latest_archive_name(): archive = 'myhost-2030-01-01T14:41:17.647620' assert module.resolve_archive_name('repo', archive, storage_config={}) == archive def test_resolve_archive_name_calls_borg_with_parameters(): expected_archive = 'archive-name' flexmock(module).should_receive('execute_command').with_args( ('borg', 'list') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return(expected_archive + '\n') assert module.resolve_archive_name('repo', 'latest', storage_config={}) == expected_archive def test_resolve_archive_name_with_log_info_calls_borg_with_info_parameter(): expected_archive = 'archive-name' 
flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--info') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return(expected_archive + '\n') insert_logging_mock(logging.INFO) assert module.resolve_archive_name('repo', 'latest', storage_config={}) == expected_archive def test_resolve_archive_name_with_log_debug_calls_borg_with_debug_parameter(): expected_archive = 'archive-name' flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--debug', '--show-rc') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return(expected_archive + '\n') insert_logging_mock(logging.DEBUG) assert module.resolve_archive_name('repo', 'latest', storage_config={}) == expected_archive def test_resolve_archive_name_with_local_path_calls_borg_via_local_path(): expected_archive = 'archive-name' flexmock(module).should_receive('execute_command').with_args( ('borg1', 'list') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return(expected_archive + '\n') assert ( module.resolve_archive_name('repo', 'latest', storage_config={}, local_path='borg1') == expected_archive ) def test_resolve_archive_name_with_remote_path_calls_borg_with_remote_path_parameters(): expected_archive = 'archive-name' flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--remote-path', 'borg1') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return(expected_archive + '\n') assert ( module.resolve_archive_name('repo', 'latest', storage_config={}, remote_path='borg1') == expected_archive ) def test_resolve_archive_name_without_archives_raises(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return('') with pytest.raises(ValueError): module.resolve_archive_name('repo', 'latest', 
storage_config={}) def test_resolve_archive_name_with_lock_wait_calls_borg_with_lock_wait_parameters(): expected_archive = 'archive-name' flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--lock-wait', 'okay') + BORG_LIST_LATEST_ARGUMENTS, output_log_level=None, error_on_warnings=False, ).and_return(expected_archive + '\n') assert ( module.resolve_archive_name('repo', 'latest', storage_config={'lock_wait': 'okay'}) == expected_archive ) def test_list_archives_calls_borg_with_parameters(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False ) module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=False, successful=False), ) def test_list_archives_with_log_info_calls_borg_with_info_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--info', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) insert_logging_mock(logging.INFO) module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=False, successful=False), ) def test_list_archives_with_log_info_and_json_suppresses_most_borg_output(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--json', 'repo'), output_log_level=None, error_on_warnings=False ) insert_logging_mock(logging.INFO) module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=True, successful=False), ) def test_list_archives_with_log_debug_calls_borg_with_debug_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--debug', '--show-rc', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) insert_logging_mock(logging.DEBUG) module.list_archives( repository='repo', storage_config={}, 
list_arguments=flexmock(archive=None, paths=None, json=False, successful=False), ) def test_list_archives_with_log_debug_and_json_suppresses_most_borg_output(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--json', 'repo'), output_log_level=None, error_on_warnings=False ) insert_logging_mock(logging.DEBUG) module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=True, successful=False), ) def test_list_archives_with_lock_wait_calls_borg_with_lock_wait_parameters(): storage_config = {'lock_wait': 5} flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--lock-wait', '5', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) module.list_archives( repository='repo', storage_config=storage_config, list_arguments=flexmock(archive=None, paths=None, json=False, successful=False), ) def test_list_archives_with_archive_calls_borg_with_archive_parameter(): storage_config = {} flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', 'repo::archive'), output_log_level=logging.WARNING, error_on_warnings=False ) module.list_archives( repository='repo', storage_config=storage_config, list_arguments=flexmock(archive='archive', paths=None, json=False, successful=False), ) def test_list_archives_with_path_calls_borg_with_path_parameter(): storage_config = {} flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', 'repo::archive', 'var/lib'), output_log_level=logging.WARNING, error_on_warnings=False, ) module.list_archives( repository='repo', storage_config=storage_config, list_arguments=flexmock(archive='archive', paths=['var/lib'], json=False, successful=False), ) def test_list_archives_with_local_path_calls_borg_via_local_path(): flexmock(module).should_receive('execute_command').with_args( ('borg1', 'list', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False ) module.list_archives( 
repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=False, successful=False), local_path='borg1', ) def test_list_archives_with_remote_path_calls_borg_with_remote_path_parameters(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--remote-path', 'borg1', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ) module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=False, successful=False), remote_path='borg1', ) def test_list_archives_with_short_calls_borg_with_short_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--short', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ).and_return('[]') module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=False, successful=False, short=True), ) @pytest.mark.parametrize( 'argument_name', ( 'prefix', 'glob_archives', 'sort_by', 'first', 'last', 'exclude', 'exclude_from', 'pattern', 'patterns_from', ), ) def test_list_archives_passes_through_arguments_to_borg(argument_name): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--' + argument_name.replace('_', '-'), 'value', 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ).and_return('[]') module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock( archive=None, paths=None, json=False, successful=False, **{argument_name: 'value'} ), ) def test_list_archives_with_successful_calls_borg_to_exclude_checkpoints(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--glob-archives', module.BORG_EXCLUDE_CHECKPOINTS_GLOB, 'repo'), output_log_level=logging.WARNING, error_on_warnings=False, ).and_return('[]') module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, 
json=False, successful=True), ) def test_list_archives_with_json_calls_borg_with_json_parameter(): flexmock(module).should_receive('execute_command').with_args( ('borg', 'list', '--json', 'repo'), output_log_level=None, error_on_warnings=False ).and_return('[]') json_output = module.list_archives( repository='repo', storage_config={}, list_arguments=flexmock(archive=None, paths=None, json=True, successful=False), ) assert json_output == '[]' borgmatic-1.5.1/tests/unit/borg/test_mount.py000066400000000000000000000074661361605604600213360ustar00rootroot00000000000000import logging from flexmock import flexmock from borgmatic.borg import mount as module from ..test_verbosity import insert_logging_mock def insert_execute_command_mock(command): flexmock(module).should_receive('execute_command').with_args( command, error_on_warnings=False ).once() def test_mount_archive_calls_borg_with_required_parameters(): insert_execute_command_mock(('borg', 'mount', 'repo::archive', '/mnt')) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=False, options=None, storage_config={}, ) def test_mount_archive_calls_borg_with_path_parameters(): insert_execute_command_mock(('borg', 'mount', 'repo::archive', '/mnt', 'path1', 'path2')) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=['path1', 'path2'], foreground=False, options=None, storage_config={}, ) def test_mount_archive_calls_borg_with_remote_path_parameters(): insert_execute_command_mock( ('borg', 'mount', '--remote-path', 'borg1', 'repo::archive', '/mnt') ) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=False, options=None, storage_config={}, remote_path='borg1', ) def test_mount_archive_calls_borg_with_umask_parameters(): insert_execute_command_mock(('borg', 'mount', '--umask', '0770', 'repo::archive', '/mnt')) module.mount_archive( repository='repo', archive='archive', 
mount_point='/mnt', paths=None, foreground=False, options=None, storage_config={'umask': '0770'}, ) def test_mount_archive_calls_borg_with_lock_wait_parameters(): insert_execute_command_mock(('borg', 'mount', '--lock-wait', '5', 'repo::archive', '/mnt')) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=False, options=None, storage_config={'lock_wait': '5'}, ) def test_mount_archive_with_log_info_calls_borg_with_info_parameter(): insert_execute_command_mock(('borg', 'mount', '--info', 'repo::archive', '/mnt')) insert_logging_mock(logging.INFO) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=False, options=None, storage_config={}, ) def test_mount_archive_with_log_debug_calls_borg_with_debug_parameters(): insert_execute_command_mock(('borg', 'mount', '--debug', '--show-rc', 'repo::archive', '/mnt')) insert_logging_mock(logging.DEBUG) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=False, options=None, storage_config={}, ) def test_mount_archive_calls_borg_with_foreground_parameter(): flexmock(module).should_receive('execute_command_without_capture').with_args( ('borg', 'mount', '--foreground', 'repo::archive', '/mnt'), error_on_warnings=False ).once() module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=True, options=None, storage_config={}, ) def test_mount_archive_calls_borg_with_options_parameters(): insert_execute_command_mock(('borg', 'mount', '-o', 'super_mount', 'repo::archive', '/mnt')) module.mount_archive( repository='repo', archive='archive', mount_point='/mnt', paths=None, foreground=False, options='super_mount', storage_config={}, ) borgmatic-1.5.1/tests/unit/borg/test_prune.py000066400000000000000000000204651361605604600213170ustar00rootroot00000000000000import logging from collections import OrderedDict from flexmock import flexmock from 
borgmatic.borg import prune as module from ..test_verbosity import insert_logging_mock def insert_execute_command_mock(prune_command, output_log_level): flexmock(module).should_receive('execute_command').with_args( prune_command, output_log_level=output_log_level, error_on_warnings=False ).once() BASE_PRUNE_FLAGS = (('--keep-daily', '1'), ('--keep-weekly', '2'), ('--keep-monthly', '3')) def test_make_prune_flags_returns_flags_from_config_plus_default_prefix(): retention_config = OrderedDict((('keep_daily', 1), ('keep_weekly', 2), ('keep_monthly', 3))) result = module._make_prune_flags(retention_config) assert tuple(result) == BASE_PRUNE_FLAGS + (('--prefix', '{hostname}-'),) def test_make_prune_flags_accepts_prefix_with_placeholders(): retention_config = OrderedDict((('keep_daily', 1), ('prefix', 'Documents_{hostname}-{now}'))) result = module._make_prune_flags(retention_config) expected = (('--keep-daily', '1'), ('--prefix', 'Documents_{hostname}-{now}')) assert tuple(result) == expected def test_make_prune_flags_treats_empty_prefix_as_no_prefix(): retention_config = OrderedDict((('keep_daily', 1), ('prefix', ''))) result = module._make_prune_flags(retention_config) expected = (('--keep-daily', '1'),) assert tuple(result) == expected def test_make_prune_flags_treats_none_prefix_as_no_prefix(): retention_config = OrderedDict((('keep_daily', 1), ('prefix', None))) result = module._make_prune_flags(retention_config) expected = (('--keep-daily', '1'),) assert tuple(result) == expected PRUNE_COMMAND = ('borg', 'prune', '--keep-daily', '1', '--keep-weekly', '2', '--keep-monthly', '3') def test_prune_archives_calls_borg_with_parameters(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('repo',), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config={}, retention_config=retention_config ) def 
test_prune_archives_with_log_info_calls_borg_with_info_parameter(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--info', 'repo'), logging.INFO) insert_logging_mock(logging.INFO) module.prune_archives( repository='repo', storage_config={}, dry_run=False, retention_config=retention_config ) def test_prune_archives_with_log_debug_calls_borg_with_debug_parameter(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--debug', '--show-rc', 'repo'), logging.INFO) insert_logging_mock(logging.DEBUG) module.prune_archives( repository='repo', storage_config={}, dry_run=False, retention_config=retention_config ) def test_prune_archives_with_dry_run_calls_borg_with_dry_run_parameter(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--dry-run', 'repo'), logging.INFO) module.prune_archives( repository='repo', storage_config={}, dry_run=True, retention_config=retention_config ) def test_prune_archives_with_local_path_calls_borg_via_local_path(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(('borg1',) + PRUNE_COMMAND[1:] + ('repo',), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config={}, retention_config=retention_config, local_path='borg1', ) def test_prune_archives_with_remote_path_calls_borg_with_remote_path_parameters(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + 
('--remote-path', 'borg1', 'repo'), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config={}, retention_config=retention_config, remote_path='borg1', ) def test_prune_archives_with_stats_calls_borg_with_stats_parameter_and_warning_output_log_level(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--stats', 'repo'), logging.WARNING) module.prune_archives( dry_run=False, repository='repo', storage_config={}, retention_config=retention_config, stats=True, ) def test_prune_archives_with_stats_and_log_info_calls_borg_with_stats_parameter_and_info_output_log_level(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_logging_mock(logging.INFO) insert_execute_command_mock(PRUNE_COMMAND + ('--stats', '--info', 'repo'), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config={}, retention_config=retention_config, stats=True, ) def test_prune_archives_with_files_calls_borg_with_list_parameter_and_warning_output_log_level(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--list', 'repo'), logging.WARNING) module.prune_archives( dry_run=False, repository='repo', storage_config={}, retention_config=retention_config, files=True, ) def test_prune_archives_with_files_and_log_info_calls_borg_with_list_parameter_and_info_output_log_level(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_logging_mock(logging.INFO) insert_execute_command_mock(PRUNE_COMMAND + ('--info', '--list', 'repo'), logging.INFO) module.prune_archives( dry_run=False, 
repository='repo', storage_config={}, retention_config=retention_config, files=True, ) def test_prune_archives_with_umask_calls_borg_with_umask_parameters(): storage_config = {'umask': '077'} retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--umask', '077', 'repo'), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config=storage_config, retention_config=retention_config, ) def test_prune_archives_with_lock_wait_calls_borg_with_lock_wait_parameters(): storage_config = {'lock_wait': 5} retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--lock-wait', '5', 'repo'), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config=storage_config, retention_config=retention_config, ) def test_prune_archives_with_extra_borg_options_calls_borg_with_extra_options(): retention_config = flexmock() flexmock(module).should_receive('_make_prune_flags').with_args(retention_config).and_return( BASE_PRUNE_FLAGS ) insert_execute_command_mock(PRUNE_COMMAND + ('--extra', '--options', 'repo'), logging.INFO) module.prune_archives( dry_run=False, repository='repo', storage_config={'extra_borg_options': {'prune': '--extra --options'}}, retention_config=retention_config, ) borgmatic-1.5.1/tests/unit/borg/test_umount.py000066400000000000000000000016761361605604600215200ustar00rootroot00000000000000import logging from flexmock import flexmock from borgmatic.borg import umount as module from ..test_verbosity import insert_logging_mock def insert_execute_command_mock(command): flexmock(module).should_receive('execute_command').with_args( command, error_on_warnings=True ).once() def test_unmount_archive_calls_borg_with_required_parameters(): 
insert_execute_command_mock(('borg', 'umount', '/mnt')) module.unmount_archive(mount_point='/mnt') def test_unmount_archive_with_log_info_calls_borg_with_info_parameter(): insert_execute_command_mock(('borg', 'umount', '--info', '/mnt')) insert_logging_mock(logging.INFO) module.unmount_archive(mount_point='/mnt') def test_unmount_archive_with_log_debug_calls_borg_with_debug_parameters(): insert_execute_command_mock(('borg', 'umount', '--debug', '--show-rc', '/mnt')) insert_logging_mock(logging.DEBUG) module.unmount_archive(mount_point='/mnt') borgmatic-1.5.1/tests/unit/commands/000077500000000000000000000000001361605604600174165ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/commands/__init__.py000066400000000000000000000000001361605604600215150ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/commands/test_arguments.py000066400000000000000000000116201361605604600230340ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.commands import arguments as module def test_parse_subparser_arguments_consumes_subparser_arguments_before_subparser_name(): action_namespace = flexmock(foo=True) subparsers = flexmock( choices={ 'action': flexmock(parse_known_args=lambda arguments: (action_namespace, [])), 'other': flexmock(), } ) arguments = module.parse_subparser_arguments(('--foo', 'true', 'action'), subparsers) assert arguments == {'action': action_namespace} def test_parse_subparser_arguments_consumes_subparser_arguments_after_subparser_name(): action_namespace = flexmock(foo=True) subparsers = flexmock( choices={ 'action': flexmock(parse_known_args=lambda arguments: (action_namespace, [])), 'other': flexmock(), } ) arguments = module.parse_subparser_arguments(('action', '--foo', 'true'), subparsers) assert arguments == {'action': action_namespace} def test_parse_subparser_arguments_consumes_subparser_arguments_with_alias(): action_namespace = flexmock(foo=True) action_subparser = flexmock(parse_known_args=lambda arguments: 
(action_namespace, [])) subparsers = flexmock( choices={ 'action': action_subparser, '-a': action_subparser, 'other': flexmock(), '-o': flexmock(), } ) flexmock(module).SUBPARSER_ALIASES = {'action': ['-a'], 'other': ['-o']} arguments = module.parse_subparser_arguments(('-a', '--foo', 'true'), subparsers) assert arguments == {'action': action_namespace} def test_parse_subparser_arguments_consumes_multiple_subparser_arguments(): action_namespace = flexmock(foo=True) other_namespace = flexmock(bar=3) subparsers = flexmock( choices={ 'action': flexmock( parse_known_args=lambda arguments: (action_namespace, ['--bar', '3']) ), 'other': flexmock(parse_known_args=lambda arguments: (other_namespace, [])), } ) arguments = module.parse_subparser_arguments( ('action', '--foo', 'true', 'other', '--bar', '3'), subparsers ) assert arguments == {'action': action_namespace, 'other': other_namespace} def test_parse_subparser_arguments_applies_default_subparsers(): prune_namespace = flexmock() create_namespace = flexmock(progress=True) check_namespace = flexmock() subparsers = flexmock( choices={ 'prune': flexmock(parse_known_args=lambda arguments: (prune_namespace, ['--progress'])), 'create': flexmock(parse_known_args=lambda arguments: (create_namespace, [])), 'check': flexmock(parse_known_args=lambda arguments: (check_namespace, [])), 'other': flexmock(), } ) arguments = module.parse_subparser_arguments(('--progress'), subparsers) assert arguments == { 'prune': prune_namespace, 'create': create_namespace, 'check': check_namespace, } def test_parse_global_arguments_with_help_does_not_apply_default_subparsers(): global_namespace = flexmock(verbosity='lots') action_namespace = flexmock() top_level_parser = flexmock(parse_args=lambda arguments: global_namespace) subparsers = flexmock( choices={ 'action': flexmock( parse_known_args=lambda arguments: (action_namespace, ['--verbosity', 'lots']) ), 'other': flexmock(), } ) arguments = module.parse_global_arguments( ('--verbosity', 'lots', 
'--help'), top_level_parser, subparsers ) assert arguments == global_namespace def test_parse_global_arguments_consumes_global_arguments_before_subparser_name(): global_namespace = flexmock(verbosity='lots') action_namespace = flexmock() top_level_parser = flexmock(parse_args=lambda arguments: global_namespace) subparsers = flexmock( choices={ 'action': flexmock( parse_known_args=lambda arguments: (action_namespace, ['--verbosity', 'lots']) ), 'other': flexmock(), } ) arguments = module.parse_global_arguments( ('--verbosity', 'lots', 'action'), top_level_parser, subparsers ) assert arguments == global_namespace def test_parse_global_arguments_consumes_global_arguments_after_subparser_name(): global_namespace = flexmock(verbosity='lots') action_namespace = flexmock() top_level_parser = flexmock(parse_args=lambda arguments: global_namespace) subparsers = flexmock( choices={ 'action': flexmock( parse_known_args=lambda arguments: (action_namespace, ['--verbosity', 'lots']) ), 'other': flexmock(), } ) arguments = module.parse_global_arguments( ('action', '--verbosity', 'lots'), top_level_parser, subparsers ) assert arguments == global_namespace borgmatic-1.5.1/tests/unit/commands/test_borgmatic.py000066400000000000000000000435451361605604600230110ustar00rootroot00000000000000import logging import subprocess from flexmock import flexmock import borgmatic.hooks.command from borgmatic.commands import borgmatic as module def test_run_configuration_runs_actions_for_each_repository(): flexmock(module.borg_environment).should_receive('initialize') expected_results = [flexmock(), flexmock()] flexmock(module).should_receive('run_actions').and_return(expected_results[:1]).and_return( expected_results[1:] ) config = {'location': {'repositories': ['foo', 'bar']}} arguments = {'global': flexmock(monitoring_verbosity=1)} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == expected_results def 
test_run_configuration_calls_hooks_for_prune_action(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').twice() flexmock(module.dispatch).should_receive('call_hooks').at_least().twice() flexmock(module).should_receive('run_actions').and_return([]) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'prune': flexmock()} list(module.run_configuration('test.yaml', config, arguments)) def test_run_configuration_executes_and_calls_hooks_for_create_action(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').twice() flexmock(module.dispatch).should_receive('call_hooks').at_least().twice() flexmock(module).should_receive('run_actions').and_return([]) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} list(module.run_configuration('test.yaml', config, arguments)) def test_run_configuration_calls_hooks_for_check_action(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').twice() flexmock(module.dispatch).should_receive('call_hooks').at_least().twice() flexmock(module).should_receive('run_actions').and_return([]) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'check': flexmock()} list(module.run_configuration('test.yaml', config, arguments)) def test_run_configuration_does_not_trigger_hooks_for_list_action(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').never() flexmock(module.dispatch).should_receive('call_hooks').never() flexmock(module).should_receive('run_actions').and_return([]) config = {'location': {'repositories': ['foo']}} arguments = {'global': 
flexmock(monitoring_verbosity=1, dry_run=False), 'list': flexmock()} list(module.run_configuration('test.yaml', config, arguments)) def test_run_configuration_logs_actions_error(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook') flexmock(module.dispatch).should_receive('call_hooks') expected_results = [flexmock()] flexmock(module).should_receive('make_error_log_records').and_return(expected_results) flexmock(module).should_receive('run_actions').and_raise(OSError) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False)} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == expected_results def test_run_configuration_logs_pre_hook_error(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').and_raise(OSError).and_return(None) expected_results = [flexmock()] flexmock(module).should_receive('make_error_log_records').and_return(expected_results) flexmock(module).should_receive('run_actions').never() config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == expected_results def test_run_configuration_bails_for_pre_hook_soft_failure(): flexmock(module.borg_environment).should_receive('initialize') error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again') flexmock(module.command).should_receive('execute_hook').and_raise(error).and_return(None) flexmock(module).should_receive('make_error_log_records').never() flexmock(module).should_receive('run_actions').never() config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} results = 
list(module.run_configuration('test.yaml', config, arguments)) assert results == [] def test_run_configuration_logs_post_hook_error(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise( OSError ).and_return(None) flexmock(module.dispatch).should_receive('call_hooks') expected_results = [flexmock()] flexmock(module).should_receive('make_error_log_records').and_return(expected_results) flexmock(module).should_receive('run_actions').and_return([]) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == expected_results def test_run_configuration_bails_for_post_hook_soft_failure(): flexmock(module.borg_environment).should_receive('initialize') error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again') flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise( error ).and_return(None) flexmock(module.dispatch).should_receive('call_hooks') flexmock(module).should_receive('make_error_log_records').never() flexmock(module).should_receive('run_actions').and_return([]) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == [] def test_run_configuration_logs_on_error_hook_error(): flexmock(module.borg_environment).should_receive('initialize') flexmock(module.command).should_receive('execute_hook').and_raise(OSError) expected_results = [flexmock(), flexmock()] flexmock(module).should_receive('make_error_log_records').and_return( expected_results[:1] ).and_return(expected_results[1:]) flexmock(module).should_receive('run_actions').and_raise(OSError) config = {'location': 
{'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == expected_results def test_run_configuration_bails_for_on_error_hook_soft_failure(): flexmock(module.borg_environment).should_receive('initialize') error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again') flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(error) expected_results = [flexmock()] flexmock(module).should_receive('make_error_log_records').and_return(expected_results) flexmock(module).should_receive('run_actions').and_raise(OSError) config = {'location': {'repositories': ['foo']}} arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()} results = list(module.run_configuration('test.yaml', config, arguments)) assert results == expected_results def test_load_configurations_collects_parsed_configurations(): configuration = flexmock() other_configuration = flexmock() flexmock(module.validate).should_receive('parse_configuration').and_return( configuration ).and_return(other_configuration) configs, logs = tuple(module.load_configurations(('test.yaml', 'other.yaml'))) assert configs == {'test.yaml': configuration, 'other.yaml': other_configuration} assert logs == [] def test_load_configurations_logs_critical_for_parse_error(): flexmock(module.validate).should_receive('parse_configuration').and_raise(ValueError) configs, logs = tuple(module.load_configurations(('test.yaml',))) assert configs == {} assert {log.levelno for log in logs} == {logging.CRITICAL} def test_log_record_does_not_raise(): module.log_record(levelno=1, foo='bar', baz='quux') def test_log_record_with_suppress_does_not_raise(): module.log_record(levelno=1, foo='bar', baz='quux', suppress_log=True) def test_make_error_log_records_generates_output_logs_for_message_only(): 
flexmock(module).should_receive('log_record').replace_with(dict) logs = tuple(module.make_error_log_records('Error')) assert {log['levelno'] for log in logs} == {logging.CRITICAL} def test_make_error_log_records_generates_output_logs_for_called_process_error(): flexmock(module).should_receive('log_record').replace_with(dict) flexmock(module.logger).should_receive('getEffectiveLevel').and_return(logging.WARNING) logs = tuple( module.make_error_log_records( 'Error', subprocess.CalledProcessError(1, 'ls', 'error output') ) ) assert {log['levelno'] for log in logs} == {logging.CRITICAL} assert any(log for log in logs if 'error output' in str(log)) def test_make_error_log_records_generates_logs_for_value_error(): flexmock(module).should_receive('log_record').replace_with(dict) logs = tuple(module.make_error_log_records('Error', ValueError())) assert {log['levelno'] for log in logs} == {logging.CRITICAL} def test_make_error_log_records_generates_logs_for_os_error(): flexmock(module).should_receive('log_record').replace_with(dict) logs = tuple(module.make_error_log_records('Error', OSError())) assert {log['levelno'] for log in logs} == {logging.CRITICAL} def test_make_error_log_records_generates_nothing_for_other_error(): flexmock(module).should_receive('log_record').replace_with(dict) logs = tuple(module.make_error_log_records('Error', KeyError())) assert logs == () def test_get_local_path_uses_configuration_value(): assert module.get_local_path({'test.yaml': {'location': {'local_path': 'borg1'}}}) == 'borg1' def test_get_local_path_without_location_defaults_to_borg(): assert module.get_local_path({'test.yaml': {}}) == 'borg' def test_get_local_path_without_local_path_defaults_to_borg(): assert module.get_local_path({'test.yaml': {'location': {}}}) == 'borg' def test_collect_configuration_run_summary_logs_info_for_success(): flexmock(module.command).should_receive('execute_hook').never() flexmock(module).should_receive('run_configuration').and_return([]) arguments = {} 
logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.INFO} def test_collect_configuration_run_summary_executes_hooks_for_create(): flexmock(module).should_receive('run_configuration').and_return([]) arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.INFO} def test_collect_configuration_run_summary_logs_info_for_success_with_extract(): flexmock(module.validate).should_receive('guard_configuration_contains_repository') flexmock(module).should_receive('run_configuration').and_return([]) arguments = {'extract': flexmock(repository='repo')} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.INFO} def test_collect_configuration_run_summary_logs_extract_with_repository_error(): flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise( ValueError ) expected_logs = (flexmock(),) flexmock(module).should_receive('make_error_log_records').and_return(expected_logs) arguments = {'extract': flexmock(repository='repo')} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert logs == expected_logs def test_collect_configuration_run_summary_logs_info_for_success_with_mount(): flexmock(module.validate).should_receive('guard_configuration_contains_repository') flexmock(module).should_receive('run_configuration').and_return([]) arguments = {'mount': flexmock(repository='repo')} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.INFO} def test_collect_configuration_run_summary_logs_mount_with_repository_error(): 
flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise( ValueError ) expected_logs = (flexmock(),) flexmock(module).should_receive('make_error_log_records').and_return(expected_logs) arguments = {'mount': flexmock(repository='repo')} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert logs == expected_logs def test_collect_configuration_run_summary_logs_missing_configs_error(): arguments = {'global': flexmock(config_paths=[])} expected_logs = (flexmock(),) flexmock(module).should_receive('make_error_log_records').and_return(expected_logs) logs = tuple(module.collect_configuration_run_summary_logs({}, arguments=arguments)) assert logs == expected_logs def test_collect_configuration_run_summary_logs_pre_hook_error(): flexmock(module.command).should_receive('execute_hook').and_raise(ValueError) expected_logs = (flexmock(),) flexmock(module).should_receive('make_error_log_records').and_return(expected_logs) arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert logs == expected_logs def test_collect_configuration_run_summary_logs_post_hook_error(): flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(ValueError) flexmock(module).should_receive('run_configuration').and_return([]) expected_logs = (flexmock(),) flexmock(module).should_receive('make_error_log_records').and_return(expected_logs) arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert expected_logs[0] in logs def test_collect_configuration_run_summary_logs_for_list_with_archive_and_repository_error(): flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise( 
ValueError ) expected_logs = (flexmock(),) flexmock(module).should_receive('make_error_log_records').and_return(expected_logs) arguments = {'list': flexmock(repository='repo', archive='test')} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert logs == expected_logs def test_collect_configuration_run_summary_logs_info_for_success_with_list(): flexmock(module).should_receive('run_configuration').and_return([]) arguments = {'list': flexmock(repository='repo', archive=None)} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.INFO} def test_collect_configuration_run_summary_logs_run_configuration_error(): flexmock(module.validate).should_receive('guard_configuration_contains_repository') flexmock(module).should_receive('run_configuration').and_return( [logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))] ) flexmock(module).should_receive('make_error_log_records').and_return([]) arguments = {} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.CRITICAL} def test_collect_configuration_run_summary_logs_run_umount_error(): flexmock(module.validate).should_receive('guard_configuration_contains_repository') flexmock(module).should_receive('run_configuration').and_return([]) flexmock(module.borg_umount).should_receive('unmount_archive').and_raise(OSError) flexmock(module).should_receive('make_error_log_records').and_return( [logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))] ) arguments = {'umount': flexmock(mount_point='/mnt')} logs = tuple( module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments) ) assert {log.levelno for log in logs} == {logging.INFO, logging.CRITICAL} def 
test_collect_configuration_run_summary_logs_outputs_merged_json_results(): flexmock(module).should_receive('run_configuration').and_return(['foo', 'bar']).and_return( ['baz'] ) flexmock(module.sys.stdout).should_receive('write').with_args('["foo", "bar", "baz"]').once() arguments = {} tuple( module.collect_configuration_run_summary_logs( {'test.yaml': {}, 'test2.yaml': {}}, arguments=arguments ) ) borgmatic-1.5.1/tests/unit/config/000077500000000000000000000000001361605604600170625ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/config/__init__.py000066400000000000000000000000001361605604600211610ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/config/test_checks.py000066400000000000000000000013051361605604600217320ustar00rootroot00000000000000from borgmatic.config import checks as module def test_repository_enabled_for_checks_defaults_to_enabled_for_all_repositories(): enabled = module.repository_enabled_for_checks('repo.borg', consistency={}) assert enabled def test_repository_enabled_for_checks_is_enabled_for_specified_repositories(): enabled = module.repository_enabled_for_checks( 'repo.borg', consistency={'check_repositories': ['repo.borg', 'other.borg']} ) assert enabled def test_repository_enabled_for_checks_is_disabled_for_other_repositories(): enabled = module.repository_enabled_for_checks( 'repo.borg', consistency={'check_repositories': ['other.borg']} ) assert not enabled borgmatic-1.5.1/tests/unit/config/test_collect.py000066400000000000000000000147731361605604600221340ustar00rootroot00000000000000import sys from flexmock import flexmock from borgmatic.config import collect as module def test_get_default_config_paths_includes_absolute_user_config_path(): flexmock(module.os, environ={'XDG_CONFIG_HOME': None, 'HOME': '/home/user'}) config_paths = module.get_default_config_paths() assert '/home/user/.config/borgmatic/config.yaml' in config_paths def test_get_default_config_paths_prefers_xdg_config_home_for_user_config_path(): 
flexmock(module.os, environ={'XDG_CONFIG_HOME': '/home/user/.etc', 'HOME': '/home/user'}) config_paths = module.get_default_config_paths() assert '/home/user/.etc/borgmatic/config.yaml' in config_paths def test_get_default_config_paths_does_not_expand_home_when_false(): flexmock(module.os, environ={'HOME': '/home/user'}) config_paths = module.get_default_config_paths(expand_home=False) assert '$HOME/.config/borgmatic/config.yaml' in config_paths def test_collect_config_filenames_collects_given_files(): config_paths = ('config.yaml', 'other.yaml') flexmock(module.os.path).should_receive('isdir').and_return(False) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == config_paths def test_collect_config_filenames_collects_yml_file_endings(): config_paths = ('config.yaml', '/etc/borgmatic.d') mock_path = flexmock(module.os.path) mock_path.should_receive('exists').and_return(True) mock_path.should_receive('isdir').with_args('config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d').and_return(True) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/foo.yml').and_return(False) flexmock(module.os).should_receive('listdir') flexmock(sys.modules['builtins']).should_receive('sorted').and_return(['foo.yml']) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == ('config.yaml', '/etc/borgmatic.d/foo.yml') def test_collect_config_filenames_collects_files_from_given_directories_and_ignores_sub_directories(): config_paths = ('config.yaml', '/etc/borgmatic.d') mock_path = flexmock(module.os.path) mock_path.should_receive('exists').and_return(True) mock_path.should_receive('isdir').with_args('config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d').and_return(True) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/foo.yaml').and_return(False) 
mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/bar').and_return(True) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/baz.yaml').and_return(False) flexmock(module.os).should_receive('listdir') flexmock(sys.modules['builtins']).should_receive('sorted').and_return( ['foo.yaml', 'bar', 'baz.yaml'] ) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == ( 'config.yaml', '/etc/borgmatic.d/foo.yaml', '/etc/borgmatic.d/baz.yaml', ) def test_collect_config_filenames_collects_files_from_given_directories_and_ignores_non_yaml_filenames(): config_paths = ('/etc/borgmatic.d',) mock_path = flexmock(module.os.path) mock_path.should_receive('exists').and_return(True) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d').and_return(True) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/foo.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/bar.yaml~').and_return(False) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d/baz.txt').and_return(False) flexmock(module.os).should_receive('listdir') flexmock(sys.modules['builtins']).should_receive('sorted').and_return( ['foo.yaml', 'bar.yaml~', 'baz.txt'] ) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == ('/etc/borgmatic.d/foo.yaml',) def test_collect_config_filenames_skips_etc_borgmatic_config_dot_yaml_if_it_does_not_exist(): config_paths = ('config.yaml', '/etc/borgmatic/config.yaml') mock_path = flexmock(module.os.path) mock_path.should_receive('exists').with_args('config.yaml').and_return(True) mock_path.should_receive('exists').with_args('/etc/borgmatic/config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/etc/borgmatic/config.yaml').and_return(True) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert 
config_filenames == ('config.yaml',) def test_collect_config_filenames_skips_etc_borgmatic_dot_d_if_it_does_not_exist(): config_paths = ('config.yaml', '/etc/borgmatic.d') mock_path = flexmock(module.os.path) mock_path.should_receive('exists').with_args('config.yaml').and_return(True) mock_path.should_receive('exists').with_args('/etc/borgmatic.d').and_return(False) mock_path.should_receive('isdir').with_args('config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/etc/borgmatic.d').and_return(True) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == ('config.yaml',) def test_collect_config_filenames_skips_non_canonical_etc_borgmatic_dot_d_if_it_does_not_exist(): config_paths = ('config.yaml', '/etc/../etc/borgmatic.d') mock_path = flexmock(module.os.path) mock_path.should_receive('exists').with_args('config.yaml').and_return(True) mock_path.should_receive('exists').with_args('/etc/../etc/borgmatic.d').and_return(False) mock_path.should_receive('isdir').with_args('config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/etc/../etc/borgmatic.d').and_return(True) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == ('config.yaml',) def test_collect_config_filenames_includes_other_directory_if_it_does_not_exist(): config_paths = ('config.yaml', '/my/directory') mock_path = flexmock(module.os.path) mock_path.should_receive('exists').with_args('config.yaml').and_return(True) mock_path.should_receive('exists').with_args('/my/directory').and_return(False) mock_path.should_receive('isdir').with_args('config.yaml').and_return(False) mock_path.should_receive('isdir').with_args('/my/directory').and_return(True) config_filenames = tuple(module.collect_config_filenames(config_paths)) assert config_filenames == config_paths 
borgmatic-1.5.1/tests/unit/config/test_convert.py000066400000000000000000000121171361605604600221550ustar00rootroot00000000000000import os from collections import OrderedDict, defaultdict, namedtuple import pytest from flexmock import flexmock from borgmatic.config import convert as module Parsed_config = namedtuple('Parsed_config', ('location', 'storage', 'retention', 'consistency')) def test_convert_section_generates_integer_value_for_integer_type_in_schema(): flexmock(module.yaml.comments).should_receive('CommentedMap').replace_with(OrderedDict) source_section_config = OrderedDict([('check_last', '3')]) section_schema = {'map': {'check_last': {'type': 'int'}}} destination_config = module._convert_section(source_section_config, section_schema) assert destination_config == OrderedDict([('check_last', 3)]) def test_convert_legacy_parsed_config_transforms_source_config_to_mapping(): flexmock(module.yaml.comments).should_receive('CommentedMap').replace_with(OrderedDict) flexmock(module.generate).should_receive('add_comments_to_configuration_map') source_config = Parsed_config( location=OrderedDict([('source_directories', '/home'), ('repository', 'hostname.borg')]), storage=OrderedDict([('encryption_passphrase', 'supersecret')]), retention=OrderedDict([('keep_daily', 7)]), consistency=OrderedDict([('checks', 'repository')]), ) source_excludes = ['/var'] schema = {'map': defaultdict(lambda: {'map': {}})} destination_config = module.convert_legacy_parsed_config(source_config, source_excludes, schema) assert destination_config == OrderedDict( [ ( 'location', OrderedDict( [ ('source_directories', ['/home']), ('repositories', ['hostname.borg']), ('exclude_patterns', ['/var']), ] ), ), ('storage', OrderedDict([('encryption_passphrase', 'supersecret')])), ('retention', OrderedDict([('keep_daily', 7)])), ('consistency', OrderedDict([('checks', ['repository'])])), ] ) def test_convert_legacy_parsed_config_splits_space_separated_values(): 
flexmock(module.yaml.comments).should_receive('CommentedMap').replace_with(OrderedDict) flexmock(module.generate).should_receive('add_comments_to_configuration_map') source_config = Parsed_config( location=OrderedDict( [('source_directories', '/home /etc'), ('repository', 'hostname.borg')] ), storage=OrderedDict(), retention=OrderedDict(), consistency=OrderedDict([('checks', 'repository archives')]), ) source_excludes = ['/var'] schema = {'map': defaultdict(lambda: {'map': {}})} destination_config = module.convert_legacy_parsed_config(source_config, source_excludes, schema) assert destination_config == OrderedDict( [ ( 'location', OrderedDict( [ ('source_directories', ['/home', '/etc']), ('repositories', ['hostname.borg']), ('exclude_patterns', ['/var']), ] ), ), ('storage', OrderedDict()), ('retention', OrderedDict()), ('consistency', OrderedDict([('checks', ['repository', 'archives'])])), ] ) def test_guard_configuration_upgraded_raises_when_only_source_config_present(): flexmock(os.path).should_receive('exists').with_args('config').and_return(True) flexmock(os.path).should_receive('exists').with_args('config.yaml').and_return(False) flexmock(os.path).should_receive('exists').with_args('other.yaml').and_return(False) with pytest.raises(module.Legacy_configuration_not_upgraded): module.guard_configuration_upgraded('config', ('config.yaml', 'other.yaml')) def test_guard_configuration_upgraded_does_not_raise_when_only_destination_config_present(): flexmock(os.path).should_receive('exists').with_args('config').and_return(False) flexmock(os.path).should_receive('exists').with_args('config.yaml').and_return(False) flexmock(os.path).should_receive('exists').with_args('other.yaml').and_return(True) module.guard_configuration_upgraded('config', ('config.yaml', 'other.yaml')) def test_guard_configuration_upgraded_does_not_raise_when_both_configs_present(): flexmock(os.path).should_receive('exists').with_args('config').and_return(True) 
flexmock(os.path).should_receive('exists').with_args('config.yaml').and_return(False) flexmock(os.path).should_receive('exists').with_args('other.yaml').and_return(True) module.guard_configuration_upgraded('config', ('config.yaml', 'other.yaml')) def test_guard_configuration_upgraded_does_not_raise_when_neither_config_present(): flexmock(os.path).should_receive('exists').with_args('config').and_return(False) flexmock(os.path).should_receive('exists').with_args('config.yaml').and_return(False) flexmock(os.path).should_receive('exists').with_args('other.yaml').and_return(False) module.guard_configuration_upgraded('config', ('config.yaml', 'other.yaml')) borgmatic-1.5.1/tests/unit/config/test_generate.py000066400000000000000000000122121361605604600222630ustar00rootroot00000000000000from collections import OrderedDict import pytest from flexmock import flexmock from borgmatic.config import generate as module def test_schema_to_sample_configuration_generates_config_map_with_examples(): flexmock(module.yaml.comments).should_receive('CommentedMap').replace_with(OrderedDict) flexmock(module).should_receive('add_comments_to_configuration_map') schema = { 'map': OrderedDict( [ ('section1', {'map': {'field1': OrderedDict([('example', 'Example 1')])}}), ( 'section2', { 'map': OrderedDict( [ ('field2', {'example': 'Example 2'}), ('field3', {'example': 'Example 3'}), ] ) }, ), ] ) } config = module._schema_to_sample_configuration(schema) assert config == OrderedDict( [ ('section1', OrderedDict([('field1', 'Example 1')])), ('section2', OrderedDict([('field2', 'Example 2'), ('field3', 'Example 3')])), ] ) def test_schema_to_sample_configuration_generates_config_sequence_of_strings_with_example(): flexmock(module.yaml.comments).should_receive('CommentedSeq').replace_with(list) flexmock(module).should_receive('add_comments_to_configuration_sequence') schema = {'seq': [{'type': 'str'}], 'example': ['hi']} config = module._schema_to_sample_configuration(schema) assert config == ['hi'] 
def test_schema_to_sample_configuration_generates_config_sequence_of_maps_with_examples(): flexmock(module.yaml.comments).should_receive('CommentedSeq').replace_with(list) flexmock(module).should_receive('add_comments_to_configuration_sequence') flexmock(module).should_receive('add_comments_to_configuration_map') schema = { 'seq': [ { 'map': OrderedDict( [('field1', {'example': 'Example 1'}), ('field2', {'example': 'Example 2'})] ) } ] } config = module._schema_to_sample_configuration(schema) assert config == [OrderedDict([('field1', 'Example 1'), ('field2', 'Example 2')])] def test_schema_to_sample_configuration_with_unsupported_schema_raises(): schema = {'gobbledygook': [{'type': 'not-your'}]} with pytest.raises(ValueError): module._schema_to_sample_configuration(schema) def test_merge_source_configuration_into_destination_inserts_map_fields(): destination_config = {'foo': 'dest1', 'bar': 'dest2'} source_config = {'foo': 'source1', 'baz': 'source2'} flexmock(module).should_receive('remove_commented_out_sentinel') flexmock(module).should_receive('yaml.comments.CommentedSeq').replace_with(list) module.merge_source_configuration_into_destination(destination_config, source_config) assert destination_config == {'foo': 'source1', 'bar': 'dest2', 'baz': 'source2'} def test_merge_source_configuration_into_destination_inserts_nested_map_fields(): destination_config = {'foo': {'first': 'dest1', 'second': 'dest2'}, 'bar': 'dest3'} source_config = {'foo': {'first': 'source1'}} flexmock(module).should_receive('remove_commented_out_sentinel') flexmock(module).should_receive('yaml.comments.CommentedSeq').replace_with(list) module.merge_source_configuration_into_destination(destination_config, source_config) assert destination_config == {'foo': {'first': 'source1', 'second': 'dest2'}, 'bar': 'dest3'} def test_merge_source_configuration_into_destination_inserts_sequence_fields(): destination_config = {'foo': ['dest1', 'dest2'], 'bar': ['dest3'], 'baz': ['dest4']} source_config = 
{'foo': ['source1'], 'bar': ['source2', 'source3']} flexmock(module).should_receive('remove_commented_out_sentinel') flexmock(module).should_receive('yaml.comments.CommentedSeq').replace_with(list) module.merge_source_configuration_into_destination(destination_config, source_config) assert destination_config == { 'foo': ['source1'], 'bar': ['source2', 'source3'], 'baz': ['dest4'], } def test_merge_source_configuration_into_destination_inserts_sequence_of_maps(): destination_config = {'foo': [{'first': 'dest1', 'second': 'dest2'}], 'bar': 'dest3'} source_config = {'foo': [{'first': 'source1'}, {'other': 'source2'}]} flexmock(module).should_receive('remove_commented_out_sentinel') flexmock(module).should_receive('yaml.comments.CommentedSeq').replace_with(list) module.merge_source_configuration_into_destination(destination_config, source_config) assert destination_config == { 'foo': [{'first': 'source1', 'second': 'dest2'}, {'other': 'source2'}], 'bar': 'dest3', } def test_merge_source_configuration_into_destination_without_source_does_nothing(): original_destination_config = {'foo': 'dest1', 'bar': 'dest2'} destination_config = dict(original_destination_config) module.merge_source_configuration_into_destination(destination_config, None) assert destination_config == original_destination_config borgmatic-1.5.1/tests/unit/config/test_legacy.py000066400000000000000000000164561361605604600217530ustar00rootroot00000000000000from collections import OrderedDict import pytest from flexmock import flexmock from borgmatic.config import legacy as module def test_option_should_create_config_option(): option = module.option('name', bool, required=False) assert option == module.Config_option('name', bool, False) def test_option_should_create_config_option_with_defaults(): option = module.option('name') assert option == module.Config_option('name', str, True) def test_validate_configuration_format_with_valid_config_should_not_raise(): parser = flexmock() 
parser.should_receive('sections').and_return(('section', 'other')) parser.should_receive('options').with_args('section').and_return(('stuff',)) parser.should_receive('options').with_args('other').and_return(('such',)) config_format = ( module.Section_format( 'section', options=(module.Config_option('stuff', str, required=True),) ), module.Section_format('other', options=(module.Config_option('such', str, required=True),)), ) module.validate_configuration_format(parser, config_format) def test_validate_configuration_format_with_missing_required_section_should_raise(): parser = flexmock() parser.should_receive('sections').and_return(('section',)) config_format = ( module.Section_format( 'section', options=(module.Config_option('stuff', str, required=True),) ), # At least one option in this section is required, so the section is required. module.Section_format( 'missing', options=( module.Config_option('such', str, required=False), module.Config_option('things', str, required=True), ), ), ) with pytest.raises(ValueError): module.validate_configuration_format(parser, config_format) def test_validate_configuration_format_with_missing_optional_section_should_not_raise(): parser = flexmock() parser.should_receive('sections').and_return(('section',)) parser.should_receive('options').with_args('section').and_return(('stuff',)) config_format = ( module.Section_format( 'section', options=(module.Config_option('stuff', str, required=True),) ), # No options in the section are required, so the section is optional. 
module.Section_format( 'missing', options=( module.Config_option('such', str, required=False), module.Config_option('things', str, required=False), ), ), ) module.validate_configuration_format(parser, config_format) def test_validate_configuration_format_with_unknown_section_should_raise(): parser = flexmock() parser.should_receive('sections').and_return(('section', 'extra')) config_format = (module.Section_format('section', options=()),) with pytest.raises(ValueError): module.validate_configuration_format(parser, config_format) def test_validate_configuration_format_with_missing_required_option_should_raise(): parser = flexmock() parser.should_receive('sections').and_return(('section',)) parser.should_receive('options').with_args('section').and_return(('option',)) config_format = ( module.Section_format( 'section', options=( module.Config_option('option', str, required=True), module.Config_option('missing', str, required=True), ), ), ) with pytest.raises(ValueError): module.validate_configuration_format(parser, config_format) def test_validate_configuration_format_with_missing_optional_option_should_not_raise(): parser = flexmock() parser.should_receive('sections').and_return(('section',)) parser.should_receive('options').with_args('section').and_return(('option',)) config_format = ( module.Section_format( 'section', options=( module.Config_option('option', str, required=True), module.Config_option('missing', str, required=False), ), ), ) module.validate_configuration_format(parser, config_format) def test_validate_configuration_format_with_extra_option_should_raise(): parser = flexmock() parser.should_receive('sections').and_return(('section',)) parser.should_receive('options').with_args('section').and_return(('option', 'extra')) config_format = ( module.Section_format( 'section', options=(module.Config_option('option', str, required=True),) ), ) with pytest.raises(ValueError): module.validate_configuration_format(parser, config_format) def 
test_parse_section_options_should_return_section_options(): parser = flexmock() parser.should_receive('get').with_args('section', 'foo').and_return('value') parser.should_receive('getint').with_args('section', 'bar').and_return(1) parser.should_receive('getboolean').never() parser.should_receive('has_option').with_args('section', 'foo').and_return(True) parser.should_receive('has_option').with_args('section', 'bar').and_return(True) section_format = module.Section_format( 'section', ( module.Config_option('foo', str, required=True), module.Config_option('bar', int, required=True), ), ) config = module.parse_section_options(parser, section_format) assert config == OrderedDict((('foo', 'value'), ('bar', 1))) def test_parse_section_options_for_missing_section_should_return_empty_dict(): parser = flexmock() parser.should_receive('get').never() parser.should_receive('getint').never() parser.should_receive('getboolean').never() parser.should_receive('has_option').with_args('section', 'foo').and_return(False) parser.should_receive('has_option').with_args('section', 'bar').and_return(False) section_format = module.Section_format( 'section', ( module.Config_option('foo', str, required=False), module.Config_option('bar', int, required=False), ), ) config = module.parse_section_options(parser, section_format) assert config == OrderedDict() def insert_mock_parser(): parser = flexmock() parser.should_receive('read').and_return([flexmock()]) module.RawConfigParser = lambda: parser return parser def test_parse_configuration_should_return_section_configs(): parser = insert_mock_parser() config_format = (flexmock(name='items'), flexmock(name='things')) mock_module = flexmock(module) mock_module.should_receive('validate_configuration_format').with_args( parser, config_format ).once() mock_section_configs = (flexmock(), flexmock()) for section_format, section_config in zip(config_format, mock_section_configs): mock_module.should_receive('parse_section_options').with_args( parser, 
section_format ).and_return(section_config).once() parsed_config = module.parse_configuration('filename', config_format) assert parsed_config == type(parsed_config)(*mock_section_configs) def test_parse_configuration_with_file_open_error_should_raise(): parser = insert_mock_parser() parser.should_receive('read').and_return([]) with pytest.raises(ValueError): module.parse_configuration('filename', config_format=flexmock()) borgmatic-1.5.1/tests/unit/config/test_normalize.py000066400000000000000000000015131361605604600224730ustar00rootroot00000000000000import pytest from borgmatic.config import normalize as module @pytest.mark.parametrize( 'config,expected_config', ( ( {'location': {'exclude_if_present': '.nobackup'}}, {'location': {'exclude_if_present': ['.nobackup']}}, ), ( {'location': {'exclude_if_present': ['.nobackup']}}, {'location': {'exclude_if_present': ['.nobackup']}}, ), ( {'location': {'source_directories': ['foo', 'bar']}}, {'location': {'source_directories': ['foo', 'bar']}}, ), ({'storage': {'compression': 'yes_please'}}, {'storage': {'compression': 'yes_please'}}), ), ) def test_normalize_applies_hard_coded_normalization_to_config(config, expected_config): module.normalize(config) assert config == expected_config borgmatic-1.5.1/tests/unit/config/test_override.py000066400000000000000000000047201361605604600223150ustar00rootroot00000000000000import pytest from flexmock import flexmock from borgmatic.config import override as module def test_set_values_with_empty_keys_bails(): config = {} module.set_values(config, keys=(), value='value') assert config == {} def test_set_values_with_one_key_sets_it_into_config(): config = {} module.set_values(config, keys=('key',), value='value') assert config == {'key': 'value'} def test_set_values_with_one_key_overwrites_existing_key(): config = {'key': 'old_value', 'other': 'other_value'} module.set_values(config, keys=('key',), value='value') assert config == {'key': 'value', 'other': 'other_value'} def 
test_set_values_with_multiple_keys_creates_hierarchy(): config = {} module.set_values(config, ('section', 'key'), 'value') assert config == {'section': {'key': 'value'}} def test_set_values_with_multiple_keys_updates_hierarchy(): config = {'section': {'other': 'other_value'}} module.set_values(config, ('section', 'key'), 'value') assert config == {'section': {'key': 'value', 'other': 'other_value'}} def test_parse_overrides_splits_keys_and_values(): flexmock(module).should_receive('convert_value_type').replace_with(lambda value: value) raw_overrides = ['section.my_option=value1', 'section.other_option=value2'] expected_result = ( (('section', 'my_option'), 'value1'), (('section', 'other_option'), 'value2'), ) module.parse_overrides(raw_overrides) == expected_result def test_parse_overrides_allows_value_with_equal_sign(): flexmock(module).should_receive('convert_value_type').replace_with(lambda value: value) raw_overrides = ['section.option=this===value'] expected_result = ((('section', 'option'), 'this===value'),) module.parse_overrides(raw_overrides) == expected_result def test_parse_overrides_raises_on_missing_equal_sign(): flexmock(module).should_receive('convert_value_type').replace_with(lambda value: value) raw_overrides = ['section.option'] with pytest.raises(ValueError): module.parse_overrides(raw_overrides) def test_parse_overrides_allows_value_with_single_key(): flexmock(module).should_receive('convert_value_type').replace_with(lambda value: value) raw_overrides = ['option=value'] expected_result = ((('option',), 'value'),) module.parse_overrides(raw_overrides) == expected_result def test_parse_overrides_handles_empty_overrides(): module.parse_overrides(raw_overrides=None) == () borgmatic-1.5.1/tests/unit/config/test_validate.py000066400000000000000000000134731361605604600222740ustar00rootroot00000000000000import pytest from flexmock import flexmock from borgmatic.config import validate as module def 
test_validation_error_str_contains_error_messages_and_config_filename(): error = module.Validation_error('config.yaml', ('oops', 'uh oh')) result = str(error) assert 'config.yaml' in result assert 'oops' in result assert 'uh oh' in result def test_apply_logical_validation_raises_if_archive_name_format_present_without_prefix(): with pytest.raises(module.Validation_error): module.apply_logical_validation( 'config.yaml', { 'storage': {'archive_name_format': '{hostname}-{now}'}, 'retention': {'keep_daily': 7}, }, ) def test_apply_logical_validation_raises_if_archive_name_format_present_without_retention_prefix(): with pytest.raises(module.Validation_error): module.apply_logical_validation( 'config.yaml', { 'storage': {'archive_name_format': '{hostname}-{now}'}, 'retention': {'keep_daily': 7}, 'consistency': {'prefix': '{hostname}-'}, }, ) def test_apply_locical_validation_raises_if_unknown_repository_in_check_repositories(): with pytest.raises(module.Validation_error): module.apply_logical_validation( 'config.yaml', { 'location': {'repositories': ['repo.borg', 'other.borg']}, 'retention': {'keep_secondly': 1000}, 'consistency': {'check_repositories': ['repo.borg', 'unknown.borg']}, }, ) def test_apply_locical_validation_does_not_raise_if_known_repository_in_check_repositories(): module.apply_logical_validation( 'config.yaml', { 'location': {'repositories': ['repo.borg', 'other.borg']}, 'retention': {'keep_secondly': 1000}, 'consistency': {'check_repositories': ['repo.borg']}, }, ) def test_apply_logical_validation_does_not_raise_if_archive_name_format_and_prefix_present(): module.apply_logical_validation( 'config.yaml', { 'storage': {'archive_name_format': '{hostname}-{now}'}, 'retention': {'prefix': '{hostname}-'}, 'consistency': {'prefix': '{hostname}-'}, }, ) def test_apply_logical_validation_does_not_raise_otherwise(): module.apply_logical_validation('config.yaml', {'retention': {'keep_secondly': 1000}}) def test_remove_examples_strips_examples_from_map(): schema = 
{ 'map': { 'foo': {'desc': 'thing1', 'example': 'bar'}, 'baz': {'desc': 'thing2', 'example': 'quux'}, } } module.remove_examples(schema) assert schema == {'map': {'foo': {'desc': 'thing1'}, 'baz': {'desc': 'thing2'}}} def test_remove_examples_strips_examples_from_sequence_of_maps(): schema = {'seq': [{'map': {'foo': {'desc': 'thing', 'example': 'bar'}}, 'example': 'stuff'}]} module.remove_examples(schema) assert schema == {'seq': [{'map': {'foo': {'desc': 'thing'}}}]} def test_normalize_repository_path_passes_through_remote_repository(): repository = 'example.org:test.borg' module.normalize_repository_path(repository) == repository def test_normalize_repository_path_passes_through_absolute_repository(): repository = '/foo/bar/test.borg' flexmock(module.os.path).should_receive('abspath').and_return(repository) module.normalize_repository_path(repository) == repository def test_normalize_repository_path_resolves_relative_repository(): repository = 'test.borg' absolute = '/foo/bar/test.borg' flexmock(module.os.path).should_receive('abspath').and_return(absolute) module.normalize_repository_path(repository) == absolute def test_repositories_match_does_not_raise(): flexmock(module).should_receive('normalize_repository_path') module.repositories_match('foo', 'bar') def test_guard_configuration_contains_repository_does_not_raise_when_repository_in_config(): flexmock(module).should_receive('repositories_match').replace_with( lambda first, second: first == second ) module.guard_configuration_contains_repository( repository='repo', configurations={'config.yaml': {'location': {'repositories': ['repo']}}} ) def test_guard_configuration_contains_repository_does_not_raise_when_repository_not_given(): module.guard_configuration_contains_repository( repository=None, configurations={'config.yaml': {'location': {'repositories': ['repo']}}} ) def test_guard_configuration_contains_repository_errors_when_repository_assumed_to_match_config_twice(): with pytest.raises(ValueError): 
module.guard_configuration_contains_repository( repository=None, configurations={'config.yaml': {'location': {'repositories': ['repo', 'repo2']}}}, ) def test_guard_configuration_contains_repository_errors_when_repository_missing_from_config(): flexmock(module).should_receive('repositories_match').replace_with( lambda first, second: first == second ) with pytest.raises(ValueError): module.guard_configuration_contains_repository( repository='nope', configurations={'config.yaml': {'location': {'repositories': ['repo', 'repo2']}}}, ) def test_guard_configuration_contains_repository_errors_when_repository_matches_config_twice(): flexmock(module).should_receive('repositories_match').replace_with( lambda first, second: first == second ) with pytest.raises(ValueError): module.guard_configuration_contains_repository( repository='repo', configurations={ 'config.yaml': {'location': {'repositories': ['repo', 'repo2']}}, 'other.yaml': {'location': {'repositories': ['repo']}}, }, ) borgmatic-1.5.1/tests/unit/hooks/000077500000000000000000000000001361605604600167405ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/hooks/__init__.py000066400000000000000000000000001361605604600210370ustar00rootroot00000000000000borgmatic-1.5.1/tests/unit/hooks/test_command.py000066400000000000000000000070141361605604600217710ustar00rootroot00000000000000import logging import subprocess from flexmock import flexmock from borgmatic.hooks import command as module def test_interpolate_context_passes_through_command_without_variable(): assert module.interpolate_context('ls', {'foo': 'bar'}) == 'ls' def test_interpolate_context_passes_through_command_with_unknown_variable(): assert module.interpolate_context('ls {baz}', {'foo': 'bar'}) == 'ls {baz}' def test_interpolate_context_interpolates_variables(): context = {'foo': 'bar', 'baz': 'quux'} assert module.interpolate_context('ls {foo}{baz} {baz}', context) == 'ls barquux quux' def test_execute_hook_invokes_each_command(): 
flexmock(module).should_receive('interpolate_context').replace_with( lambda command, context: command ) flexmock(module.execute).should_receive('execute_command').with_args( [':'], output_log_level=logging.WARNING, shell=True ).once() module.execute_hook([':'], None, 'config.yaml', 'pre-backup', dry_run=False) def test_execute_hook_with_multiple_commands_invokes_each_command(): flexmock(module).should_receive('interpolate_context').replace_with( lambda command, context: command ) flexmock(module.execute).should_receive('execute_command').with_args( [':'], output_log_level=logging.WARNING, shell=True ).once() flexmock(module.execute).should_receive('execute_command').with_args( ['true'], output_log_level=logging.WARNING, shell=True ).once() module.execute_hook([':', 'true'], None, 'config.yaml', 'pre-backup', dry_run=False) def test_execute_hook_with_umask_sets_that_umask(): flexmock(module).should_receive('interpolate_context').replace_with( lambda command, context: command ) flexmock(module.os).should_receive('umask').with_args(0o77).and_return(0o22).once() flexmock(module.os).should_receive('umask').with_args(0o22).once() flexmock(module.execute).should_receive('execute_command').with_args( [':'], output_log_level=logging.WARNING, shell=True ) module.execute_hook([':'], 77, 'config.yaml', 'pre-backup', dry_run=False) def test_execute_hook_with_dry_run_skips_commands(): flexmock(module).should_receive('interpolate_context').replace_with( lambda command, context: command ) flexmock(module.execute).should_receive('execute_command').never() module.execute_hook([':', 'true'], None, 'config.yaml', 'pre-backup', dry_run=True) def test_execute_hook_with_empty_commands_does_not_raise(): module.execute_hook([], None, 'config.yaml', 'post-backup', dry_run=False) def test_execute_hook_on_error_logs_as_error(): flexmock(module).should_receive('interpolate_context').replace_with( lambda command, context: command ) 
flexmock(module.execute).should_receive('execute_command').with_args( [':'], output_log_level=logging.ERROR, shell=True ).once() module.execute_hook([':'], None, 'config.yaml', 'on-error', dry_run=False) def test_considered_soft_failure_treats_soft_fail_exit_code_as_soft_fail(): error = subprocess.CalledProcessError(module.SOFT_FAIL_EXIT_CODE, 'try again') assert module.considered_soft_failure('config.yaml', error) def test_considered_soft_failure_does_not_treat_other_exit_code_as_soft_fail(): error = subprocess.CalledProcessError(1, 'error') assert not module.considered_soft_failure('config.yaml', error) def test_considered_soft_failure_does_not_treat_other_exception_type_as_soft_fail(): assert not module.considered_soft_failure('config.yaml', Exception()) borgmatic-1.5.1/tests/unit/hooks/test_cronhub.py000066400000000000000000000033021361605604600220070ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.hooks import cronhub as module def test_ping_monitor_rewrites_ping_url_for_start_state(): ping_url = 'https://example.com/start/abcdef' flexmock(module.requests).should_receive('get').with_args('https://example.com/start/abcdef') module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.START, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_rewrites_ping_url_and_state_for_start_state(): ping_url = 'https://example.com/ping/abcdef' flexmock(module.requests).should_receive('get').with_args('https://example.com/start/abcdef') module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.START, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_rewrites_ping_url_for_finish_state(): ping_url = 'https://example.com/start/abcdef' flexmock(module.requests).should_receive('get').with_args('https://example.com/finish/abcdef') module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.FINISH, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_rewrites_ping_url_for_fail_state(): ping_url = 
'https://example.com/start/abcdef' flexmock(module.requests).should_receive('get').with_args('https://example.com/fail/abcdef') module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.FAIL, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_dry_run_does_not_hit_ping_url(): ping_url = 'https://example.com' flexmock(module.requests).should_receive('get').never() module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.START, monitoring_log_level=1, dry_run=True ) borgmatic-1.5.1/tests/unit/hooks/test_cronitor.py000066400000000000000000000024601361605604600222120ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.hooks import cronitor as module def test_ping_monitor_hits_ping_url_for_start_state(): ping_url = 'https://example.com' flexmock(module.requests).should_receive('get').with_args('{}/{}'.format(ping_url, 'run')) module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.START, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_hits_ping_url_for_finish_state(): ping_url = 'https://example.com' flexmock(module.requests).should_receive('get').with_args('{}/{}'.format(ping_url, 'complete')) module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.FINISH, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_hits_ping_url_for_fail_state(): ping_url = 'https://example.com' flexmock(module.requests).should_receive('get').with_args('{}/{}'.format(ping_url, 'fail')) module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.FAIL, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_dry_run_does_not_hit_ping_url(): ping_url = 'https://example.com' flexmock(module.requests).should_receive('get').never() module.ping_monitor( ping_url, 'config.yaml', module.monitor.State.START, monitoring_log_level=1, dry_run=True ) borgmatic-1.5.1/tests/unit/hooks/test_dispatch.py000066400000000000000000000050171361605604600221530ustar00rootroot00000000000000import sys import pytest 
from flexmock import flexmock from borgmatic.hooks import dispatch as module def hook_function(config, log_prefix, thing, value): ''' This test function gets mocked out below. ''' pass def test_call_hook_invokes_module_function_with_arguments_and_returns_value(): hooks = {'super_hook': flexmock(), 'other_hook': flexmock()} expected_return_value = flexmock() test_module = sys.modules[__name__] flexmock(module).HOOK_NAME_TO_MODULE = {'super_hook': test_module} flexmock(test_module).should_receive('hook_function').with_args( hooks['super_hook'], 'prefix', 55, value=66 ).and_return(expected_return_value).once() return_value = module.call_hook('hook_function', hooks, 'prefix', 'super_hook', 55, value=66) assert return_value == expected_return_value def test_call_hook_without_hook_config_skips_call(): hooks = {'other_hook': flexmock()} test_module = sys.modules[__name__] flexmock(module).HOOK_NAME_TO_MODULE = {'super_hook': test_module} flexmock(test_module).should_receive('hook_function').never() module.call_hook('hook_function', hooks, 'prefix', 'super_hook', 55, value=66) def test_call_hook_without_corresponding_module_raises(): hooks = {'super_hook': flexmock(), 'other_hook': flexmock()} test_module = sys.modules[__name__] flexmock(module).HOOK_NAME_TO_MODULE = {'other_hook': test_module} flexmock(test_module).should_receive('hook_function').never() with pytest.raises(ValueError): module.call_hook('hook_function', hooks, 'prefix', 'super_hook', 55, value=66) def test_call_hooks_calls_each_hook_and_collects_return_values(): hooks = {'super_hook': flexmock(), 'other_hook': flexmock()} expected_return_values = {'super_hook': flexmock(), 'other_hook': flexmock()} flexmock(module).should_receive('call_hook').and_return( expected_return_values['super_hook'] ).and_return(expected_return_values['other_hook']) return_values = module.call_hooks('do_stuff', hooks, 'prefix', ('super_hook', 'other_hook'), 55) assert return_values == expected_return_values def 
test_call_hooks_calls_skips_return_values_for_unconfigured_hooks(): hooks = {'super_hook': flexmock()} expected_return_values = {'super_hook': flexmock()} flexmock(module).should_receive('call_hook').and_return(expected_return_values['super_hook']) return_values = module.call_hooks('do_stuff', hooks, 'prefix', ('super_hook', 'other_hook'), 55) assert return_values == expected_return_values borgmatic-1.5.1/tests/unit/hooks/test_dump.py000066400000000000000000000173461361605604600213310ustar00rootroot00000000000000import pytest from flexmock import flexmock from borgmatic.hooks import dump as module def test_make_database_dump_path_joins_arguments(): assert module.make_database_dump_path('/tmp', 'super_databases') == '/tmp/super_databases' def test_make_database_dump_path_defaults_without_source_directory(): assert module.make_database_dump_path(None, 'super_databases') == '~/.borgmatic/super_databases' def test_make_database_dump_filename_uses_name_and_hostname(): flexmock(module.os.path).should_receive('expanduser').and_return('databases') assert ( module.make_database_dump_filename('databases', 'test', 'hostname') == 'databases/hostname/test' ) def test_make_database_dump_filename_without_hostname_defaults_to_localhost(): flexmock(module.os.path).should_receive('expanduser').and_return('databases') assert module.make_database_dump_filename('databases', 'test') == 'databases/localhost/test' def test_make_database_dump_filename_with_invalid_name_raises(): flexmock(module.os.path).should_receive('expanduser').and_return('databases') with pytest.raises(ValueError): module.make_database_dump_filename('databases', 'invalid/name') def test_flatten_dump_patterns_produces_list_of_all_patterns(): dump_patterns = {'postgresql_databases': ['*/glob', 'glob/*'], 'mysql_databases': ['*/*/*']} expected_patterns = sorted( dump_patterns['postgresql_databases'] + dump_patterns['mysql_databases'] ) assert sorted(module.flatten_dump_patterns(dump_patterns, ('bob',))) == 
expected_patterns def test_flatten_dump_patterns_with_no_patterns_errors(): dump_patterns = {'postgresql_databases': [], 'mysql_databases': []} with pytest.raises(ValueError): assert module.flatten_dump_patterns(dump_patterns, ('bob',)) def test_flatten_dump_patterns_with_no_hooks_errors(): dump_patterns = {} with pytest.raises(ValueError): assert module.flatten_dump_patterns(dump_patterns, ('bob',)) def test_remove_database_dumps_removes_dump_for_each_database(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module).should_receive('make_database_dump_filename').with_args( 'databases', 'foo', None ).and_return('databases/localhost/foo') flexmock(module).should_receive('make_database_dump_filename').with_args( 'databases', 'bar', None ).and_return('databases/localhost/bar') flexmock(module.os.path).should_receive('isdir').and_return(False) flexmock(module.os).should_receive('remove').with_args('databases/localhost/foo').once() flexmock(module.os).should_receive('remove').with_args('databases/localhost/bar').once() flexmock(module.os).should_receive('listdir').with_args('databases/localhost').and_return( ['bar'] ).and_return([]) flexmock(module.os).should_receive('rmdir').with_args('databases/localhost').once() module.remove_database_dumps('databases', databases, 'SuperDB', 'test.yaml', dry_run=False) def test_remove_database_dumps_removes_dump_in_directory_format(): databases = [{'name': 'foo'}] flexmock(module).should_receive('make_database_dump_filename').with_args( 'databases', 'foo', None ).and_return('databases/localhost/foo') flexmock(module.os.path).should_receive('isdir').and_return(True) flexmock(module.os).should_receive('remove').never() flexmock(module.shutil).should_receive('rmtree').with_args('databases/localhost/foo').once() flexmock(module.os).should_receive('listdir').with_args('databases/localhost').and_return([]) flexmock(module.os).should_receive('rmdir').with_args('databases/localhost').once() module.remove_database_dumps('databases', 
databases, 'SuperDB', 'test.yaml', dry_run=False) def test_remove_database_dumps_with_dry_run_skips_removal(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module.os).should_receive('rmdir').never() flexmock(module.os).should_receive('remove').never() module.remove_database_dumps('databases', databases, 'SuperDB', 'test.yaml', dry_run=True) def test_remove_database_dumps_without_databases_does_not_raise(): module.remove_database_dumps('databases', [], 'SuperDB', 'test.yaml', dry_run=False) def test_convert_glob_patterns_to_borg_patterns_removes_leading_slash(): assert module.convert_glob_patterns_to_borg_patterns(('/etc/foo/bar',)) == ['sh:etc/foo/bar'] def test_get_database_names_from_dumps_gets_names_from_filenames_matching_globs(): flexmock(module.glob).should_receive('glob').and_return( ('databases/localhost/foo',) ).and_return(('databases/localhost/bar',)).and_return(()) assert module.get_database_names_from_dumps( ('databases/*/foo', 'databases/*/bar', 'databases/*/baz') ) == ['foo', 'bar'] def test_get_database_configurations_only_produces_named_databases(): databases = [ {'name': 'foo', 'hostname': 'example.org'}, {'name': 'bar', 'hostname': 'example.com'}, {'name': 'baz', 'hostname': 'example.org'}, ] assert list(module.get_database_configurations(databases, ('foo', 'baz'))) == [ {'name': 'foo', 'hostname': 'example.org'}, {'name': 'baz', 'hostname': 'example.org'}, ] def test_get_database_configurations_matches_all_database(): databases = [ {'name': 'foo', 'hostname': 'example.org'}, {'name': 'all', 'hostname': 'example.com'}, ] assert list(module.get_database_configurations(databases, ('foo', 'bar', 'baz'))) == [ {'name': 'foo', 'hostname': 'example.org'}, {'name': 'bar', 'hostname': 'example.com'}, {'name': 'baz', 'hostname': 'example.com'}, ] def test_get_per_hook_database_configurations_partitions_by_hook(): hooks = {'postgresql_databases': [flexmock()]} names = ('foo', 'bar') dump_patterns = flexmock() expected_config = 
{'postgresql_databases': [{'name': 'foo'}, {'name': 'bar'}]} flexmock(module).should_receive('get_database_configurations').with_args( hooks['postgresql_databases'], names ).and_return(expected_config['postgresql_databases']) config = module.get_per_hook_database_configurations(hooks, names, dump_patterns) assert config == expected_config def test_get_per_hook_database_configurations_defaults_to_detected_database_names(): hooks = {'postgresql_databases': [flexmock()]} names = () detected_names = flexmock() dump_patterns = {'postgresql_databases': [flexmock()]} expected_config = {'postgresql_databases': [flexmock()]} flexmock(module).should_receive('get_database_names_from_dumps').and_return(detected_names) flexmock(module).should_receive('get_database_configurations').with_args( hooks['postgresql_databases'], detected_names ).and_return(expected_config['postgresql_databases']) config = module.get_per_hook_database_configurations(hooks, names, dump_patterns) assert config == expected_config def test_get_per_hook_database_configurations_with_unknown_database_name_raises(): hooks = {'postgresql_databases': [flexmock()]} names = ('foo', 'bar') dump_patterns = flexmock() flexmock(module).should_receive('get_database_configurations').with_args( hooks['postgresql_databases'], names ).and_return([]) with pytest.raises(ValueError): module.get_per_hook_database_configurations(hooks, names, dump_patterns) def test_get_per_hook_database_configurations_with_all_and_no_archive_dumps_raises(): hooks = {'postgresql_databases': [flexmock()]} names = ('foo', 'all') dump_patterns = flexmock() flexmock(module).should_receive('get_database_configurations').with_args( hooks['postgresql_databases'], names ).and_return([]) with pytest.raises(ValueError): module.get_per_hook_database_configurations(hooks, names, dump_patterns) borgmatic-1.5.1/tests/unit/hooks/test_healthchecks.py000066400000000000000000000106721361605604600230050ustar00rootroot00000000000000from flexmock import flexmock 
from borgmatic.hooks import healthchecks as module def test_forgetful_buffering_handler_emit_collects_log_records(): handler = module.Forgetful_buffering_handler(byte_capacity=100, log_level=1) handler.emit(flexmock(getMessage=lambda: 'foo')) handler.emit(flexmock(getMessage=lambda: 'bar')) assert handler.buffer == ['foo\n', 'bar\n'] assert not handler.forgot def test_forgetful_buffering_handler_emit_forgets_log_records_when_capacity_reached(): handler = module.Forgetful_buffering_handler(byte_capacity=len('foo\nbar\n'), log_level=1) handler.emit(flexmock(getMessage=lambda: 'foo')) assert handler.buffer == ['foo\n'] handler.emit(flexmock(getMessage=lambda: 'bar')) assert handler.buffer == ['foo\n', 'bar\n'] handler.emit(flexmock(getMessage=lambda: 'baz')) assert handler.buffer == ['bar\n', 'baz\n'] handler.emit(flexmock(getMessage=lambda: 'quux')) assert handler.buffer == ['quux\n'] assert handler.forgot def test_format_buffered_logs_for_payload_flattens_log_buffer(): handler = module.Forgetful_buffering_handler(byte_capacity=100, log_level=1) handler.buffer = ['foo\n', 'bar\n'] flexmock(module.logging).should_receive('getLogger').and_return(flexmock(handlers=[handler])) payload = module.format_buffered_logs_for_payload() assert payload == 'foo\nbar\n' def test_format_buffered_logs_for_payload_inserts_truncation_indicator_when_logs_forgotten(): handler = module.Forgetful_buffering_handler(byte_capacity=100, log_level=1) handler.buffer = ['foo\n', 'bar\n'] handler.forgot = True flexmock(module.logging).should_receive('getLogger').and_return(flexmock(handlers=[handler])) payload = module.format_buffered_logs_for_payload() assert payload == '...\nfoo\nbar\n' def test_format_buffered_logs_for_payload_without_handler_produces_empty_payload(): flexmock(module.logging).should_receive('getLogger').and_return( flexmock(handlers=[module.logging.Handler()]) ) payload = module.format_buffered_logs_for_payload() assert payload == '' def 
test_ping_monitor_hits_ping_url_for_start_state(): flexmock(module).should_receive('Forgetful_buffering_handler') ping_url = 'https://example.com' flexmock(module.requests).should_receive('post').with_args( '{}/{}'.format(ping_url, 'start'), data=''.encode('utf-8') ) module.ping_monitor( ping_url, 'config.yaml', state=module.monitor.State.START, monitoring_log_level=1, dry_run=False, ) def test_ping_monitor_hits_ping_url_for_finish_state(): ping_url = 'https://example.com' payload = 'data' flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload) flexmock(module.requests).should_receive('post').with_args( ping_url, data=payload.encode('utf-8') ) module.ping_monitor( ping_url, 'config.yaml', state=module.monitor.State.FINISH, monitoring_log_level=1, dry_run=False, ) def test_ping_monitor_hits_ping_url_for_fail_state(): ping_url = 'https://example.com' payload = 'data' flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload) flexmock(module.requests).should_receive('post').with_args( '{}/{}'.format(ping_url, 'fail'), data=payload.encode('utf') ) module.ping_monitor( ping_url, 'config.yaml', state=module.monitor.State.FAIL, monitoring_log_level=1, dry_run=False, ) def test_ping_monitor_with_ping_uuid_hits_corresponding_url(): ping_uuid = 'abcd-efgh-ijkl-mnop' payload = 'data' flexmock(module).should_receive('format_buffered_logs_for_payload').and_return(payload) flexmock(module.requests).should_receive('post').with_args( 'https://hc-ping.com/{}'.format(ping_uuid), data=payload.encode('utf-8') ) module.ping_monitor( ping_uuid, 'config.yaml', state=module.monitor.State.FINISH, monitoring_log_level=1, dry_run=False, ) def test_ping_monitor_dry_run_does_not_hit_ping_url(): flexmock(module).should_receive('Forgetful_buffering_handler') ping_url = 'https://example.com' flexmock(module.requests).should_receive('post').never() module.ping_monitor( ping_url, 'config.yaml', state=module.monitor.State.START, 
monitoring_log_level=1, dry_run=True, ) borgmatic-1.5.1/tests/unit/hooks/test_mysql.py000066400000000000000000000202101361605604600215110ustar00rootroot00000000000000import sys from flexmock import flexmock from borgmatic.hooks import mysql as module def test_dump_databases_runs_mysqldump_for_each_database(): databases = [{'name': 'foo'}, {'name': 'bar'}] output_file = flexmock() flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ).and_return('databases/localhost/bar') flexmock(module.os).should_receive('makedirs') flexmock(sys.modules['builtins']).should_receive('open').and_return(output_file) for name in ('foo', 'bar'): flexmock(module).should_receive('execute_command').with_args( ('mysqldump', '--add-drop-database', '--databases', name), output_file=output_file, extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_with_dry_run_skips_mysqldump(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ).and_return('databases/localhost/bar') flexmock(module.os).should_receive('makedirs').never() flexmock(module).should_receive('execute_command').never() module.dump_databases(databases, 'test.yaml', {}, dry_run=True) def test_dump_databases_runs_mysqldump_with_hostname_and_port(): databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}] output_file = flexmock() flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/database.example.org/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(sys.modules['builtins']).should_receive('open').and_return(output_file) 
flexmock(module).should_receive('execute_command').with_args( ( 'mysqldump', '--add-drop-database', '--host', 'database.example.org', '--port', '5433', '--protocol', 'tcp', '--databases', 'foo', ), output_file=output_file, extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_runs_mysqldump_with_username_and_password(): databases = [{'name': 'foo', 'username': 'root', 'password': 'trustsome1'}] output_file = flexmock() flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(sys.modules['builtins']).should_receive('open').and_return(output_file) flexmock(module).should_receive('execute_command').with_args( ('mysqldump', '--add-drop-database', '--user', 'root', '--databases', 'foo'), output_file=output_file, extra_environment={'MYSQL_PWD': 'trustsome1'}, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_runs_mysqldump_with_options(): databases = [{'name': 'foo', 'options': '--stuff=such'}] output_file = flexmock() flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(sys.modules['builtins']).should_receive('open').and_return(output_file) flexmock(module).should_receive('execute_command').with_args( ('mysqldump', '--add-drop-database', '--stuff=such', '--databases', 'foo'), output_file=output_file, extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_runs_mysqldump_for_all_databases(): databases = [{'name': 'all'}] output_file = flexmock() flexmock(module).should_receive('make_dump_path').and_return('') 
flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/all' ) flexmock(module.os).should_receive('makedirs') flexmock(sys.modules['builtins']).should_receive('open').and_return(output_file) flexmock(module).should_receive('execute_command').with_args( ('mysqldump', '--add-drop-database', '--all-databases'), output_file=output_file, extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_make_database_dump_patterns_converts_names_to_glob_paths(): flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/*/foo' ).and_return('databases/*/bar') assert module.make_database_dump_patterns(flexmock(), flexmock(), {}, ('foo', 'bar')) == [ 'databases/*/foo', 'databases/*/bar', ] def test_make_database_dump_patterns_treats_empty_names_as_matching_all_databases(): flexmock(module).should_receive('make_dump_path').and_return('/dump/path') flexmock(module.dump).should_receive('make_database_dump_filename').with_args( '/dump/path', '*', '*' ).and_return('databases/*/*') assert module.make_database_dump_patterns(flexmock(), flexmock(), {}, ()) == ['databases/*/*'] def test_restore_database_dumps_restores_each_database(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ).and_return('databases/localhost/bar') for name in ('foo', 'bar'): dump_filename = 'databases/localhost/{}'.format(name) input_file = flexmock() flexmock(sys.modules['builtins']).should_receive('open').with_args( dump_filename ).and_return(input_file) flexmock(module).should_receive('execute_command').with_args( ('mysql', '--batch'), input_file=input_file, extra_environment=None ).once() module.restore_database_dumps(databases, 'test.yaml', {}, 
dry_run=False) def test_restore_database_dumps_runs_mysql_with_hostname_and_port(): databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) dump_filename = 'databases/localhost/foo' input_file = flexmock() flexmock(sys.modules['builtins']).should_receive('open').with_args(dump_filename).and_return( input_file ) flexmock(module).should_receive('execute_command').with_args( ( 'mysql', '--batch', '--host', 'database.example.org', '--port', '5433', '--protocol', 'tcp', ), input_file=input_file, extra_environment=None, ).once() module.restore_database_dumps(databases, 'test.yaml', {}, dry_run=False) def test_restore_database_dumps_runs_mysql_with_username_and_password(): databases = [{'name': 'foo', 'username': 'root', 'password': 'trustsome1'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) dump_filename = 'databases/localhost/foo' input_file = flexmock() flexmock(sys.modules['builtins']).should_receive('open').with_args(dump_filename).and_return( input_file ) flexmock(module).should_receive('execute_command').with_args( ('mysql', '--batch', '--user', 'root'), input_file=input_file, extra_environment={'MYSQL_PWD': 'trustsome1'}, ).once() module.restore_database_dumps(databases, 'test.yaml', {}, dry_run=False) borgmatic-1.5.1/tests/unit/hooks/test_pagerduty.py000066400000000000000000000020151361605604600223530ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.hooks import pagerduty as module def test_ping_monitor_ignores_start_state(): flexmock(module.requests).should_receive('post').never() module.ping_monitor( 'abc123', 'config.yaml', module.monitor.State.START, monitoring_log_level=1, dry_run=False ) def 
test_ping_monitor_ignores_finish_state(): flexmock(module.requests).should_receive('post').never() module.ping_monitor( 'abc123', 'config.yaml', module.monitor.State.FINISH, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_calls_api_for_fail_state(): flexmock(module.requests).should_receive('post') module.ping_monitor( 'abc123', 'config.yaml', module.monitor.State.FAIL, monitoring_log_level=1, dry_run=False ) def test_ping_monitor_dry_run_does_not_call_api(): flexmock(module.requests).should_receive('post').never() module.ping_monitor( 'abc123', 'config.yaml', module.monitor.State.FAIL, monitoring_log_level=1, dry_run=True ) borgmatic-1.5.1/tests/unit/hooks/test_postgresql.py000066400000000000000000000232441361605604600225610ustar00rootroot00000000000000from flexmock import flexmock from borgmatic.hooks import postgresql as module def test_dump_databases_runs_pg_dump_for_each_database(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ).and_return('databases/localhost/bar') flexmock(module.os).should_receive('makedirs') for name in ('foo', 'bar'): flexmock(module).should_receive('execute_command').with_args( ( 'pg_dump', '--no-password', '--clean', '--file', 'databases/localhost/{}'.format(name), '--format', 'custom', name, ), extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_with_dry_run_skips_pg_dump(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ).and_return('databases/localhost/bar') flexmock(module.os).should_receive('makedirs').never() flexmock(module).should_receive('execute_command').never() module.dump_databases(databases, 'test.yaml', 
{}, dry_run=True) def test_dump_databases_runs_pg_dump_with_hostname_and_port(): databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/database.example.org/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(module).should_receive('execute_command').with_args( ( 'pg_dump', '--no-password', '--clean', '--file', 'databases/database.example.org/foo', '--host', 'database.example.org', '--port', '5433', '--format', 'custom', 'foo', ), extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_runs_pg_dump_with_username_and_password(): databases = [{'name': 'foo', 'username': 'postgres', 'password': 'trustsome1'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(module).should_receive('execute_command').with_args( ( 'pg_dump', '--no-password', '--clean', '--file', 'databases/localhost/foo', '--username', 'postgres', '--format', 'custom', 'foo', ), extra_environment={'PGPASSWORD': 'trustsome1'}, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_runs_pg_dump_with_format(): databases = [{'name': 'foo', 'format': 'tar'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(module).should_receive('execute_command').with_args( ( 'pg_dump', '--no-password', '--clean', '--file', 'databases/localhost/foo', '--format', 'tar', 'foo', ), extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) 
def test_dump_databases_runs_pg_dump_with_options(): databases = [{'name': 'foo', 'options': '--stuff=such'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module.os).should_receive('makedirs') flexmock(module).should_receive('execute_command').with_args( ( 'pg_dump', '--no-password', '--clean', '--file', 'databases/localhost/foo', '--format', 'custom', '--stuff=such', 'foo', ), extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_dump_databases_runs_pg_dumpall_for_all_databases(): databases = [{'name': 'all'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/all' ) flexmock(module.os).should_receive('makedirs') flexmock(module).should_receive('execute_command').with_args( ('pg_dumpall', '--no-password', '--clean', '--file', 'databases/localhost/all'), extra_environment=None, ).once() module.dump_databases(databases, 'test.yaml', {}, dry_run=False) def test_make_database_dump_patterns_converts_names_to_glob_paths(): flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/*/foo' ).and_return('databases/*/bar') assert module.make_database_dump_patterns(flexmock(), flexmock(), {}, ('foo', 'bar')) == [ 'databases/*/foo', 'databases/*/bar', ] def test_make_database_dump_patterns_treats_empty_names_as_matching_all_databases(): flexmock(module).should_receive('make_dump_path').and_return('/dump/path') flexmock(module.dump).should_receive('make_database_dump_filename').with_args( '/dump/path', '*', '*' ).and_return('databases/*/*') assert module.make_database_dump_patterns(flexmock(), flexmock(), {}, ()) == ['databases/*/*'] def 
test_restore_database_dumps_restores_each_database(): databases = [{'name': 'foo'}, {'name': 'bar'}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ).and_return('databases/localhost/bar') for name in ('foo', 'bar'): flexmock(module).should_receive('execute_command').with_args( ( 'pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error', '--dbname', name, 'databases/localhost/{}'.format(name), ), extra_environment=None, ).once() flexmock(module).should_receive('execute_command').with_args( ('psql', '--no-password', '--quiet', '--dbname', name, '--command', 'ANALYZE'), extra_environment=None, ).once() module.restore_database_dumps(databases, 'test.yaml', {}, dry_run=False) def test_restore_database_dumps_runs_pg_restore_with_hostname_and_port(): databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}] flexmock(module).should_receive('make_dump_path').and_return('') flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module).should_receive('execute_command').with_args( ( 'pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error', '--host', 'database.example.org', '--port', '5433', '--dbname', 'foo', 'databases/localhost/foo', ), extra_environment=None, ).once() flexmock(module).should_receive('execute_command').with_args( ( 'psql', '--no-password', '--quiet', '--host', 'database.example.org', '--port', '5433', '--dbname', 'foo', '--command', 'ANALYZE', ), extra_environment=None, ).once() module.restore_database_dumps(databases, 'test.yaml', {}, dry_run=False) def test_restore_database_dumps_runs_pg_restore_with_username_and_password(): databases = [{'name': 'foo', 'username': 'postgres', 'password': 'trustsome1'}] flexmock(module).should_receive('make_dump_path').and_return('') 
flexmock(module.dump).should_receive('make_database_dump_filename').and_return( 'databases/localhost/foo' ) flexmock(module).should_receive('execute_command').with_args( ( 'pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error', '--username', 'postgres', '--dbname', 'foo', 'databases/localhost/foo', ), extra_environment={'PGPASSWORD': 'trustsome1'}, ).once() flexmock(module).should_receive('execute_command').with_args( ( 'psql', '--no-password', '--quiet', '--username', 'postgres', '--dbname', 'foo', '--command', 'ANALYZE', ), extra_environment={'PGPASSWORD': 'trustsome1'}, ).once() module.restore_database_dumps(databases, 'test.yaml', {}, dry_run=False) borgmatic-1.5.1/tests/unit/test_execute.py000066400000000000000000000162031361605604600206720ustar00rootroot00000000000000import pytest from flexmock import flexmock from borgmatic import execute as module @pytest.mark.parametrize( 'exit_code,error_on_warnings,expected_result', ( (2, True, True), (2, False, True), (1, True, True), (1, False, False), (0, True, False), (0, False, False), ), ) def test_exit_code_indicates_error_respects_exit_code_and_error_on_warnings( exit_code, error_on_warnings, expected_result ): assert ( module.exit_code_indicates_error( ('command',), exit_code, error_on_warnings=error_on_warnings ) is expected_result ) def test_execute_command_calls_full_command(): full_command = ['foo', 'bar'] flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('Popen').with_args( full_command, stdin=None, stdout=module.subprocess.PIPE, stderr=module.subprocess.STDOUT, shell=False, env=None, cwd=None, ).and_return(flexmock(stdout=None)).once() flexmock(module).should_receive('log_output') output = module.execute_command(full_command) assert output is None def test_execute_command_calls_full_command_with_output_file(): full_command = ['foo', 'bar'] output_file = flexmock(name='test') flexmock(module.os, environ={'a': 'b'}) 
flexmock(module.subprocess).should_receive('Popen').with_args( full_command, stdin=None, stdout=output_file, stderr=module.subprocess.PIPE, shell=False, env=None, cwd=None, ).and_return(flexmock(stderr=None)).once() flexmock(module).should_receive('log_output') output = module.execute_command(full_command, output_file=output_file) assert output is None def test_execute_command_calls_full_command_with_input_file(): full_command = ['foo', 'bar'] input_file = flexmock(name='test') flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('Popen').with_args( full_command, stdin=input_file, stdout=module.subprocess.PIPE, stderr=module.subprocess.STDOUT, shell=False, env=None, cwd=None, ).and_return(flexmock(stdout=None)).once() flexmock(module).should_receive('log_output') output = module.execute_command(full_command, input_file=input_file) assert output is None def test_execute_command_calls_full_command_with_shell(): full_command = ['foo', 'bar'] flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('Popen').with_args( full_command, stdin=None, stdout=module.subprocess.PIPE, stderr=module.subprocess.STDOUT, shell=True, env=None, cwd=None, ).and_return(flexmock(stdout=None)).once() flexmock(module).should_receive('log_output') output = module.execute_command(full_command, shell=True) assert output is None def test_execute_command_calls_full_command_with_extra_environment(): full_command = ['foo', 'bar'] flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('Popen').with_args( full_command, stdin=None, stdout=module.subprocess.PIPE, stderr=module.subprocess.STDOUT, shell=False, env={'a': 'b', 'c': 'd'}, cwd=None, ).and_return(flexmock(stdout=None)).once() flexmock(module).should_receive('log_output') output = module.execute_command(full_command, extra_environment={'c': 'd'}) assert output is None def test_execute_command_calls_full_command_with_working_directory(): full_command = ['foo', 
'bar'] flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('Popen').with_args( full_command, stdin=None, stdout=module.subprocess.PIPE, stderr=module.subprocess.STDOUT, shell=False, env=None, cwd='/working', ).and_return(flexmock(stdout=None)).once() flexmock(module).should_receive('log_output') output = module.execute_command(full_command, working_directory='/working') assert output is None def test_execute_command_captures_output(): full_command = ['foo', 'bar'] expected_output = '[]' flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('check_output').with_args( full_command, shell=False, env=None, cwd=None ).and_return(flexmock(decode=lambda: expected_output)).once() output = module.execute_command(full_command, output_log_level=None) assert output == expected_output def test_execute_command_captures_output_with_shell(): full_command = ['foo', 'bar'] expected_output = '[]' flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('check_output').with_args( full_command, shell=True, env=None, cwd=None ).and_return(flexmock(decode=lambda: expected_output)).once() output = module.execute_command(full_command, output_log_level=None, shell=True) assert output == expected_output def test_execute_command_captures_output_with_extra_environment(): full_command = ['foo', 'bar'] expected_output = '[]' flexmock(module.os, environ={'a': 'b'}) flexmock(module.subprocess).should_receive('check_output').with_args( full_command, shell=False, env={'a': 'b', 'c': 'd'}, cwd=None ).and_return(flexmock(decode=lambda: expected_output)).once() output = module.execute_command( full_command, output_log_level=None, shell=False, extra_environment={'c': 'd'} ) assert output == expected_output def test_execute_command_captures_output_with_working_directory(): full_command = ['foo', 'bar'] expected_output = '[]' flexmock(module.os, environ={'a': 'b'}) 
flexmock(module.subprocess).should_receive('check_output').with_args( full_command, shell=False, env=None, cwd='/working' ).and_return(flexmock(decode=lambda: expected_output)).once() output = module.execute_command( full_command, output_log_level=None, shell=False, working_directory='/working' ) assert output == expected_output def test_execute_command_without_capture_does_not_raise_on_success(): flexmock(module.subprocess).should_receive('check_call').and_raise( module.subprocess.CalledProcessError(0, 'borg init') ) module.execute_command_without_capture(('borg', 'init')) def test_execute_command_without_capture_does_not_raise_on_warning(): flexmock(module).should_receive('exit_code_indicates_error').and_return(False) flexmock(module.subprocess).should_receive('check_call').and_raise( module.subprocess.CalledProcessError(1, 'borg init') ) module.execute_command_without_capture(('borg', 'init')) def test_execute_command_without_capture_raises_on_error(): flexmock(module).should_receive('exit_code_indicates_error').and_return(True) flexmock(module.subprocess).should_receive('check_call').and_raise( module.subprocess.CalledProcessError(2, 'borg init') ) with pytest.raises(module.subprocess.CalledProcessError): module.execute_command_without_capture(('borg', 'init')) borgmatic-1.5.1/tests/unit/test_logger.py000066400000000000000000000235441361605604600205150ustar00rootroot00000000000000import logging import pytest from flexmock import flexmock from borgmatic import logger as module @pytest.mark.parametrize('bool_val', (True, 'yes', 'on', '1', 'true', 'True', 1)) def test_to_bool_parses_true_values(bool_val): assert module.to_bool(bool_val) @pytest.mark.parametrize('bool_val', (False, 'no', 'off', '0', 'false', 'False', 0)) def test_to_bool_parses_false_values(bool_val): assert not module.to_bool(bool_val) def test_to_bool_passes_none_through(): assert module.to_bool(None) is None def test_interactive_console_false_when_not_isatty(capsys): with capsys.disabled(): 
flexmock(module.sys.stderr).should_receive('isatty').and_return(False) assert module.interactive_console() is False def test_interactive_console_false_when_TERM_is_dumb(capsys): with capsys.disabled(): flexmock(module.sys.stderr).should_receive('isatty').and_return(True) flexmock(module.os.environ).should_receive('get').with_args('TERM').and_return('dumb') assert module.interactive_console() is False def test_interactive_console_true_when_isatty_and_TERM_is_not_dumb(capsys): with capsys.disabled(): flexmock(module.sys.stderr).should_receive('isatty').and_return(True) flexmock(module.os.environ).should_receive('get').with_args('TERM').and_return('smart') assert module.interactive_console() is True def test_should_do_markup_respects_no_color_value(): assert module.should_do_markup(no_color=True, configs={}) is False def test_should_do_markup_respects_config_value(): assert ( module.should_do_markup(no_color=False, configs={'foo.yaml': {'output': {'color': False}}}) is False ) def test_should_do_markup_prefers_any_false_config_value(): assert ( module.should_do_markup( no_color=False, configs={ 'foo.yaml': {'output': {'color': True}}, 'bar.yaml': {'output': {'color': False}}, }, ) is False ) def test_should_do_markup_respects_PY_COLORS_environment_variable(): flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module).should_receive('to_bool').and_return(True) assert module.should_do_markup(no_color=False, configs={}) is True def test_should_do_markup_prefers_no_color_value_to_config_value(): assert ( module.should_do_markup(no_color=True, configs={'foo.yaml': {'output': {'color': True}}}) is False ) def test_should_do_markup_prefers_config_value_to_PY_COLORS(): flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module).should_receive('to_bool').and_return(True) assert ( module.should_do_markup(no_color=False, configs={'foo.yaml': {'output': {'color': False}}}) is False ) def 
test_should_do_markup_prefers_no_color_value_to_PY_COLORS(): flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module).should_receive('to_bool').and_return(True) assert module.should_do_markup(no_color=True, configs={}) is False def test_should_do_markup_respects_interactive_console_value(): flexmock(module.os.environ).should_receive('get').and_return(None) flexmock(module).should_receive('interactive_console').and_return(True) assert module.should_do_markup(no_color=False, configs={}) is True def test_should_do_markup_prefers_PY_COLORS_to_interactive_console_value(): flexmock(module.os.environ).should_receive('get').and_return('True') flexmock(module).should_receive('to_bool').and_return(True) flexmock(module).should_receive('interactive_console').and_return(False) assert module.should_do_markup(no_color=False, configs={}) is True def test_multi_stream_handler_logs_to_handler_for_log_level(): error_handler = flexmock() error_handler.should_receive('emit').once() info_handler = flexmock() multi_handler = module.Multi_stream_handler( {module.logging.ERROR: error_handler, module.logging.INFO: info_handler} ) multi_handler.emit(flexmock(levelno=module.logging.ERROR)) def test_console_color_formatter_format_includes_log_message(): plain_message = 'uh oh' record = flexmock(levelno=logging.CRITICAL, msg=plain_message) colored_message = module.Console_color_formatter().format(record) assert colored_message != plain_message assert plain_message in colored_message def test_color_text_does_not_raise(): module.color_text(module.colorama.Fore.RED, 'hi') def test_color_text_without_color_does_not_raise(): module.color_text(None, 'hi') def test_configure_logging_probes_for_log_socket_on_linux(): flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) flexmock(module).should_receive('Console_color_formatter') 
flexmock(module).should_receive('interactive_console').and_return(False) flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.INFO, handlers=tuple ) flexmock(module.os.path).should_receive('exists').with_args('/dev/log').and_return(True) syslog_handler = logging.handlers.SysLogHandler() flexmock(module.logging.handlers).should_receive('SysLogHandler').with_args( address='/dev/log' ).and_return(syslog_handler).once() module.configure_logging(logging.INFO) def test_configure_logging_probes_for_log_socket_on_macos(): flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) flexmock(module).should_receive('Console_color_formatter') flexmock(module).should_receive('interactive_console').and_return(False) flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.INFO, handlers=tuple ) flexmock(module.os.path).should_receive('exists').with_args('/dev/log').and_return(False) flexmock(module.os.path).should_receive('exists').with_args('/var/run/syslog').and_return(True) syslog_handler = logging.handlers.SysLogHandler() flexmock(module.logging.handlers).should_receive('SysLogHandler').with_args( address='/var/run/syslog' ).and_return(syslog_handler).once() module.configure_logging(logging.INFO) def test_configure_logging_sets_global_logger_to_most_verbose_log_level(): flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) flexmock(module).should_receive('Console_color_formatter') flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.DEBUG, handlers=tuple ).once() flexmock(module.os.path).should_receive('exists').and_return(False) module.configure_logging(console_log_level=logging.INFO, syslog_log_level=logging.DEBUG) def test_configure_logging_skips_syslog_if_not_found(): 
flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) flexmock(module).should_receive('Console_color_formatter') flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.INFO, handlers=tuple ) flexmock(module.os.path).should_receive('exists').and_return(False) flexmock(module.logging.handlers).should_receive('SysLogHandler').never() module.configure_logging(console_log_level=logging.INFO) def test_configure_logging_skips_syslog_if_interactive_console(): flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) flexmock(module).should_receive('Console_color_formatter') flexmock(module).should_receive('interactive_console').and_return(True) flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.INFO, handlers=tuple ) flexmock(module.os.path).should_receive('exists').with_args('/dev/log').and_return(True) flexmock(module.logging.handlers).should_receive('SysLogHandler').never() module.configure_logging(console_log_level=logging.INFO) def test_configure_logging_to_logfile_instead_of_syslog(): flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) # syslog skipped in non-interactive console if --log-file argument provided flexmock(module).should_receive('interactive_console').and_return(False) flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.DEBUG, handlers=tuple ) flexmock(module.os.path).should_receive('exists').with_args('/dev/log').and_return(True) flexmock(module.logging.handlers).should_receive('SysLogHandler').never() file_handler = logging.handlers.WatchedFileHandler('/tmp/logfile') flexmock(module.logging.handlers).should_receive('WatchedFileHandler').with_args( '/tmp/logfile' ).and_return(file_handler).once() 
module.configure_logging( console_log_level=logging.INFO, log_file_log_level=logging.DEBUG, log_file='/tmp/logfile' ) def test_configure_logging_skips_logfile_if_argument_is_none(): flexmock(module).should_receive('Multi_stream_handler').and_return( flexmock(setFormatter=lambda formatter: None, setLevel=lambda level: None) ) # No WatchedFileHandler added if argument --log-file is None flexmock(module).should_receive('interactive_console').and_return(False) flexmock(module.logging).should_receive('basicConfig').with_args( level=logging.INFO, handlers=tuple ) flexmock(module.os.path).should_receive('exists').and_return(False) flexmock(module.logging.handlers).should_receive('WatchedFileHandler').never() module.configure_logging(console_log_level=logging.INFO, log_file=None) borgmatic-1.5.1/tests/unit/test_verbosity.py000066400000000000000000000015451361605604600212610ustar00rootroot00000000000000import logging from flexmock import flexmock from borgmatic import verbosity as module def insert_logging_mock(log_level): ''' Mock the isEnabledFor from Python logging. 
''' logging = flexmock(module.logging.Logger) logging.should_receive('isEnabledFor').replace_with(lambda level: level >= log_level) logging.should_receive('getEffectiveLevel').replace_with(lambda: log_level) def test_verbosity_to_log_level_maps_known_verbosity_to_log_level(): assert module.verbosity_to_log_level(module.VERBOSITY_SOME) == logging.INFO assert module.verbosity_to_log_level(module.VERBOSITY_LOTS) == logging.DEBUG assert module.verbosity_to_log_level(module.VERBOSITY_ERROR) == logging.ERROR def test_verbosity_to_log_level_maps_unknown_verbosity_to_warning_level(): assert module.verbosity_to_log_level('my pants') == logging.WARNING borgmatic-1.5.1/tox.ini000066400000000000000000000013331361605604600150070ustar00rootroot00000000000000[tox] envlist = py35,py36,py37,py38 skip_missing_interpreters = True skipsdist = True minversion = 3.14.1 [testenv] usedevelop = True deps = -rtest_requirements.txt whitelist_externals = find sh passenv = COVERAGE_FILE commands = pytest {posargs} py36,py37,py38: black --check . isort --recursive --check-only --settings-path setup.cfg . flake8 borgmatic tests [testenv:black] commands = black {posargs} . [testenv:test] commands = pytest {posargs} [testenv:end-to-end] deps = -rtest_requirements.txt passenv = COVERAGE_FILE commands = pytest {posargs} --no-cov tests/end-to-end [testenv:isort] deps = {[testenv]deps} commands = isort {posargs:--recursive} --settings-path setup.cfg .