--- bundlewrap-3.8.0/.gitignore ---

/dist/
/docs/build/

--- bundlewrap-3.8.0/.travis.yml ---

language: python
python:
  - 2.7
  - 3.5
  - 3.6
  - 3.7
dist: xenial  # required for Python 3.7 https://github.com/travis-ci/travis-ci/issues/9815
services:
  - postgresql
install:
  - pip install .
before_script:
  - ssh-keygen -f ~/.ssh/id_rsa -N ""
  - cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
  - ssh -o StrictHostKeyChecking=no localhost id
script:
  - py.test tests
notifications:
  irc:
    channels:
      - "irc.freenode.org#bundlewrap"
    use_notice: true
    skip_join: true

--- bundlewrap-3.8.0/AUTHORS ---

# By adding your name to this file you agree to the Copyright Assignment
# Agreement found in the CAA.md file in this repository.

Torsten Rehn
Peter Hofmann
Tim Buchwaldt
Rico Ullmann

--- bundlewrap-3.8.0/CAA.md ---

# BundleWrap Individual Contributor Copyright Assignment Agreement

Thank you for your interest in contributing to the BundleWrap open-source project, currently owned and represented by [Torsten Rehn](mailto:torsten@rehn.email) ("We" or "Us"). This contributor agreement ("Agreement") documents the rights granted by contributors to Us. To make this document effective, please sign it and send it to Us by email or electronic submission, following the instructions at [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing). This is a legally binding document, so please read it carefully before agreeing to it. The Agreement may cover more than one software project managed by Us.

## 1. Definitions

"You" means the individual who Submits a Contribution to Us.

"Contribution" means any work of authorship that is Submitted by You to Us in which You own or assert ownership of the Copyright. If You do not own the Copyright in the entire work of authorship, please follow the instructions in [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing).

"Copyright" means all rights protecting works of authorship owned or controlled by You, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence including any extensions by You.

"Material" means the work of authorship which is made available by Us to third parties. When this Agreement covers more than one software project, the Material means the work of authorship to which the Contribution was Submitted. After You Submit the Contribution, it may be included in the Material.

"Submit" means any form of electronic, verbal, or written communication sent to Us or our representatives, including but not limited to electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, Us for the purpose of discussing and improving the Material, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."

"Submission Date" means the date on which You Submit a Contribution to Us.
"Effective Date" means the date You execute this Agreement or the date You first Submit a Contribution to Us, whichever is earlier. ## 2. Grant of Rights ### 2.1 Copyright Assignment 1) At the time the Contribution is Submitted, You assign to Us all right, title, and interest worldwide in all Copyright covering the Contribution; provided that this transfer is conditioned upon compliance with Section 2.3. 2) To the extent that any of the rights in Section 2.1.1 cannot be assigned by You to Us, You grant to Us a perpetual, worldwide, exclusive, royalty-free, transferable, irrevocable license under such non-assigned rights, with rights to sublicense through multiple tiers of sublicensees, to practice such non-assigned rights, including, but not limited to, the right to reproduce, modify, display, perform and distribute the Contribution; provided that this license is conditioned upon compliance with Section 2.3. 3) To the extent that any of the rights in Section 2.1.1 can neither be assigned nor licensed by You to Us, You irrevocably waive and agree never to assert such rights against Us, any of our successors in interest, or any of our licensees, either direct or indirect; provided that this agreement not to assert is conditioned upon compliance with Section 2.3. 4) Upon such transfer of rights to Us, to the maximum extent possible, We immediately grant to You a perpetual, worldwide, non-exclusive, royalty-free, transferable, irrevocable license under such rights covering the Contribution, with rights to sublicense through multiple tiers of sublicensees, to reproduce, modify, display, perform, and distribute the Contribution. The intention of the parties is that this license will be as broad as possible and to provide You with rights as similar as possible to the owner of the rights that You transferred. This license back is limited to the Contribution and does not provide any rights to the Material. ### 2.2 Patent License For patent claims including, without limitation, method, process, and apparatus claims which You own, control or have the right to grant, now or in the future, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable patent license, with the right to sublicense these rights to multiple tiers of sublicensees, to make, have made, use, sell, offer for sale, import and otherwise transfer the Contribution and the Contribution in combination with the Material (and portions of such combination). This license is granted only to the extent that the exercise of the licensed rights infringes such patent claims; and provided that this license is conditioned upon compliance with Section 2.3. ### 2.3 Outbound License As a condition on the grant of rights in Sections 2.1 and 2.2, We agree to license the Contribution only under the terms of the license or licenses which We are using on the Submission Date for the Material (including any rights to adopt any future version of a license if permitted). ### 2.4 Moral Rights If moral rights apply to the Contribution, to the maximum extent permitted by law, You waive and agree not to assert such moral rights against Us or our successors in interest, or any of our licensees, either direct or indirect. ### 2.5 Our Rights You acknowledge that We are not obligated to use Your Contribution as part of the Material and may decide to include any Contribution We consider appropriate. ### 2.6 Reservation of Rights Any rights not expressly assigned or licensed under this section are expressly reserved by You. ## 3. 
## 3. Agreement

You confirm that:

1) You have the legal authority to enter into this Agreement.

2) You own the Copyright and patent claims covering the Contribution which are required to grant the rights under Section 2.

3) The grant of rights under Section 2 does not violate any grant of rights which You have made to third parties, including Your employer. If You are an employee, You have had Your employer approve this Agreement or sign the Entity version of this document. If You are less than eighteen years old, please have Your parents or guardian sign the Agreement.

4) You have followed the instructions in [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing), if You do not own the Copyright in the entire work of authorship Submitted.

## 4. Disclaimer

EXCEPT FOR THE EXPRESS WARRANTIES IN SECTION 3, THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US AND BY US TO YOU. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION TO THE MINIMUM PERIOD PERMITTED BY LAW.

## 5. Consequential Damage Waiver

TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU OR US BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH THE CLAIM IS BASED.

## 6. Miscellaneous

### 6.1

This Agreement will be governed by and construed in accordance with the laws of Germany excluding its conflicts of law provisions. Under certain circumstances, the governing law in this section might be superseded by the United Nations Convention on Contracts for the International Sale of Goods ("UN Convention") and the parties intend to avoid the application of the UN Convention to this Agreement and, thus, exclude the application of the UN Convention in its entirety to this Agreement.

### 6.2

This Agreement sets out the entire agreement between You and Us for Your Contributions to Us and overrides all other agreements or understandings.

### 6.3

If You or We assign the rights or obligations received through this Agreement to a third party, as a condition of the assignment, that third party must agree in writing to abide by all the rights and obligations in the Agreement.

### 6.4

The failure of either party to require performance by the other party of any provision of this Agreement in one situation shall not affect the right of a party to require such performance at any time in the future. A waiver of performance under a provision in one situation shall not be considered a waiver of the performance of the provision in the future or a waiver of the provision in its entirety.

### 6.5

If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and which is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law.
--- bundlewrap-3.8.0/CHANGELOG.md ---

# 3.8.0

2020-01-09

* `k8s_raw`: added support for items without a namespace
* `k8s_raw`: fixed overriding resource name in YAML
* `k8s_raw`: allow using builtin item types if there are no actual conflicts
* decryption keys can now be set within encrypted files
* improved detection of incorrect metadata processor usage
* fixed excessive skipping of items because of concurrency dependencies
* fixed `preceded_by` not working for actions

# 3.7.0

2019-10-07

* Faults are now accepted as item attribute values
* Filter objects, iterators and such can now be used as item attribute values
* `BW_VAULT_DUMMY_MODE` will now yield dummy passwords of requested length
* added `repo.vault.random_bytes_as_base64_for()`

# 3.6.2

2019-07-25

* fixed `None` not being accepted as a file/directory mode
* fixed overriding resource name in k8s manifests

# 3.6.1

2019-03-12

* Faults can now be sorted
* fixed detection of runtime-enabled `svc_systemd`
* fixed resolving nested Faults

# 3.6.0

2019-02-27

* added `bw apply --only`
* added `Fault.b64encode()`
* added support for using Faults in k8s manifests
* improved display of some skipped items
* improved error handling during `bw apply`
* improved handling of offline nodes in `bw verify`
* fixed corrupted hard lock warning
* fixed interactively overwriting symlinks/dirs

# 3.5.3

2018-12-27

* added error message when trying to access node bundles from `members_add/remove`
* improved performance for file verification
* fixed symlinks being mistaken for directories in some circumstances

# 3.5.2

2018-12-11

* fixed IO activation/deactivation when using bw as a library
* fixed `atomic()` being removed prematurely during metadata processing

# 3.5.1

2018-07-08

* added support for Python 3.7
* fixed merged metadata not overwriting atomic() values

# 3.5.0

2018-06-12

* added `template_node` node attribute
* actions are now included in `bw verify`
* improved error message for KeyErrors in Mako templates
* fixed hashing for filenames with escaped characters
* fixed AttributeError when reverse-depending on `bundle:` items

# 3.4.0

2018-05-02

* added k8s_clusterrole items
* added k8s_clusterrolebinding items
* added k8s_crd items
* added k8s_networkpolicy items
* added k8s_raw items
* added k8s_role items
* added k8s_rolebinding items
* added Kubernetes item preview with `bw items -f`
* improved handling of exceptions during `bw verify` and `bw apply`
* improved progress display during `bw run`

# 3.3.0

2018-03-09

* added experimental support for Kubernetes
* some hooks can now raise an exception to skip nodes
* fixed ED25519 public keys not being recognized as text files
* fixed package names with hyphens for pkg_openbsd
* fixed diff for user groups

# 3.2.1

2018-01-08

* fixed metadata key filter for `bw metadata --blame`
* fixed pkg_openbsd reported incorrectly as having wrong flavor installed
* fixed crash when declining actions interactively

# 3.2.0

2018-01-01

* items skipped because of "unless" or "not triggered" are no longer shown during `bw apply`
* added `BW_SCP_ARGS`
* added `bw metadata --blame`
* added `bw test --metadata-keys`
* added flavor support to pkg_openbsd
* fixed changing symlink targets if previous target is a dir
* fixed display of some item attributes during `bw apply` and `bw verify`
* fixed handling of postgres DBs/roles with hyphens in them

# 3.1.1

2017-10-24

* will now detect bad wrappers around metadata processors
* fixed crash in `bw plot`
* fixed cut-off status lines

# 3.1.0

2017-10-10

* added pkg_opkg items
* added `bw test -s`
* improved error messages for unknown reverse triggers
* fixed hash_method md5 on user items
* fixed cursor sometimes not being restored

# 3.0.3

2017-10-04

* dropped support for Python 3.3
* fixed `bw` trying to hide the cursor without a TTY present
* fixed `ImportError` with Python 2.7

# 3.0.2

2017-10-04

* improved status line
* `bw test` is now more responsive to SIGINT
* sorted bundle and group lists in `bw nodes` output
* fixed an issue with symlinks failing if fixing both target and ownership
* fixed `bw run` with dummy nodes
* fixed progress exceeding 100% during `bw apply`
* fixed progress intermittently being stuck at 100% during `bw test`
* fixed incorrect display of fixed item properties
* fixed `bw metadata --table` being unable to show None
* fixed `bw metadata` hiding KeyErrors

# 3.0.1

2017-09-25

* fixed `bw run`
* fixed `bw test -e`

# 3.0.0

2017-09-24

* new metadata processor API and options (BACKWARDS INCOMPATIBLE)
* files, directories, and symlinks now have defaults for owner, group, and mode (BACKWARDS INCOMPATIBLE)
* overhauled options and output of `bw groups` (BACKWARDS INCOMPATIBLE)
* overhauled options and output of `bw nodes` (BACKWARDS INCOMPATIBLE)
* overhauled options and output of `bw run` (BACKWARDS INCOMPATIBLE)
* overhauled options of `bw test` (BACKWARDS INCOMPATIBLE)
* svc_systemd services are now 'enabled' by default (BACKWARDS INCOMPATIBLE)
* `bw items --file-preview` no longer uses a separate file path argument (BACKWARDS INCOMPATIBLE)
* removed `bw apply --profiling` (BACKWARDS INCOMPATIBLE)
* removed `Item.display_keys()` (BACKWARDS INCOMPATIBLE)
* changed return value of `Item.display_dicts()` (BACKWARDS INCOMPATIBLE)
* changed `Item.BLOCK_CONCURRENT` into a class method (BACKWARDS INCOMPATIBLE)
* removed `repo.vault.format()` (BACKWARDS INCOMPATIBLE)
* removed env vars: BWADDHOSTKEYS, BWCOLORS, BWITEMWORKERS, BWNODEWORKERS (BACKWARDS INCOMPATIBLE)

# 2.20.1

2017-09-21

* improved performance of metadata processors
* pkg_* and svc_* items no longer throw exceptions when their commands fail
* fixed BW_DEBUG_LOG_DIR with `bw debug`
* fixed 'precedes' attribute for actions

# 2.20.0

2017-08-15

* added progress info shown on SIGQUIT (CTRL+\\)
* added pkg_snap items
* fixed checking for dummy nodes during `bw lock`
* fixed handling of missing Faults for actions
* fixed handling of missing Faults for `bw items -w`

# 2.19.0

2017-07-05

* actions can now receive data over stdin
* added `Node.magic_number`
* added `bw apply --resume-file`
* added hooks for `bw lock`
* added `bw metadata --table`

# 2.18.1

2017-06-01

* fixed display of comments for actions

# 2.18.0

2017-05-22

* added encoding and collation to postgres_db items
* added the 'comment' attribute for all items
* fixed group deletion
* fixed accidental modification of lists in statedicts

# 2.17.1

2017-04-19

* fixed parent groups not being removed by subgroups' members_remove
* fixed `bw lock` trying to connect to dummy nodes

# 2.17.0

2017-03-26

* pkg_apt: added start_service attribute
* pkg_apt: added support for multiarch packages
* improved reporting of exceptions in metadata processors
* fixed package cache leaking across nodes

# 2.16.0

2017-02-23

* added `BW_TABLE_STYLE`
* added more Unicode tables
* added number of bundles and metadata processors to `bw stats`
* added oraclelinux to `OS_FAMILY_REDHAT`
* added option to ignore running status of systemd services
* improved circular dependency debugging
* improved reporting of dependency errors
* fixed avoidance of circular dependencies
* fixed dealing with SUID and SGID on directories
* fixed debug logging on Python 2.7
* fixed duplicates in `Group.subgroups`
* fixed handling of subgroup patterns in `bw plot group`

# 2.15.0

2017-01-19

* added item and attribute arguments to `bw items`
* added orphaned bundle warnings to `bw test`
* fixed regression when removing soft locks

# 2.14.0

2017-01-16

* added key filtering to `bw metadata`
* added `repo.vault.human_password_for()`
* added `BW_REPO_PATH` and `bw --repo-path`
* quotes are no longer required around commands with `bw run`
* fixed intermittent circular dependencies with multiple custom items using BLOCK_CONCURRENT
* fixed exception when removing non-existent soft lock

# 2.13.0

2017-01-05

* added tuple return option to metadata processors
* improved CLI output in various places
* improved performance during dependency processing
* improved performance when checking packages
* fixed hashing of metadata containing sets
* fixed exception with `svc_upstart` when service doesn't exist

# 2.12.2

2016-12-23

* added support for Python 3.6
* changed diff line length limit from 128 to 1024 characters
* fixed deadlock in Group.members_remove
* fixed unknown subgroups not being detected properly

# 2.12.1

2016-12-20

* fixed exception when changing owner of postgres databases
* fixed postgres roles requiring a password even when deleted
* fixed incorrect exit codes in some situations with `bw test`

# 2.12.0

2016-11-28

* added `BW_DEBUG_LOG_DIR`
* improved reporting of action failures
* fixed `bw plot groups` and `bw plot groups-for-node`
* fixed access to partial metadata in `Group.members_add` and `_remove`

# 2.11.0

2016-11-14

* added `bw nodes --inline`
* added `Group.members_add` and `.members_remove`
* fixed symlinks not overwriting other path types
* fixed `precedes` and `triggers` for bundle, tag and type items
* fixed diffs for sets and tuples

# 2.10.0

2016-11-03

* added pkg_dnf items
* added rudimentary string operations on Faults
* added Fault documentation
* added `bw test --config-determinism` and `--metadata-determinism`
* improved debugging facilities for metadata processor loops
* improved handling and reporting of missing Faults

# 2.9.1

2016-10-18

* fixed `bw verify` without `-S`
* fixed asking for changes to directory items

# 2.9.0

2016-10-17

* added directory purging
* added `bw --adhoc-nodes`
* improved handling of unknown nodes/groups
* improvements to `bw nodes`

# 2.8.0

2016-09-12

* added `BW_HARDLOCK_EXPIRY` env var
* added `bw hash --group`
* added `subgroup_patterns`
* added `bw test --ignore-missing-faults`
* added `node.cmd_wrapper_inner` and `_outer`
* added `node.os_version`
* fixed exception handling under Python 2
* fixed partial metadata not being completed in some cases

# 2.7.1

2016-07-15

* improved responsiveness to SIGINT during metadata generation
* fixed SIGINT handling on Python 2.7

# 2.7.0

2016-07-15

* `bw lock show` can now show entire groups
* `bw` can now be invoked from any subdirectory of a repository
* added `bw hash --metadata`
* added `bw nodes --attrs`
* added `repo.vault.format`
* added graceful handling of SIGINT
* added log level indicator to debug output
* added `node.dummy` attribute
* added `BW_SSH_ARGS` environment variable
* `bash` is no longer required on nodes
* `node.os` and `node.use_shadow_passwords` can now be set at the group level
* sets are now allowed in metadata
* optimized execution of metadata processors
* fixed `bw apply --force` with unlocked nodes
* fixed `bw test` not detecting merge of lists in unrelated groups' metadata
* fixed installation of some pkg_openbsd
* fixed piping into `bw apply -i`
* fixed handling user names with non-ASCII characters
* fixed skipped and failed items sometimes being handled incorrectly
* fixed error with autoskipped triggered items
* fixed skip reason for some soft locked items

# 2.6.1

2016-05-29

* fixed accidentally changed default salt for user items

# 2.6.0

2016-05-29

* added support for OpenBSD packages and services
* added soft locking mechanism
* added `enabled` option for `svc_systemd`
* fixed running compound commands

# 2.5.2

2016-05-04

* fixed compatibility with some exotic node shells
* fixed quitting at question prompts
* fixed creating files with content_type 'any'

# 2.5.1

2016-04-07

* fixed false positive on metadata collision check

# 2.5.0

2016-04-04

* improved performance and memory usage
* added metadata conflict detection to `bw test`
* added metadata type validation
* added `BW_VAULT_DUMMY_MODE`
* added q(uit) option to questions
* output disabled by default when using as a library
* fixed `bw hash -d`
* fixed excessive numbers of open files
* fixed partial metadata access from metadata processors

# 2.4.0

2016-03-20

* added `bw plot group`
* added `bw plot groups-for-node`
* `bw` will now check requirements.txt in your repo before doing anything
* improved output of `--help`
* metadata processors now have access to partial node metadata while it is being compiled
* fixed `bw test` when using more than the default number of node workers
* fixed passing Faults to `postgres_role` and `users`
* fixed detection of non-existent paths on CentOS and others

# 2.3.1

2016-03-15

* fixed handling of 'generate' keys for `repo.vault`

# 2.3.0

2016-03-15

* added `repo.vault` for handling secrets
* circular dependencies are now detected by `bw test`
* fixed handling of broken pipes in internal subprocesses
* fixed previous input being read when asking a question
* fixed reading non-ASCII templates on systems with ASCII locale
* `bw apply` and `bw verify` now exit with return code 1 if there are errors

# 2.2.0

2016-03-02

* added item tagging
* added `bw apply --skip`
* fixed newline warning on long diff files
* fixed calling `bw` without arguments

# 2.1.0

2016-02-25

* added `bw stats`
* added `bw items --file-preview`
* added hooks for `bw test`
* reason for skipping an item is now displayed in regular output
* fixed exception handling for invalid cdicts/sdicts
* fixed handling of SSH errors
* fixed broken diffs caused by partial file downloads
* fixed interactive prompts sometimes not reading input correctly

# 2.0.1

2016-02-22

* fixed display of failed actions
* updated display of interactive lock override prompt
* improved robustness of internal output subsystem

# 2.0.0

2016-02-22

* added support for Python 3.3+
* switched from Fabric/Paramiko to OpenSSH
* removed SSH and sudo passwords **(BACKWARDS INCOMPATIBLE)**
* metadata is now merged recursively **(BACKWARDS INCOMPATIBLE)**
* file items: the source attribute now has a default **(BACKWARDS INCOMPATIBLE)**
* file items: the default content_type is now text **(BACKWARDS INCOMPATIBLE)**
* reworked command line options for `bw verify` **(BACKWARDS INCOMPATIBLE)**
* `cascade_skip` now defaults to `False` if the item is triggered or uses `unless` **(BACKWARDS INCOMPATIBLE)**
* `bw verify` and `bw apply` now show incorrect/fixed/failed attributes
* `bw apply` now uses a status line to show current activity
* generally improved output formatting
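The 2.0.0 entry's switch to recursive metadata merging is easiest to see next to the shallow merge it replaced. The sketch below is illustrative only — a minimal dict merge in the spirit of that change, not BundleWrap's actual implementation (the helper name `merge_dict` is invented here):

```python
def merge_dict(base, update):
    """Recursively merge `update` into `base` (illustrative sketch).

    Nested dicts are merged key by key instead of the inner dict
    being replaced wholesale, which is what a shallow merge would do.
    """
    result = dict(base)
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = merge_dict(result[key], value)  # descend into nested dicts
        else:
            result[key] = value  # plain values are overwritten
    return result


group_metadata = {"nginx": {"workers": 4, "gzip": True}}
node_metadata = {"nginx": {"workers": 8}}

# A shallow merge would drop "gzip"; the recursive merge keeps it:
assert merge_dict(group_metadata, node_metadata) == \
    {"nginx": {"workers": 8, "gzip": True}}
```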
# 1.6.0

2016-02-22

* added `bw migrate` **(will be removed in 2.0.0)**
* added warnings for upgrading to 2.0.0 **(will be removed in 2.0.0)**

# 1.5.1

2015-06-11

* clean up local lock files
* fixed detection of some types of directories
* fixed exception spam when trying to load internal attributes as libs

# 1.5.0

2015-05-10

* added postgres_db and postgres_role items
* added `bw verify --only-needs-fixing`
* added `bw verify --summary`
* added `Repository.nodes_in_group()`
* added `verify_with` attribute for file items
* libs now have access to `repo_path`
* user items: fixed asking for password hash change
* file items: fixed `bw items -w` with `content_type: 'any'`
* improved various error messages

# 1.4.0

2015-03-02

* added virtualenv support for pkg_pip
* added reverse syntax for triggers and preceded_by
* lots of fixes and internal improvements around preceded_by

# 1.3.0

2014-12-31

* added pkg_pip items
* added pkg_yum items
* added pkg_zypper items
* added preceded_by item attribute
* fixed detection of non-existing files on CentOS/RHEL
* fixed detection of special files on Arch Linux
* fixed handling UTF-8 output of failed commands

# 1.2.2

2014-10-27

* fixed item classes not being restored after repo serialization

# 1.2.1

2014-10-21

* fixed a critical bug in bundle serialization

# 1.2.0

2014-10-19

* added item generators
* added `bw test --plugin-conflict-error`
* added `bw debug -c`
* improved unicode handling
* fixed logging issues

# 1.1.0

2014-08-11

* added metadata processors
* added `bw metadata`
* added `bw apply --profiling`
* added Repository.nodes_in_all_groups()
* added Repository.nodes_in_any_group()
* added the data subdirectory
* improved various error messages

# 1.0.0

2014-07-19

* API will now remain stable until 2.0.0
* added hooks for actions
* added support for Jinja2 templates
* fixed some CLI commands not terminating correctly

# 0.14.0

2014-07-13

* files, directories and symlinks don't care about ownership and mode by default **(BACKWARDS INCOMPATIBLE)**
* Mako file templates can now use include

# 0.13.0

2014-06-19

* added password-based SSH/sudo authentication
* fixed symlink items not checking existing link targets
* fixed exception when triggering skipped items
* output is now prefixed with `node:bundle:item_type:item_name`
* `bw repo debug` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
* `bw repo plot` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
* `bw repo test` is now a top-level command **(BACKWARDS INCOMPATIBLE)**

# 0.12.0

2014-05-11

* added plugins
* added group metadata
* user and group attributes are now optional
* user groups may no longer contain primary group **(BACKWARDS INCOMPATIBLE)**
* improvements to logging and output
* fixed a critical bug preventing per-node customization of bundles
* fixed pkg_apt choking on interactive dpkg prompts
* fixed hashing of plaintext user passwords without salt

# 0.11.2

2014-04-02

* packaging fixes only

# 0.11.1

2014-04-02

* packaging fixes only

# 0.11.0

2014-03-23

* renamed builtin item attribute 'depends' to 'needs' **(BACKWARDS INCOMPATIBLE)**
* removed PARALLEL_APPLY on custom items in favor of BLOCK_CONCURRENT **(BACKWARDS INCOMPATIBLE)**
* added builtin item attribute 'needed_by'
* added canned actions for services
* added deletion of files, groups and users
* simplified output of `bw apply`
* `bw repo test` now also verifies dependencies
* fixed `bw repo test` for files without a template
* fixed triggered actions being run every time
* various fixes and improvements around dependency handling
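To put the 0.11.0 rename of 'depends' to 'needs' in context: item dependencies are declared as attributes in a bundle's items.py. The following is a minimal sketch assuming an apt-based node; the `nginx` bundle, file paths, and template name are invented for illustration:

```python
# bundles/nginx/items.py (hypothetical bundle)

pkg_apt = {
    "nginx": {},
}

files = {
    "/etc/nginx/nginx.conf": {
        "source": "nginx.conf",
        # 'needs' (formerly 'depends') ensures the package is
        # installed before the config file is written
        "needs": ["pkg_apt:nginx"],
        # canned service action: restart nginx after config changes
        "triggers": ["svc_systemd:nginx:restart"],
    },
}

svc_systemd = {
    "nginx": {
        "running": True,
    },
}
```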
# 0.10.0

2014-03-08

* removed the 'timing' attribute on actions **(BACKWARDS INCOMPATIBLE)**
* actions are now first-class items
* items can now trigger each other (most useful with actions)
* added System V service item
* added `bw repo test`
* added negated bundle and group selectors to CLI
* can now manage files while ignoring their content
* more control over how actions are run in interactive mode
* bundles can now be assigned to nodes directly
* fixed creating symlinks in nonexistent unmanaged directories

# 0.9.0

2014-02-24

* added 'unless' for actions
* improved exception handling
* fixed actions not triggering in noninteractive mode
* fixed noninteractive installation of Debian packages
* slightly more verbose output

# 0.8.0

2014-02-21

* move from Alpha into Beta stage
* added builtin item attribute 'unless'
* added lightweight git/hg/bzr integration
* added -f switch to `bw apply`
* template context can now be customized
* added Node.has_bundle, .in_group etc.
* fixed a LineBuffer bug
* prevented output of some extraneous whitespace

# 0.7.0

2014-02-16

* added safety checks to prevent diffs of unwieldy files
* added a "text" content type for files
* added support for arbitrary encodings in managed files
* added systemd and Upstart service items
* added hooks
* added action triggers (for service restarts after config changes)
* lots of new documentation
* better error messages when defining duplicate items
* better dependencies between files, directories and symlinks
* fixed a bug that prevented managing /etc/sudoers

# 0.6.0

2014-01-01

* added actions
* reworked group patterns **(BACKWARDS INCOMPATIBLE)**
* reworked output verbosity **(BACKWARDS INCOMPATIBLE)**
* added support for libs directory
* fixed high CPU load while waiting for interactive response
* various other minor fixes and improvements

# 0.5.0

2013-11-09

* manage users and groups
* manage symlinks
* node locking
* PARALLEL_APPLY setting for items
* manage Arch Linux packages
* plot item dependencies
* encoding fixes for file handling

# 0.4.0

2013-08-25

* manage directories
* manage Debian packages
* UI improvements

# 0.3.0

2013-08-04

* basic file management
* concurrency improvements
* logging/output improvements
* use Fabric for remote operations
* lots of other small improvements

# 0.2.0

2013-07-12

* bundle management
* item APIs
* new concurrency helpers

# 0.1.0

2013-06-16

* initial release
* node and group management
* running commands on nodes

--- bundlewrap-3.8.0/CONTRIBUTING.md ---

Please see [the docs on contributing](http://docs.bundlewrap.org/misc/contributing).

--- bundlewrap-3.8.0/LICENSE ---

GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc.

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.
We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.

Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and modification follow.

TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. 
This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.

When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified it, and giving a relevant date.

b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices".

c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.

A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.

d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) 
You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or authors of the material; or

e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License.
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work.
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program.
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.

bundlewrap-3.8.0/MANIFEST.in000066400000000000000000000000571360562404000154250ustar00rootroot00000000000000include AUTHORS CHANGELOG.md LICENSE README.md bundlewrap-3.8.0/README.md000066400000000000000000000021761360562404000151500ustar00rootroot00000000000000BundleWrap is a decentralized configuration management system that is designed to be powerful, easy to extend and extremely versatile. For more information, have a look at [bundlewrap.org](http://bundlewrap.org) and [docs.bundlewrap.org](http://docs.bundlewrap.org).
------------------------------------------------------------------------

Latest Version &nbsp; Build status &nbsp; Python compatibility

------------------------------------------------------------------------

Donations appreciated:

Bitcoin `13AJYksqncZromPF8HvDUXsmHChAm3Y7W7`

Ethereum `0x5Eb3037e197d3C0d2E014bcfC2e027EB0AD42812`

------------------------------------------------------------------------

BundleWrap is © 2013 - 2019 [Torsten Rehn](mailto:torsten@rehn.email)

bundlewrap-3.8.0/assets/000077500000000000000000000000001360562404000151675ustar00rootroot00000000000000bundlewrap-3.8.0/assets/icon.psd000066400000000000000000061511001360562404000166330ustar00rootroot00000000000000
Ȋ{HY 2 `I_ T?r-Kx-[a$o7ڍ`Aˎ;<ChBsfVt7 ՏsDڌO8)]Kd ]ՋCp!H|&S` %f @_E;{8Hm8|c\,z$ٔj@҇C=5`Hk c߆KpAÖL\ .`F` O=v1Js/]b%r1ڙd@̂>=?dCr fU t9ϛxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۠mAI <0^Jh `JpC "M^ ,dE]H;y5Js4~ `_(w+ۥg@?<;dEn eOq =˧yIX 2`I` W?s*Iy+[a!k8ڪ`C=;Cg?tgYx 4٬pEO8*]Je `FpF|#Ta ' e A]H={7Jn7{c_,y&۱iAB=8`Hk eNpAƳ|J\ 0 `G_ Q?u/Kw.\b%q2۶b@>=BgBr fVs9ҸuET7&_Ib \Do$I}'Ta #!j?^E:|:Hm<xd\{0~!ؽkAF<2`Hj bInBڿ  M] -bE_ L<u3Iнt1 _`&żv.f??>=cDp eQ q ;׺zGW 4^Jb XBr)Iظx+Xc !i:;<Ei?Yy2ѵ} K :-^KFpEܳ} $_ (d C;z6Iͱo7^*ʰx)?=9aHNq>ڮY 1^H>t.J٬v,a"«n5<=BfAUv6թQ8']JEq#Gۧ|%b #g ?<}8Gǥm9^~-Τ{"E=4_HLpAۢ  ^ .`F>v1IԠs0a%şs0@=>dCT s9؝V 6!_JCr'Jڛ|(c !"j;=};Gk=[y2јK ;0`IInFܖ !` + dC=w4JΔr4`(ȓx*A=;cFQp =ב Z 3`J?q+I؏y,a!m8;<ChAWv4Ҍ N9*]KDp Fڊ~%` & g B;y8IȈn9]~-͇y&A=7`ILpBڅ![ / `H=u0KՃt0`&Ăp2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|؃H ;1_IKpC !_ ,bE=w4Ir2a'u-܈A?<bER r<͊X 5^IAr)Ix*c !k9ڍ<bDj>vxz4ُo i+_JeniE "}f B]x^ Ip6{~+z'۔hc8bGkqo=ǖ|w^G_ uaJv-뀗"o4ۙ`^BeAruw8ԛtm']IcqfI{% h >^}]Hk9x~~ /}!ڠig4aJipmC "¢}z bG^ u`Kt2} 'u0ܥd`?eFo qs< Χxr!_JcpdHz(낶 i;`}]Ei=xzz1}جo h.^IhpjD~ !}cC_y^ Io4} )w*ڱhd:aFors= ȳ|w^IatdJw-킴"m7ڶ``BfBsvy5 ոsl)]KdpjF~&g @_{_Hm8|~ ,z$۽jf5`HkppAڿĿ{`F` vbJսs/%ļr1b`?dCq tv9׺vo"]IbqeH۸z' h<^~]Fj<w{{0е}k h1^IgpkD۳ !|dD]y_ Kϱs4| )ɰw+e`=dEn qr>ٮyt!`I`qcJڬy+"l8`^Ej=wv9x4ԩq 9!i+_JgoJiEڧ~"'"}f B_y7G^ Iɥp8}~2} +ʤ{'i=0d8bIkoFo?٢ |0 x`H_ u/JcJנv/(#Ÿo4b=;`AeCru=x7՝ u7o&]Kbq$IhIۛ{'#!!h?^}:C_Hƙk<x~4} 0ј|!k<*h2_HjpFoBۖ  - |`E_ w3IaIҔr1 -&Ɠt.f>7`=cDp s Bs;ّz4 s^Ibr)KdIڏx)&!i:`<@_Ei=wy8|2Ҍ}p ;$j-^JgpHkE܊} ")d B]z5F` I͈o5| /*ʇx)g=1b9cFmqCq>څ{2v`H_ r-IbJكx,'"‚o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"ۃ=+aHE Å.`F1Is0+s1ۈ=8dD=ϊ6_K'J|)!"j=;Bk>8!ڏ;'ہ`JI "+ ƃbE4Hq4/v+ڔ=4ۆbE Cɖ3 ҈^I+Kw+'k8ٙ<?g?<כ9!؍]J H|#( e A8En71y&۠>.ڒ`ID!Ƣ1̔`H0Iw0)q2ۥ=:gB =ҧ6ٙ_I$J}!"i9Bk5|:)ܞ_F +Ƞb2Hr,u>5ڣb A4ե^)Jx%k<?j9:"۪_H ( ìf 6Fp0z=1ۯbD1ϱ`.Kw)p==g>8!ڶ]#J|$"g 9Em4{<+ڻ_E -ɽ`2Is+s=8d?5](Jy #i;Ai7};&^I~ )!c5Ho0w<2a B3^+Iw&m<=f;9] I~&!g 8Eo2{=-aE/a0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 ^/Jv(o>;e=9]$I{%PjzPjzPjzPjzPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPP7QhyvfR9 :Qamw xm_H)-K_q~}nY@jzP ,Lbs}o\F$  Tfp{ ~sbH) +BVfq|~p]F!*I_p} |o^H)  jzP5`|\.[htS{RA]tw?]v~^4BbyzY(>ee@A]twFkhB P)X|^!PF BiY L{e:jzP3jg)!{ &SsNpd)Hnߜd)HxyJ &SsYUP>lݜ}DPF)^w5*lޜxNjzPNE,7 J~Jq2fٜ|:WٜJ"iݜk(JqHݜ}EP:o؜JPF%^~1-x؜}M jzPND[_eDvR4c&fԜ.=xԜR%s؜v+4c \לUPӜ5PF Rl pԜzP3~%!p Doݜ?~Мk 9ҜCj՜n Doݜ \ӜTPĜk PF,l.MМzPgUA~Evڜ:̜2 oМzEќJEvܜJϜ0Pœ.PF:~ޜG tΜzP#v [WT4oR;sלvɜLϜN o͜r ;sܜ!}̜ZPGPFAܜM"͜zP<ߜ!zd^ԜƜU͜s  ˜!^ܜNɜqPXPFNڜF/̜zPHޜ9~PҜŜV̜".ɜ.Pܜ tƜuP^PFPڜ/5˜zPMݜ[MA!iRPҜŜDa˜15ǜ1Pܜ"ŜkPWPFPڜ -ʜzPDܜ!xlWPҜĜ"ʜ=-Ŝ'Pܜ0ĜKPzgXH50%")0:I\lFPFP ~ocUH=2+")02-"".2ASbtzP2mZK>0)""%07HUh|t~/~P {k^PD;0'")5BSfxreWJ<00",2@M\p^wj]OA:0/""'08ATfv6sbQ?3."",070)""03 5`,PFP{bI)-X2nB/WwzP~W+NvQ[D*bRPxbH)vZ9-YNnK, Ai*l@*S~PxbH)#eA6`kPsG .o PFPlKbjD6fzP~^ S#!p\HPlS4 vO`W"FYDePN[^Pi~PFPvf4SzPvcR hJ8PAx.M&#f~<`P&jPx%uXPFPs_& bzP6_E^rbf{PKxfPL ~{"n%Pow <Pv PFPtjzP~rM X-/pTNPp P >rhnP*uWYP\PFP(,jzP4j[`5E%~P.P"u.!PvjP!PFPQPoPjzPv7cm Y{PkBP<Ak MkP$qPNMPFPpxjzPv<DX AzMNP]Pr )xPkHxxP~PFPPYjzP\+~{WQxP{nP*P]WP |qzP."PFP`jzPz9RxL {PM~PbjdPM#jzPr^PFPv*) jzP)DRXyPuDNPPL)P|qjzP,PFPPA__jzP\\{N]qPPl>Sf`P &jzPPFP)QjzPr" x>)<{PP'kx\"PJxjzPINPFP`tjzP G>X j]j<NPsP|PpAjzP pPFPjDDjzP;ndD+gkPjPW&0~PMPjzPvPFP}pfjzP_Sq(8+{PePSWRnPhjzPNPFPurxjzPxq3XYj]3NPbPqqPG<jzP!:PFPrxXjzPY %:<qePbPhc PfjzPUPFPjz0jzP @BiFxPbP@ oA5P|yjzPwjPFPjz<jzP/9""YXHu P)KP?Ynz xm\H1 bP2%OPP]jzPayPFPjzOjzPGќm.qM{x^ P"Suu\5bPMB1gP <jzPJPFPjz^ujzPVќ`SxPXyTbPxdS ~wP- 
jzP.PFPjzl`jzPdќX2@KP)pܜ~ObPds_kPDjzPPFPjzxSjzPoќ!bZlW Pt֜dbPQmXPWjzPPFPjzCjzP}ќX&]xP]ӜibP<xvKPemjzP"PFPjz5jzPќU*/KP!ϜP"k=Po\jzP,PFPjz#jzPќ(Pg`O P:P _+"PzPjzP0PFPjz"jzPќ0Ox8hzxPEPX'PBjzP9PFPjzjzPќ0Uy: KP?P%P0P5jzP<PFPjzjzPќ0 <sSH P)P0P4P%jzP<PFPjzjzPќ3EiIrztxPP:H<P"jzP;PFPjzjzPќ<UqG KPe{jXJ<0'".0PFPjzejjzPp, - Pk>bPqzQPBjzPSPFPjzX{jzPe"0pV`vPfbP"f`PxSjzPhpPFPjzEjzPU"UY`Fi}HPbP:Xs|Pl`jzP{`PFPjz0jzPBC ; " P=abPWJnP^ujzPMPFPjz.jzP%]{GYvP\bPou2<[POjzP0F DPjzQjzPvUPNTsrHPmbPbZ%BP<jzPHC%/<PjzmjzPyvH P{lbPHxPP0jzPn<7A6PjzhjzPq-~  7RvPWbPG'|oPXjzPs4OW.PjzK#jzPV\RF:b|f|HPFbPmd*{PxjzP*WfmPjz$TjzP/rg0T P=bP}B^.\PfjzPb.{PjzjzP)~$KvP<bPF[d3PDDjzPPjztjzPxeR<"mYvHPFbPy/'PtjzP<v-:|PjzP\jzPP jVA_ PRbP+dj?sPjzPzKh\edPjzjzP]~|-BvPibPuj6|DP__jzP-JDPjzKjzP~R0 yKpHPbP.5[,P) jzPwx +:PjzUjzPPgbDPju P bPj{mP_jzP9DpzPjzOjzP 2~n<:tPLbPTX.kC0PYjzPn!1jPjzt jzPj R"<iDPrbP(9PxjzP_\>w9Pjz5jjzP"wZ.^siP_EfPfLxDPpPjzP;EWPjzBjzPrg~^,I1tPDnP~bP),jzP.^d"2_Pjz<.mzP#cHWR )cDP~P{_SuUAPtjzPR3 &Pjz},zPim9jQj}\PwP~ Px\~P&pzHMJo"2jPjz'>NwPDj~zL>U&tP_!hsPQ-APC q'Pf4<z>~ v{FWPjz`e[kPHMWjRyfDP"UA\PU ]qPzM4\P [Nz'5bcu"1^PjzEHTPs; HzjvPzKN7Pf(F P2!qP@:2gz}G2k;m.:wPjz"P 8i$P I'X}j~M`tP5_"@PV S.PJ}C5r!PjD;fzx`:DjT3`3:e.Pjz<vN:^~P)tH@`{jR.DPwa7.SrlPgD$P{HPmoB :f?PnB@`|zDm_SC80.""%05O`qUPraO>2-"",0?M\lz\oΜkPjzJ˜~PEݜj|[jtPIsPɜ^PŜ`P-ÜzAœNyϜuPjz>̜PJޜjN'<DPePʜ]PǜcP5ĜzpĜ4wМuPjz'͜.P>ߜjPnEP̜PPɜ[P0ŜzƜzkќgPjz rΜ)P+j{hutPfœWP͜8P z̜FP#Ɯz |ɜPQӜNPjzJМuPvjN:JDPKƜTPΜ|P^Ϝ)Ptǜz`̜u,Ԝ)Pjzsќ>PMWP!}Ȝ9PМRP6ҜhPMɜz5Ϝ:\֜XPjz9ӜNPme{s~rPA̜e P՜mP^՜2PpԜzKӜBjٜiPjzF֜t:P)v}M NJVAPNМcPVٜq&PjٜJP.xל[jzP>qb%P M{lEjz'OknJ6a`5PjzQu|cDP Dhv`ANXaAPOoiQ.PDh{]2P5^}xW'P ChfDjz 4M]hu~teS6"F^myxl]D"  B[k{ zm^N32Nct{rdSA)7Pfv vl`TA)P.MdxxjXA!DZlz{mZB-K`q~~o[B#jzPjzPjzPkzPrzPvzPsPpPiP4\PYHP2P P[PkPTDPPruPP>P7P=SP0c2^P0m@GFP0{X,XP0sQ, ?b!P0vdTI:0*"-06DQ]l~MP0ǜqP0ȜP0ɜP0ʜP0˜}P0͜^P0Μ,P&ќEP'Sr؜x>PKnߜ~UP=\w}b> P6Qes xnbP5 337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7ֵER7&] \Bq$H۳{Ta #hE:}:GƱkd\~.ϰ}AF<2_ bIpBۮ M] -`L<w3IҬr_`&ƫt??>=c eQ s ;٩GW 3^ XBr'IڧxXc !iD<:EifZy2Ҥ}DL ;-^ bHpEܢ} R` )dG<z5I͠oa_*ʟx@ϼA=9c ePq?ڝHZ 2 `T@r-Kٛx[c$šoAʷ=<ChsfWt7՘sDڵQ8)_d ]ӴEo!Hۖ}Sb %h _E<z8Hǔo|c^~,̓|j@ҰE=5ai cݯKnAّ}N\ .b^ O=t1Jԏu~_`%Îtc@ͫ>=?fo fS s9֌wEݩT 6"_a YѨAr'Hي|뀁Wa !"h^C;~;GjwdZ{2ч}mAդI ;0^f aڣHpF܅ N^ + b]I;y4JЃq~ `_(ɂvg@ϟ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʚ=<CgufYx4rC٘O9*]f ^֗Fp F|Ra (e _E={8Hn|b_+yk@ѓB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@ˎ>=@gr fV s8uEیT7$_b \ҋBo$H}퀞Ta #j^F;|9Fmxe]{/~jCՇH :2`h dۆKpB O_ +b^M=w2Ir} aa&udA΂A>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6Jp{a^,zh@?=9bk dNo>}IY 1^a S>s.Jv\a"nbA<=Bft fUv6uDQ8']e ]Eq#G|Tb $g _E<}9Gmzc^~-{k@E>4_k cLpA L^ /`` M<v2Is}^`%sd>>?>do eR s9xFU 5!_c YAr(H|Wa   j`B;;EixdYz1}oBJ :.^h aGpDܿ~ P_ (c]J=y4Iνo| b`)ɼwfBA<:am fQr =ٺzJZ 3^` VAt+Jٸw\c"m`C=<DfsfYv6ԵrCP9*]d ^Ep Gܳ~Ra & g ]E=y8Iȱozb^~-Ͱ{h@C=7aj dMoBڮ}K] / a^ P?t0Kլu]b&īrd@==@fs fTs8թwES 7$_d [Bo%H٧|Va "k`D;|:FålydZz/Τ~kAG ;1`g bInCڢ N^ ,b]J;z4IϠr| `_'Ɵue?μ?=<bn eP r<םyGW 4^` W?r*JۛxYa"j`Bʷ;<EhweXx4՘qCصM 9+^g _״EpEۖ}R` 'd _E;z7I˔n}b]+˓yj@аA=8`m dOq?ڑ~J[ 0^` Q@u/Jُv\b#Îob@̫>=Aet fVu7֌vEܩS8&]Ic ^ӨDq#Hۊ{%Tb $h>^E<}9Gƈk9xc]~.χ}!i@ҤF<2aHi cܣKpBۅ  }L^ -bE^ M>u2I҃t1} ^a'Ƃu.d@̟@=?eDo eT q<xFޝW 5!_Ia ZϜBp(Hz)ꀍYc  k:^Dʚ<}:Ek=vfZy1كnDטL ;.`Jf bؗHpD} "R` )dD]G;z5Jo6| a^+x*܈g@ϓ?=:cGm eOq? 
Ȋ{HY 2 `I_ T?r-Kx-[a$o7ڍ`Aˎ;<ChBsfVt7 ՏsDڌO8)]Kd ]ՋCp!H|&S` %f @_E;{8Hm8|c\,z$ٔj@҇C=5`Hk c߆KpAÖL\ .`F` O=v1Js/]b%r1ڙd@̂>=?dCr fU t9ϛxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۠mAI <0^Jh `JpC "M^ ,dE]H;y5Js4~ `_(w+ۥg@?<;dEn eOq =˧yIX 2`I` W?s*Iy+[a!k8ڪ`C=;Cg?tgYx 4٬pEO8*]Je `FpF|#Ta ' e A]H={7Jn7{c_,y&۱iAB=8`Hk eNpAƳ|J\ 0 `G_ Q?u/Kw.\b%q2۶b@>=BgBr fVs9ҸuET7&_Ib \Do$I}'Ta #!j?^E:|:Hm<xd\{0~!ؽkAF<2`Hj bInBڿ  M] -bE_ L<u3Iнt1 _`&żv.f??>=cDp eQ q ;׺zGW 4^Jb XBr)Iظx+Xc !i:;<Ei?Yy2ѵ} K :-^KFpEܳ} $_ (d C;z6Iͱo7^*ʰx)?=9aHNq>ڮY 1^H>t.J٬v,a"«n5<=BfAUv6թQ8']JEq#Gۧ|%b #g ?<}8Gǥm9^~-Τ{"E=4_HLpAۢ  ^ .`F>v1IԠs0a%şs0@=>dCT s9؝V 6!_JCr'Jڛ|(c !"j;=};Gk=[y2јK ;0`IInFܖ !` + dC=w4JΔr4`(ȓx*A=;cFQp =ב Z 3`J?q+I؏y,a!m8;<ChAWv4Ҍ N9*]KDp Fڊ~%` & g B;y8IȈn9]~-͇y&A=7`ILpBڅ![ / `H=u0KՃt0`&Ăp2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|؃H ;1_IKpC !_ ,bE=w4Ir2a'u-܈A?<bER r<͊X 5^IAr)Ix*c !k9ڍ<bDj>vxz4ُo i+_JeniE "}f B]x^ Ip6{~+z'۔hc8bGkqo=ǖ|w^G_ uaJv-뀗"o4ۙ`^BeAruw8ԛtm']IcqfI{% h >^}]Hk9x~~ /}!ڠig4aJipmC "¢}z bG^ u`Kt2} 'u0ܥd`?eFo qs< Χxr!_JcpdHz(낶 i;`}]Ei=xzz1}جo h.^IhpjD~ !}cC_y^ Io4} )w*ڱhd:aFors= ȳ|w^IatdJw-킴"m7ڶ``BfBsvy5 ոsl)]KdpjF~&g @_{_Hm8|~ ,z$۽jf5`HkppAڿĿ{`F` vbJսs/%ļr1b`?dCq tv9׺vo"]IbqeH۸z' h<^~]Fj<w{{0е}k h1^IgpkD۳ !|dD]y_ Kϱs4| )ɰw+e`=dEn qr>ٮyt!`I`qcJڬy+"l8`^Ej=wv9x4ԩq 9!i+_JgoJiEڧ~"'"}f B_y7G^ Iɥp8}~2} +ʤ{'i=0d8bIkoFo?٢ |0 x`H_ u/JcJנv/(#Ÿo4b=;`AeCru=x7՝ u7o&]Kbq$IhIۛ{'#!!h?^}:C_Hƙk<x~4} 0ј|!k<*h2_HjpFoBۖ  - |`E_ w3IaIҔr1 -&Ɠt.f>7`=cDp s Bs;ّz4 s^Ibr)KdIڏx)&!i:`<@_Ei=wy8|2Ҍ}p ;$j-^JgpHkE܊} ")d B]z5F` I͈o5| /*ʇx)g=1b9cFmqCq>څ{2v`H_ r-IbJكx,'"‚o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"ۃ=+aHE Å.`F1Is0+s1ۈ=8dD=ϊ6_K'J|)!"j=;Bk>8!ڏ;'ہ`JI "+ ƃbE4Hq4/v+ڔ=4ۆbE Cɖ3 ҈^I+Kw+'k8ٙ<?g?<כ9!؍]J H|#( e A8En71y&۠>.ڒ`ID!Ƣ1̔`H0Iw0)q2ۥ=:gB =ҧ6ٙ_I$J}!"i9Bk5|:)ܞ_F +Ƞb2Hr,u>5ڣb A4ե^)Jx%k<?j9:"۪_H ( ìf 6Fp0z=1ۯbD1ϱ`.Kw)p==g>8!ڶ]#J|$"g 9Em4{<+ڻ_E -ɽ`2Is+s=8d?5](Jy #i;Ai7};&^I~ )!c5Ho0w<2a B3^+Iw&m<=f;9] I~&!g 8Eo2{=-aE/a0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 ^/Jv(o>;e=9]$I{%PjzPjzPjzPjzPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPP7QhyvfR9 :Qamw xm_H)-K_q~}nY@jzP ,Lbs}o\F$  Tfp{ ~sbH) +BVfq|~p]F!*I_p} |o^H)  jzP5`|\.[htS{RA]tw?]v~^4BbyzY(>ee@A]twFkhB P)X|^!PF BiY L{e:jzP3jg)!{ &SsNpd)Hnߜd)HxyJ &SsYUP>lݜ}DPF)^w5*lޜxNjzPNE,7 J~Jq2fٜ|:WٜJ"iݜk(JqHݜ}EP:o؜JPF%^~1-x؜}M jzPND[_eDvR4c&fԜ.=xԜR%s؜v+4c \לUPӜ5PF Rl pԜzP3~%!p Doݜ?~Мk 9ҜCj՜n Doݜ \ӜTPĜk PF,l.MМzPgUA~Evڜ:̜2 oМzEќJEvܜJϜ0Pœ.PF:~ޜG tΜzP#v [WT4oR;sלvɜLϜN o͜r ;sܜ!}̜ZPGPFAܜM"͜zP<ߜ!zd^ԜƜU͜s  ˜!^ܜNɜqPXPFNڜF/̜zPHޜ9~PҜŜV̜".ɜ.Pܜ tƜuP^PFPڜ/5˜zPMݜ[MA!iRPҜŜDa˜15ǜ1Pܜ"ŜkPWPFPڜ -ʜzPDܜ!xlWPҜĜ"ʜ=-Ŝ'Pܜ0ĜKPzgXH50%")0:I\lFPFP ~ocUH=2+")02-"".2ASbtzP2mZK>0)""%07HUh|t~/~P {k^PD;0'")5BSfxreWJ<00",2@M\p^wj]OA:0/""'08ATfv6sbQ?3."",070)""03 5`,PFP{bI)-X2nB/WwzP~W+NvQ[D*bRPxbH)vZ9-YNnK, Ai*l@*S~PxbH)#eA6`kPsG .o PFPlKbjD6fzP~^ S#!p\HPlS4 vO`W"FYDePN[^Pi~PFPvf4SzPvcR hJ8PAx.M&#f~<`P&jPx%uXPFPs_& bzP6_E^rbf{PKxfPL ~{"n%Pow <Pv PFPtjzP~rM X-/pTNPp P >rhnP*uWYP\PFP(,jzP4j[`5E%~P.P"u.!PvjP!PFPQPoPjzPv7cm Y{PkBP<Ak MkP$qPNMPFPpxjzPv<DX AzMNP]Pr )xPkHxxP~PFPPYjzP\+~{WQxP{nP*P]WP |qzP."PFP`jzPz9RxL {PM~PbjdPM#jzPr^PFPv*) jzP)DRXyPuDNPPL)P|qjzP,PFPPA__jzP\\{N]qPPl>Sf`P &jzPPFP)QjzPr" x>)<{PP'kx\"PJxjzPINPFP`tjzP G>X j]j<NPsP|PpAjzP pPFPjDDjzP;ndD+gkPjPW&0~PMPjzPvPFP}pfjzP_Sq(8+{PePSWRnPhjzPNPFPurxjzPxq3XYj]3NPbPqqPG<jzP!:PFPrxXjzPY %:<qePbPhc PfjzPUPFPjz0jzP @BiFxPbP@ oA5P|yjzPwjPFPjz<jzP/9""YXHu P)KP?Ynz xm\H1 bP2%OPP]jzPayPFPjzOjzPGќm.qM{x^ P"Suu\5bPMB1gP <jzPJPFPjz^ujzPVќ`SxPXyTbPxdS ~wP- 
jzP.PFPjzl`jzPdќX2@KP)pܜ~ObPds_kPDjzPPFPjzxSjzPoќ!bZlW Pt֜dbPQmXPWjzPPFPjzCjzP}ќX&]xP]ӜibP<xvKPemjzP"PFPjz5jzPќU*/KP!ϜP"k=Po\jzP,PFPjz#jzPќ(Pg`O P:P _+"PzPjzP0PFPjz"jzPќ0Ox8hzxPEPX'PBjzP9PFPjzjzPќ0Uy: KP?P%P0P5jzP<PFPjzjzPќ0 <sSH P)P0P4P%jzP<PFPjzjzPќ3EiIrztxPP:H<P"jzP;PFPjzjzPќ<UqG KPe{jXJ<0'".0PFPjzejjzPp, - Pk>bPqzQPBjzPSPFPjzX{jzPe"0pV`vPfbP"f`PxSjzPhpPFPjzEjzPU"UY`Fi}HPbP:Xs|Pl`jzP{`PFPjz0jzPBC ; " P=abPWJnP^ujzPMPFPjz.jzP%]{GYvP\bPou2<[POjzP0F DPjzQjzPvUPNTsrHPmbPbZ%BP<jzPHC%/<PjzmjzPyvH P{lbPHxPP0jzPn<7A6PjzhjzPq-~  7RvPWbPG'|oPXjzPs4OW.PjzK#jzPV\RF:b|f|HPFbPmd*{PxjzP*WfmPjz$TjzP/rg0T P=bP}B^.\PfjzPb.{PjzjzP)~$KvP<bPF[d3PDDjzPPjztjzPxeR<"mYvHPFbPy/'PtjzP<v-:|PjzP\jzPP jVA_ PRbP+dj?sPjzPzKh\edPjzjzP]~|-BvPibPuj6|DP__jzP-JDPjzKjzP~R0 yKpHPbP.5[,P) jzPwx +:PjzUjzPPgbDPju P bPj{mP_jzP9DpzPjzOjzP 2~n<:tPLbPTX.kC0PYjzPn!1jPjzt jzPj R"<iDPrbP(9PxjzP_\>w9Pjz5jjzP"wZ.^siP_EfPfLxDPpPjzP;EWPjzBjzPrg~^,I1tPDnP~bP),jzP.^d"2_Pjz<.mzP#cHWR )cDP~P{_SuUAPtjzPR3 &Pjz},zPim9jQj}\PwP~ Px\~P&pzHMJo"2jPjz'>NwPDj~zL>U&tP_!hsPQ-APC q'Pf4<z>~ v{FWPjz`e[kPHMWjRyfDP"UA\PU ]qPzM4\P [Nz'5bcu"1^PjzEHTPs; HzjvPzKN7Pf(F P2!qP@:2gz}G2k;m.:wPjz"P 8i$P I'X}j~M`tP5_"@PV S.PJ}C5r!PjD;fzx`:DjT3`3:e.Pjz<vN:^~P)tH@`{jR.DPwa7.SrlPgD$P{HPmoB :f?PnB@`|zDm_SC80.""%05O`qUPraO>2-"",0?M\lz\oΜkPjzJ˜~PEݜj|[jtPIsPɜ^PŜ`P-ÜzAœNyϜuPjz>̜PJޜjN'<DPePʜ]PǜcP5ĜzpĜ4wМuPjz'͜.P>ߜjPnEP̜PPɜ[P0ŜzƜzkќgPjz rΜ)P+j{hutPfœWP͜8P z̜FP#Ɯz |ɜPQӜNPjzJМuPvjN:JDPKƜTPΜ|P^Ϝ)Ptǜz`̜u,Ԝ)Pjzsќ>PMWP!}Ȝ9PМRP6ҜhPMɜz5Ϝ:\֜XPjz9ӜNPme{s~rPA̜e P՜mP^՜2PpԜzKӜBjٜiPjzF֜t:P)v}M NJVAPNМcPVٜq&PjٜJP.xל[jzP>qb%P M{lEjz'OknJ6a`5PjzQu|cDP Dhv`ANXaAPOoiQ.PDh{]2P5^}xW'P ChfDjz 4M]hu~teS6"F^myxl]D"  B[k{ zm^N32Nct{rdSA)7Pfv vl`TA)P.MdxxjXA!DZlz{mZB-K`q~~o[B#jzPjzPjzPkzPrzPvzPsPpPiP4\PYHP2P P[PkPTDPPruPP>P7P=SP0c2^P0m@GFP0{X,XP0sQ, ?b!P0vdTI:0*"-06DQ]l~MP0ǜqP0ȜP0ɜP0ʜP0˜}P0͜^P0Μ,P&ќEP'Sr؜x>PKnߜ~UP=\w}b> P6Qes xnbP5 337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7ֵER7&] \Bq$H۳{Ta #hE:}:GƱkd\~.ϰ}AF<2_ bIpBۮ M] -`L<w3IҬr_`&ƫt??>=c eQ s ;٩GW 3^ XBr'IڧxXc !iD<:EifZy2Ҥ}DL ;-^ bHpEܢ} R` )dG<z5I͠oa_*ʟx@ϼA=9c ePq?ڝHZ 2 `T@r-Kٛx[c$šoAʷ=<ChsfWt7՘sDڵQ8)_d ]ӴEo!Hۖ}Sb %h _E<z8Hǔo|c^~,̓|j@ҰE=5ai cݯKnAّ}N\ .b^ O=t1Jԏu~_`%Îtc@ͫ>=?fo fS s9֌wEݩT 6"_a YѨAr'Hي|뀁Wa !"h^C;~;GjwdZ{2ч}mAդI ;0^f aڣHpF܅ N^ + b]I;y4JЃq~ `_(ɂvg@ϟ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʚ=<CgufYx4rC٘O9*]f ^֗Fp F|Ra (e _E={8Hn|b_+yk@ѓB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@ˎ>=@gr fV s8uEیT7$_b \ҋBo$H}퀞Ta #j^F;|9Fmxe]{/~jCՇH :2`h dۆKpB O_ +b^M=w2Ir} aa&udA΂A>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6Jp{a^,zh@?=9bk dNo>}IY 1^a S>s.Jv\a"nbA<=Bft fUv6uDQ8']e ]Eq#G|Tb $g _E<}9Gmzc^~-{k@E>4_k cLpA L^ /`` M<v2Is}^`%sd>>?>do eR s9xFU 5!_c YAr(H|Wa   j`B;;EixdYz1}oBJ :.^h aGpDܿ~ P_ (c]J=y4Iνo| b`)ɼwfBA<:am fQr =ٺzJZ 3^` VAt+Jٸw\c"m`C=<DfsfYv6ԵrCP9*]d ^Ep Gܳ~Ra & g ]E=y8Iȱozb^~-Ͱ{h@C=7aj dMoBڮ}K] / a^ P?t0Kլu]b&īrd@==@fs fTs8թwES 7$_d [Bo%H٧|Va "k`D;|:FålydZz/Τ~kAG ;1`g bInCڢ N^ ,b]J;z4IϠr| `_'Ɵue?μ?=<bn eP r<םyGW 4^` W?r*JۛxYa"j`Bʷ;<EhweXx4՘qCصM 9+^g _״EpEۖ}R` 'd _E;z7I˔n}b]+˓yj@аA=8`m dOq?ڑ~J[ 0^` Q@u/Jُv\b#Îob@̫>=Aet fVu7֌vEܩS8&]Ic ^ӨDq#Hۊ{%Tb $h>^E<}9Gƈk9xc]~.χ}!i@ҤF<2aHi cܣKpBۅ  }L^ -bE^ M>u2I҃t1} ^a'Ƃu.d@̟@=?eDo eT q<xFޝW 5!_Ia ZϜBp(Hz)ꀍYc  k:^Dʚ<}:Ek=vfZy1كnDטL ;.`Jf bؗHpD} "R` )dD]G;z5Jo6| a^+x*܈g@ϓ?=:cGm eOq? 
Ȋ{HY 2 `I_ T?r-Kx-[a$o7ڍ`Aˎ;<ChBsfVt7 ՏsDڌO8)]Kd ]ՋCp!H|&S` %f @_E;{8Hm8|c\,z$ٔj@҇C=5`Hk c߆KpAÖL\ .`F` O=v1Js/]b%r1ڙd@̂>=?dCr fU t9ϛxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۠mAI <0^Jh `JpC "M^ ,dE]H;y5Js4~ `_(w+ۥg@?<;dEn eOq =˧yIX 2`I` W?s*Iy+[a!k8ڪ`C=;Cg?tgYx 4٬pEO8*]Je `FpF|#Ta ' e A]H={7Jn7{c_,y&۱iAB=8`Hk eNpAƳ|J\ 0 `G_ Q?u/Kw.\b%q2۶b@>=BgBr fVs9ҸuET7&_Ib \Do$I}'Ta #!j?^E:|:Hm<xd\{0~!ؽkAF<2`Hj bInBڿ  M] -bE_ L<u3Iнt1 _`&żv.f??>=cDp eQ q ;׺zGW 4^Jb XBr)Iظx+Xc !i:;<Ei?Yy2ѵ} K :-^KFpEܳ} $_ (d C;z6Iͱo7^*ʰx)?=9aHNq>ڮY 1^H>t.J٬v,a"«n5<=BfAUv6թQ8']JEq#Gۧ|%b #g ?<}8Gǥm9^~-Τ{"E=4_HLpAۢ  ^ .`F>v1IԠs0a%şs0@=>dCT s9؝V 6!_JCr'Jڛ|(c !"j;=};Gk=[y2јK ;0`IInFܖ !` + dC=w4JΔr4`(ȓx*A=;cFQp =ב Z 3`J?q+I؏y,a!m8;<ChAWv4Ҍ N9*]KDp Fڊ~%` & g B;y8IȈn9]~-͇y&A=7`ILpBڅ![ / `H=u0KՃt0`&Ăp2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|؃H ;1_IKpC !_ ,bE=w4Ir2a'u-܈A?<bER r<͊X 5^IAr)Ix*c !k9ڍ<bDj>vxz4ُo i+_JeniE "}f B]x^ Ip6{~+z'۔hc8bGkqo=ǖ|w^G_ uaJv-뀗"o4ۙ`^BeAruw8ԛtm']IcqfI{% h >^}]Hk9x~~ /}!ڠig4aJipmC "¢}z bG^ u`Kt2} 'u0ܥd`?eFo qs< Χxr!_JcpdHz(낶 i;`}]Ei=xzz1}جo h.^IhpjD~ !}cC_y^ Io4} )w*ڱhd:aFors= ȳ|w^IatdJw-킴"m7ڶ``BfBsvy5 ոsl)]KdpjF~&g @_{_Hm8|~ ,z$۽jf5`HkppAڿĿ{`F` vbJսs/%ļr1b`?dCq tv9׺vo"]IbqeH۸z' h<^~]Fj<w{{0е}k h1^IgpkD۳ !|dD]y_ Kϱs4| )ɰw+e`=dEn qr>ٮyt!`I`qcJڬy+"l8`^Ej=wv9x4ԩq 9!i+_JgoJiEڧ~"'"}f B_y7G^ Iɥp8}~2} +ʤ{'i=0d8bIkoFo?٢ |0 x`H_ u/JcJנv/(#Ÿo4b=;`AeCru=x7՝ u7o&]Kbq$IhIۛ{'#!!h?^}:C_Hƙk<x~4} 0ј|!k<*h2_HjpFoBۖ  - |`E_ w3IaIҔr1 -&Ɠt.f>7`=cDp s Bs;ّz4 s^Ibr)KdIڏx)&!i:`<@_Ei=wy8|2Ҍ}p ;$j-^JgpHkE܊} ")d B]z5F` I͈o5| /*ʇx)g=1b9cFmqCq>څ{2v`H_ r-IbJكx,'"‚o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"ۃ=+aHE Å.`F1Is0+s1ۈ=8dD=ϊ6_K'J|)!"j=;Bk>8!ڏ;'ہ`JI "+ ƃbE4Hq4/v+ڔ=4ۆbE Cɖ3 ҈^I+Kw+'k8ٙ<?g?<כ9!؍]J H|#( e A8En71y&۠>.ڒ`ID!Ƣ1̔`H0Iw0)q2ۥ=:gB =ҧ6ٙ_I$J}!"i9Bk5|:)ܞ_F +Ƞb2Hr,u>5ڣb A4ե^)Jx%k<?j9:"۪_H ( ìf 6Fp0z=1ۯbD1ϱ`.Kw)p==g>8!ڶ]#J|$"g 9Em4{<+ڻ_E -ɽ`2Is+s=8d?5](Jy #i;Ai7};&^I~ )!c5Ho0w<2a B3^+Iw&m<=f;9] I~&!g 8Eo2{=-aE/a0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 ^/Jv(o>;e=9]$I{%PjzPjzPjzPjzPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPP7QhyvfR9 :Qamw xm_H)-K_q~}nY@jzP ,Lbs}o\F$  Tfp{ ~sbH) +BVfq|~p]F!*I_p} |o^H)  jzP5`|\.[htS{RA]tw?]v~^4BbyzY(>ee@A]twFkhB P)X|^!PF BiY L{e:jzP3jg)!{ &SsNpd)Hnߜd)HxyJ &SsYUP>lݜ}DPF)^w5*lޜxNjzPNE,7 J~Jq2fٜ|:WٜJ"iݜk(JqHݜ}EP:o؜JPF%^~1-x؜}M jzPND[_eDvR4c&fԜ.=xԜR%s؜v+4c \לUPӜ5PF Rl pԜzP3~%!p Doݜ?~Мk 9ҜCj՜n Doݜ \ӜTPĜk PF,l.MМzPgUA~Evڜ:̜2 oМzEќJEvܜJϜ0Pœ.PF:~ޜG tΜzP#v [WT4oR;sלvɜLϜN o͜r ;sܜ!}̜ZPGPFAܜM"͜zP<ߜ!zd^ԜƜU͜s  ˜!^ܜNɜqPXPFNڜF/̜zPHޜ9~PҜŜV̜".ɜ.Pܜ tƜuP^PFPڜ/5˜zPMݜ[MA!iRPҜŜDa˜15ǜ1Pܜ"ŜkPWPFPڜ -ʜzPDܜ!xlWPҜĜ"ʜ=-Ŝ'Pܜ0ĜKPzgXH50%")0:I\lFPFP ~ocUH=2+")02-"".2ASbtzP2mZK>0)""%07HUh|t~/~P {k^PD;0'")5BSfxreWJ<00",2@M\p^wj]OA:0/""'08ATfv6sbQ?3."",070)""03 5`,PFP{bI)-X2nB/WwzP~W+NvQ[D*bRPxbH)vZ9-YNnK, Ai*l@*S~PxbH)#eA6`kPsG .o PFPlKbjD6fzP~^ S#!p\HPlS4 vO`W"FYDePN[^Pi~PFPvf4SzPvcR hJ8PAx.M&#f~<`P&jPx%uXPFPs_& bzP6_E^rbf{PKxfPL ~{"n%Pow <Pv PFPtjzP~rM X-/pTNPp P >rhnP*uWYP\PFP(,jzP4j[`5E%~P.P"u.!PvjP!PFPQPoPjzPv7cm Y{PkBP<Ak MkP$qPNMPFPpxjzPv<DX AzMNP]Pr )xPkHxxP~PFPPYjzP\+~{WQxP{nP*P]WP |qzP."PFP`jzPz9RxL {PM~PbjdPM#jzPr^PFPv*) jzP)DRXyPuDNPPL)P|qjzP,PFPPA__jzP\\{N]qPPl>Sf`P &jzPPFP)QjzPr" x>)<{PP'kx\"PJxjzPINPFP`tjzP G>X j]j<NPsP|PpAjzP pPFPjDDjzP;ndD+gkPjPW&0~PMPjzPvPFP}pfjzP_Sq(8+{PePSWRnPhjzPNPFPurxjzPxq3XYj]3NPbPqqPG<jzP!:PFPrxXjzPY %:<qePbPhc PfjzPUPFPjz0jzP @BiFxPbP@ oA5P|yjzPwjPFPjz<jzP/9""YXHu P)KP?Ynz xm\H1 bP2%OPP]jzPayPFPjzOjzPGќm.qM{x^ P"Suu\5bPMB1gP <jzPJPFPjz^ujzPVќ`SxPXyTbPxdS ~wP- 
bundlewrap-3.8.0/bundlewrap/000077500000000000000000000000001360562404000160305ustar00rootroot00000000000000bundlewrap-3.8.0/bundlewrap/__init__.py000066400000000000000000000002121360562404000201360ustar00rootroot00000000000000# -*- coding: utf-8 -*-
from __future__ import unicode_literals

VERSION = (3, 8, 0)
VERSION_STRING = ".".join([str(v) for v in VERSION])
bundlewrap-3.8.0/bundlewrap/bundle.py000066400000000000000000000114311360562404000176530ustar00rootroot00000000000000# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from os.path import exists, join

from .exceptions import BundleError, NoSuchBundle, RepositoryError
from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE
from .utils import cached_property, get_all_attrs_from_file
from .utils.text import bold, mark_for_translation as _
from .utils.text import validate_name
from .utils.ui import io


FILENAME_BUNDLE = "items.py"
FILENAME_METADATA = "metadata.py"


def metadata_processor(func):
    """
    Decorator that tags metadata processors.
    """
    func.__is_a_metadata_processor = True
    return func


class Bundle(object):
    """
    A collection of config items, bound to a node.
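
    Hypothetical usage sketch (not taken from the original source; bundles
    are normally created by their Node rather than instantiated by hand,
    and "mybundle" is a made-up bundle name):

        bundle = Bundle(node, "mybundle")
        for item in bundle.items:
            print(item.id)
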
""" def __init__(self, node, name): self.name = name self.node = node self.repo = node.repo if not validate_name(name): raise RepositoryError(_("invalid bundle name: {}").format(name)) if name not in self.repo.bundle_names: raise NoSuchBundle(_("bundle not found: {}").format(name)) self.bundle_dir = join(self.repo.bundles_dir, self.name) self.bundle_data_dir = join(self.repo.data_dir, self.name) self.bundle_file = join(self.bundle_dir, FILENAME_BUNDLE) self.metadata_file = join(self.bundle_dir, FILENAME_METADATA) def __lt__(self, other): return self.name < other.name @cached_property @io.job_wrapper(_("{} {} parsing bundle").format(bold("{0.node.name}"), bold("{0.name}"))) def bundle_attrs(self): if not exists(self.bundle_file): return {} else: return get_all_attrs_from_file( self.bundle_file, base_env={ 'node': self.node, 'repo': self.repo, }, ) @cached_property @io.job_wrapper(_("{} {} creating items").format(bold("{0.node.name}"), bold("{0.name}"))) def items(self): for item_class in self.repo.item_classes: for item_name, item_attrs in self.bundle_attrs.get( item_class.BUNDLE_ATTRIBUTE_NAME, {}, ).items(): yield self.make_item( item_class.BUNDLE_ATTRIBUTE_NAME, item_name, item_attrs, ) def make_item(self, attribute_name, item_name, item_attrs): for item_class in self.repo.item_classes: if item_class.BUNDLE_ATTRIBUTE_NAME == attribute_name: return item_class(self, item_name, item_attrs) raise RuntimeError( _("bundle '{bundle}' tried to generate item '{item}' from " "unknown attribute '{attr}'").format( attr=attribute_name, bundle=self.name, item=item_name, ) ) @cached_property def metadata_processors(self): with io.job(_("{node} {bundle} collecting metadata processors").format( node=bold(self.node.name), bundle=bold(self.name), )): if not exists(self.metadata_file): return [] result = [] internal_names = set() for name, attr in get_all_attrs_from_file( self.metadata_file, base_env={ 'DEFAULTS': DEFAULTS, 'DONE': DONE, 'RUN_ME_AGAIN': RUN_ME_AGAIN, 'OVERWRITE': OVERWRITE, 'metadata_processor': metadata_processor, 'node': self.node, 'repo': self.repo, }, ).items(): if getattr(attr, '__is_a_metadata_processor', False): internal_name = getattr(attr, '__name__', name) if internal_name in internal_names: raise BundleError(_( "Metadata processor '{name}' in bundle {bundle} for node {node} has " "__name__ '{internal_name}', which was previously used by another " "metadata processor in the same metadata.py. BundleWrap uses __name__ " "internally to tell metadata processors apart, so this is a problem. " "Perhaps you used a decorator on your metadata processors that " "doesn't use functools.wraps? You should use that." 
                        ).format(
                            bundle=self.name,
                            node=self.node.name,
                            internal_name=internal_name,
                            name=name,
                        ))
                    internal_names.add(internal_name)
                    result.append(attr)
            return result
bundlewrap-3.8.0/bundlewrap/cmdline/000077500000000000000000000000001360562404000174435ustar00rootroot00000000000000bundlewrap-3.8.0/bundlewrap/cmdline/__init__.py000066400000000000000000000120721360562404000215560ustar00rootroot00000000000000# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from cProfile import Profile
from functools import wraps
from os import environ
from os.path import abspath, dirname
from pipes import quote
from sys import argv, exit, stderr, stdout
from traceback import format_exc, print_exc

from ..exceptions import NoSuchRepository, MissingRepoDependency
from ..repo import Repository
from ..utils.text import force_text, mark_for_translation as _, red
from ..utils.ui import io
from .parser import build_parser_bw


def suppress_broken_pipe_msg(f):
    """
    Oh boy.

    CPython does funny things with SIGPIPE. By default, it is caught and
    raised as a BrokenPipeError. When do we get a SIGPIPE? Most commonly
    when piping into head:

        bw nodes | head -n 1

    head will exit after receiving the first line, causing the kernel to
    send SIGPIPE to our process. Since in most cases, we can't just quit
    early, we simply ignore BrokenPipeError in utils.ui.write_to_stream.
    Unfortunately, Python will still print a message:

        Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='UTF-8'>
        BrokenPipeError: [Errno 32] Broken pipe

    See also http://bugs.python.org/issue11380. The crazy try/finally
    construct below is taken from there and I quote:

        This will:

        - capture any exceptions *you've* raised as the context for the
          errors raised in this handler
        - expose any exceptions generated during this thing itself
        - prevent the interpreter dying during shutdown in flush_std_files
          by closing the files (you can't easily wipe out the pending
          writes that have failed)

    CAVEAT: There is a seemingly easier method floating around on the net
    (http://stackoverflow.com/a/16865106) that restores the default
    behavior for SIGPIPE (i.e. not turning it into a BrokenPipeError):

        from signal import signal, SIGPIPE, SIG_DFL
        signal(SIGPIPE,SIG_DFL)

    This worked fine for a while but broke when using
    multiprocessing.Manager() to share the list of jobs in utils.ui
    between processes. When the main process terminated, it quit with
    return code 141 (indicating a broken pipe), and the background process
    used for the manager continued to hang around indefinitely. Bonus fun:
    This was observed only on Ubuntu Trusty (14.04).
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except SystemExit:
            raise
        except:
            print_exc()
            exit(1)
        finally:
            try:
                stdout.flush()
            finally:
                try:
                    stdout.close()
                finally:
                    try:
                        stderr.flush()
                    finally:
                        stderr.close()
    return wrapper


@suppress_broken_pipe_msg
def main(*args, **kwargs):
    """
    Entry point for the 'bw' command line utility.

    The args and path parameters are used for integration tests.
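
    (A test might, for example, call main("nodes") directly instead of
    spawning the 'bw' executable. That call is illustrative only and not
    copied from the actual test suite.)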
""" if not args: args = argv[1:] text_args = [force_text(arg) for arg in args] parser_bw = build_parser_bw() pargs = parser_bw.parse_args(args) if not hasattr(pargs, 'func'): parser_bw.print_help() exit(2) if pargs.profile: profile = Profile() profile.enable() path = abspath(pargs.repo_path) io.debug_mode = pargs.debug io.activate() io.debug(_("invocation: {}").format(" ".join([force_text(arg) for arg in argv]))) environ.setdefault('BW_ADD_HOST_KEYS', "1" if pargs.add_ssh_host_keys else "0") if len(text_args) >= 1 and ( text_args[0] == "--version" or (len(text_args) >= 2 and text_args[0] == "repo" and text_args[1] == "create") or text_args[0] == "zen" or "-h" in text_args or "--help" in text_args ): # 'bw repo create' is a special case that only takes a path repo = path else: while True: try: repo = Repository(path) break except NoSuchRepository: if path == dirname(path): io.stderr(_( "{x} {path} " "is not a BundleWrap repository." ).format(path=quote(abspath(pargs.repo_path)), x=red("!!!"))) io.deactivate() exit(1) else: path = dirname(path) except MissingRepoDependency as exc: io.stderr(str(exc)) io.deactivate() exit(1) except Exception: io.stderr(format_exc()) io.deactivate() exit(1) # convert all string args into text text_pargs = {key: force_text(value) for key, value in vars(pargs).items()} try: pargs.func(repo, text_pargs) finally: io.deactivate() if pargs.profile: profile.disable() profile.dump_stats(pargs.profile) bundlewrap-3.8.0/bundlewrap/cmdline/apply.py000066400000000000000000000110341360562404000211410ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime from sys import exit from ..concurrency import WorkerPool from ..exceptions import GracefulApplyException, ItemDependencyLoop from ..utils import SkipList from ..utils.cmdline import count_items, get_target_nodes from ..utils.plot import explain_item_dependency_loop from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import ( blue, bold, error_summary, format_duration, green, green_unless_zero, mark_for_translation as _, red, red_unless_zero, yellow, yellow_unless_zero, ) from ..utils.ui import io def bw_apply(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) pending_nodes = target_nodes[:] io.progress_set_total(count_items(pending_nodes)) repo.hooks.apply_start( repo, args['target'], target_nodes, interactive=args['interactive'], ) start_time = datetime.now() results = [] skip_list = SkipList(args['resume_file']) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': node.apply, 'task_id': node.name, 'kwargs': { 'autoskip_selector': args['autoskip'], 'autoonly_selector': args['autoonly'], 'force': args['force'], 'interactive': args['interactive'], 'skip_list': skip_list, 'workers': args['item_workers'], }, } def handle_result(task_id, return_value, duration): if return_value is None: # node skipped return skip_list.add(task_id) results.append(return_value) def handle_exception(task_id, exception, traceback): msg = _("{x} {node} {msg}").format( node=bold(task_id), msg=exception, x=red("!"), ) if isinstance(exception, ItemDependencyLoop): for line in explain_item_dependency_loop(exception, task_id): io.stderr(line) errors.append(line) elif isinstance(exception, GracefulApplyException): errors.append(msg) else: io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( 
tasks_available, next_task, handle_result=handle_result, handle_exception=handle_exception, cleanup=skip_list.dump, pool_id="apply", workers=args['node_workers'], ) worker_pool.run() total_duration = datetime.now() - start_time if args['summary'] and results: stats_summary(results, total_duration) error_summary(errors) repo.hooks.apply_end( repo, args['target'], target_nodes, duration=total_duration, ) exit(1 if errors else 0) def stats_summary(results, total_duration): totals = { 'items': 0, 'correct': 0, 'fixed': 0, 'skipped': 0, 'failed': 0, } rows = [[ bold(_("node")), _("items"), _("OK"), green(_("fixed")), yellow(_("skipped")), red(_("failed")), _("time"), ], ROW_SEPARATOR] for result in results: totals['items'] += result.total for metric in ('correct', 'fixed', 'skipped', 'failed'): totals[metric] += getattr(result, metric) rows.append([ result.node_name, str(result.total), str(result.correct), green_unless_zero(result.fixed), yellow_unless_zero(result.skipped), red_unless_zero(result.failed), format_duration(result.duration), ]) if len(results) > 1: rows.append(ROW_SEPARATOR) rows.append([ bold(_("total ({} nodes)").format(len(results))), str(totals['items']), str(totals['correct']), green_unless_zero(totals['fixed']), yellow_unless_zero(totals['skipped']), red_unless_zero(totals['failed']), format_duration(total_duration), ]) alignments = { 1: 'right', 2: 'right', 3: 'right', 4: 'right', 5: 'right', 6: 'right', } for line in render_table(rows, alignments=alignments): io.stdout("{x} {line}".format(x=blue("i"), line=line)) bundlewrap-3.8.0/bundlewrap/cmdline/debug.py000066400000000000000000000015711360562404000211070ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from code import interact from .. import VERSION_STRING from ..utils.cmdline import get_node from ..utils.text import mark_for_translation as _ from ..utils.ui import io DEBUG_BANNER = _("BundleWrap {version} interactive repository inspector\n" "> You can access the current repository as 'repo'." 
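                 # (implicit string concatenation continues on the next
                 # line; DEBUG_BANNER_NODE below appends a second hint
                 # about the 'node' variable)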
"").format(version=VERSION_STRING) DEBUG_BANNER_NODE = DEBUG_BANNER + "\n" + \ _("> You can access the selected node as 'node'.") def bw_debug(repo, args): if args['node'] is None: env = {'repo': repo} banner = DEBUG_BANNER else: env = {'node': get_node(repo, args['node']), 'repo': repo} banner = DEBUG_BANNER_NODE io.deactivate() if args['command']: exec(args['command'], env) else: interact(banner, local=env) bundlewrap-3.8.0/bundlewrap/cmdline/groups.py000066400000000000000000000020061360562404000213320ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from ..group import GROUP_ATTR_DEFAULTS from ..utils.text import bold, mark_for_translation as _ from ..utils.ui import io from .nodes import _attribute_table GROUP_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['nodes']) GROUP_ATTRS_LISTS = ('nodes',) def bw_groups(repo, args): if not args['groups']: for group in repo.groups: io.stdout(group.name) else: groups = [repo.get_group(group.strip()) for group in args['groups'].split(",")] if not args['attrs']: subgroups = set(groups) for group in groups: subgroups = subgroups.union(group.subgroups) for subgroup in sorted(subgroups): io.stdout(subgroup.name) else: _attribute_table( groups, bold(_("group")), args['attrs'], GROUP_ATTRS, GROUP_ATTRS_LISTS, args['inline'], ) bundlewrap-3.8.0/bundlewrap/cmdline/hash.py000066400000000000000000000057071360562404000207510ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from sys import exit from ..exceptions import NoSuchGroup, NoSuchNode from ..utils.cmdline import get_item from ..utils.text import mark_for_translation as _, red from ..utils.ui import io def bw_hash(repo, args): if args['group_membership'] and args['metadata']: io.stdout(_( "{x} Cannot hash group membership and metadata at the same time").format(x=red("!!!") )) exit(1) if args['group_membership'] and args['item']: io.stdout(_("{x} Cannot hash group membership for an item").format(x=red("!!!"))) exit(1) if args['item'] and args['metadata']: io.stdout(_("{x} Items don't have metadata").format(x=red("!!!"))) exit(1) if args['node_or_group']: try: target = repo.get_node(args['node_or_group']) target_type = 'node' except NoSuchNode: try: target = repo.get_group(args['node_or_group']) target_type = 'group' except NoSuchGroup: if args['adhoc_nodes']: target = repo.create_node(args['node_or_group']) target_type = 'node' else: io.stderr(_("{x} No such node or group: {node_or_group}").format( node_or_group=args['node_or_group'], x=red("!!!"), )) exit(1) else: if args['item']: target = get_item(target, args['item']) target_type = 'item' else: target = repo target_type = 'repo' if target_type == 'node' and args['dict'] and args['metadata']: io.stdout(_("{x} Cannot show a metadata dict for a single node").format(x=red("!!!"))) exit(1) if target_type == 'group' and args['item']: io.stdout(_("{x} Cannot select item for group").format(x=red("!!!"))) exit(1) if args['dict']: if args['group_membership']: if target_type in ('node', 'repo'): for group in target.groups: io.stdout(group.name) else: for node in target.nodes: io.stdout(node.name) elif args['metadata']: for node in target.nodes: io.stdout("{}\t{}".format(node.name, node.metadata_hash())) else: cdict = target.cached_cdict if args['item'] else target.cdict if cdict is None: io.stdout("REMOVE") else: for key, value in sorted(cdict.items()): io.stdout("{}\t{}".format(key, value) if args['item'] else "{} {}".format(value, key)) else: if args['group_membership']: 
io.stdout(target.group_membership_hash()) elif args['metadata']: io.stdout(target.metadata_hash()) else: io.stdout(target.hash()) bundlewrap-3.8.0/bundlewrap/cmdline/items.py000066400000000000000000000105421360562404000211400ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os import makedirs from os.path import dirname, exists, join from sys import exit from ..exceptions import FaultUnavailable from ..utils.cmdline import get_item, get_node from ..utils.dicts import statedict_to_json from ..utils.text import bold, green, mark_for_translation as _, red, yellow from ..utils.ui import io def write_preview(file_item, base_path): """ Writes the content of the given file item to the given path. """ # this might raise an exception, try it before creating anything content = file_item.content file_path = join(base_path, file_item.name.lstrip("/")) dir_path = dirname(file_path) if not exists(dir_path): makedirs(dir_path) with open(file_path, 'wb') as f: f.write(content) def bw_items(repo, args): node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) if args['preview'] and not args['item']: io.stderr(_("{x} no ITEM given for preview").format(x=red("!!!"))) exit(1) elif args['file_preview_path']: if args['item']: io.stderr(_("{x} use --file-preview to preview single files").format(x=red("!!!"))) exit(1) if exists(args['file_preview_path']): io.stderr(_( "not writing to existing path: {path}" ).format(path=args['file_preview_path'])) exit(1) for item in sorted(node.items): if not item.id.startswith("file:"): continue if item.attributes['content_type'] == 'any': io.stderr(_( "{x} skipped {filename} (content_type 'any')" ).format(x=yellow("»"), filename=bold(item.name))) continue if item.attributes['content_type'] == 'binary': io.stderr(_( "{x} skipped {filename} (content_type 'binary')" ).format(x=yellow("»"), filename=bold(item.name))) continue if item.attributes['delete']: io.stderr(_( "{x} skipped {filename} ('delete' attribute set)" ).format(x=yellow("»"), filename=bold(item.name))) continue try: write_preview(item, args['file_preview_path']) except FaultUnavailable: io.stderr(_( "{x} skipped {path} (Fault unavailable)" ).format(x=yellow("»"), path=bold(item.name))) else: io.stdout(_( "{x} wrote {path}" ).format( x=green("✓"), path=bold(join( args['file_preview_path'], item.name.lstrip("/"), )), )) elif args['item']: item = get_item(node, args['item']) if args['preview']: try: io.stdout( item.preview(), append_newline=False, ) except NotImplementedError: io.stderr(_( "{x} cannot preview {item} on {node} (doesn't support previews)" ).format(x=red("!!!"), item=item.id, node=node.name)) exit(1) except ValueError: io.stderr(_( "{x} cannot preview {item} on {node} (not available for this item config)" ).format(x=red("!!!"), item=item.id, node=node.name)) exit(1) except FaultUnavailable: io.stderr(_( "{x} cannot preview {item} on {node} (Fault unavailable)" ).format(x=red("!!!"), item=item.id, node=node.name)) exit(1) else: if args['show_sdict']: statedict = item.sdict() else: statedict = item.cdict() if statedict is None: io.stdout("REMOVE") else: if args['attr']: io.stdout(repr(statedict[args['attr']])) else: io.stdout(statedict_to_json(statedict, pretty=True)) else: for item in sorted(node.items): if args['show_repr']: io.stdout(repr(item)) else: io.stdout(item.id) bundlewrap-3.8.0/bundlewrap/cmdline/lock.py000066400000000000000000000160631360562404000207530ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import 
unicode_literals from os import environ from ..concurrency import WorkerPool from ..lock import softlock_add, softlock_list, softlock_remove from ..utils.cmdline import get_target_nodes from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import ( bold, error_summary, format_timestamp, green, mark_for_translation as _, randstr, red, yellow, ) from ..utils.ui import io, page_lines def remove_dummy_nodes(targets): _targets = [] for node in targets: if node.dummy: io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("»"))) else: _targets.append(node) return _targets def remove_lock_if_present(node, lock_id): for lock in softlock_list(node): if lock['id'] == lock_id: softlock_remove(node, lock_id) return True return False def bw_lock_add(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) target_nodes = remove_dummy_nodes(target_nodes) pending_nodes = target_nodes[:] max_node_name_length = max([len(node.name) for node in target_nodes]) lock_id = randstr(length=4).upper() io.progress_set_total(len(pending_nodes)) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': softlock_add, 'task_id': node.name, 'args': (node, lock_id), 'kwargs': { 'comment': args['comment'], 'expiry': args['expiry'], 'item_selectors': args['items'].split(","), }, } def handle_result(task_id, return_value, duration): io.progress_advance() io.stdout(_("{x} {node} locked with ID {id} (expires in {exp})").format( x=green("✓"), node=bold(task_id.ljust(max_node_name_length)), id=return_value, exp=args['expiry'], )) def handle_exception(task_id, exception, traceback): msg = "{}: {}".format(task_id, exception) io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_exception=handle_exception, handle_result=handle_result, pool_id="lock", workers=args['node_workers'], ) worker_pool.run() error_summary(errors) def bw_lock_remove(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) target_nodes = remove_dummy_nodes(target_nodes) pending_nodes = target_nodes[:] max_node_name_length = max([len(node.name) for node in target_nodes]) io.progress_set_total(len(pending_nodes)) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': remove_lock_if_present, 'task_id': node.name, 'args': (node, args['lock_id'].upper()), } def handle_result(task_id, return_value, duration): io.progress_advance() if return_value is True: io.stdout(_("{x} {node} lock {id} removed").format( x=green("✓"), node=bold(task_id.ljust(max_node_name_length)), id=args['lock_id'].upper(), )) else: io.stderr(_( "{x} {node} has no lock with ID {id}" ).format( x=red("!"), node=bold(task_id.ljust(max_node_name_length)), id=args['lock_id'].upper(), )) def handle_exception(task_id, exception, traceback): msg = "{}: {}".format(task_id, exception) io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_exception=handle_exception, handle_result=handle_result, pool_id="lock_remove", workers=args['node_workers'], ) worker_pool.run() error_summary(errors) def bw_lock_show(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) target_nodes = remove_dummy_nodes(target_nodes) 
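    # work on a copy so next_task() below can pop nodes off as workers
    # pick them up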
    pending_nodes = target_nodes[:]
    locks_on_node = {}

    def tasks_available():
        return bool(pending_nodes)

    def next_task():
        node = pending_nodes.pop()
        return {
            'target': softlock_list,
            'task_id': node.name,
            'args': (node,),
        }

    def handle_result(task_id, return_value, duration):
        locks_on_node[task_id] = return_value
        repo.hooks.lock_show(repo, repo.get_node(task_id), return_value)

    def handle_exception(task_id, exception, traceback):
        msg = "{}: {}".format(task_id, exception)
        io.stderr(traceback)
        io.stderr(repr(exception))
        io.stderr(msg)
        errors.append(msg)

    worker_pool = WorkerPool(
        tasks_available,
        next_task,
        handle_exception=handle_exception,
        handle_result=handle_result,
        pool_id="lock_show",
        workers=args['node_workers'],
    )
    worker_pool.run()

    if errors:
        error_summary(errors)
        return

    rows = [[
        bold(_("node")),
        bold(_("ID")),
        bold(_("created")),
        bold(_("expires")),
        bold(_("user")),
        bold(_("items")),
        bold(_("comment")),
    ], ROW_SEPARATOR]
    for node_name, locks in sorted(locks_on_node.items()):
        if locks:
            first_lock = True
            for lock in locks:
                lock['formatted_date'] = format_timestamp(lock['date'])
                lock['formatted_expiry'] = format_timestamp(lock['expiry'])
                first_item = True
                for item in lock['items']:
                    rows.append([
                        node_name if first_item and first_lock else "",
                        lock['id'] if first_item else "",
                        lock['formatted_date'] if first_item else "",
                        lock['formatted_expiry'] if first_item else "",
                        lock['user'] if first_item else "",
                        item,
                        lock['comment'] if first_item else "",
                    ])
                    # always repeat for grep style
                    first_item = environ.get("BW_TABLE_STYLE") == 'grep'
                # always repeat for grep style
                first_lock = environ.get("BW_TABLE_STYLE") == 'grep'
        else:
            rows.append([
                node_name,
                _("(none)"),
                "",
                "",
                "",
                "",
                "",
            ])
        rows.append(ROW_SEPARATOR)
    page_lines(render_table(
        rows[:-1],  # remove trailing ROW_SEPARATOR
        alignments={1: 'center'},
    ))
bundlewrap-3.8.0/bundlewrap/cmdline/metadata.py000066400000000000000000000051201360562404000215730ustar00rootroot00000000000000# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from decimal import Decimal
from json import dumps

from ..metadata import MetadataJSONEncoder
from ..utils import Fault
from ..utils.cmdline import get_node, get_target_nodes
from ..utils.dicts import value_at_key_path
from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import bold, force_text, mark_for_translation as _, red
from ..utils.ui import io, page_lines


def bw_metadata(repo, args):
    if args['table']:
        if not args['keys']:
            io.stdout(_("{x} at least one key is required with --table").format(x=red("!!!")))
            exit(1)
        target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
        key_paths = [path.strip().split(" ") for path in " ".join(args['keys']).split(",")]
        table = [[bold(_("node"))] + [bold(" ".join(path)) for path in key_paths], ROW_SEPARATOR]
        for node in target_nodes:
            values = []
            for key_path in key_paths:
                metadata = node.metadata
                try:
                    value = value_at_key_path(metadata, key_path)
                except KeyError:
                    value = red(_("<missing>"))
                if isinstance(value, (dict, list, tuple)):
                    value = ", ".join([str(item) for item in value])
                elif isinstance(value, set):
                    value = ", ".join(sorted(value))
                elif isinstance(value, (bool, float, int, Decimal, Fault)) or value is None:
                    value = str(value)
                values.append(value)
            table.append([bold(node.name)] + values)
        page_lines(render_table(table))
    else:
        node = get_node(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
        if args['blame']:
            key_paths = [path.strip() for path in " ".join(args['keys']).split(",")]
            table = [[bold(_("path")),
bold(_("source"))], ROW_SEPARATOR] for path, blamed in sorted(node.metadata_blame.items()): joined_path = " ".join(path) for key_path in key_paths: if joined_path.startswith(key_path): table.append([joined_path, ", ".join(blamed)]) break page_lines(render_table(table)) else: for line in dumps( value_at_key_path(node.metadata, args['keys']), cls=MetadataJSONEncoder, indent=4, sort_keys=True, ).splitlines(): io.stdout(force_text(line)) bundlewrap-3.8.0/bundlewrap/cmdline/nodes.py000066400000000000000000000054121360562404000211270ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os import environ from sys import exit from ..utils import names from ..utils.cmdline import get_target_nodes from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import bold, mark_for_translation as _, red from ..utils.ui import io, page_lines from ..group import GROUP_ATTR_DEFAULTS NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'groups', 'hostname']) NODE_ATTRS_LISTS = ('bundles', 'groups') def _attribute_table( entities, entity_label, selected_attrs, available_attrs, available_attrs_lists, inline, ): rows = [[entity_label], ROW_SEPARATOR] selected_attrs = [attr.strip() for attr in selected_attrs.split(",")] if selected_attrs == ['all']: selected_attrs = available_attrs for attr in selected_attrs: if attr not in available_attrs: io.stderr(_("{x} unknown attribute: {attr}").format(x=red("!!!"), attr=attr)) exit(1) rows[0].append(bold(attr)) has_list_attrs = False for entity in entities: attr_values = [[entity.name]] for attr in selected_attrs: if attr in available_attrs_lists: if inline: attr_values.append([",".join(names(getattr(entity, attr)))]) else: has_list_attrs = True attr_values.append(sorted(names(getattr(entity, attr)))) else: attr_values.append([str(getattr(entity, attr))]) number_of_lines = max([len(value) for value in attr_values]) if environ.get("BW_TABLE_STYLE") == 'grep': # repeat entity name for each line attr_values[0] = attr_values[0] * number_of_lines for line in range(number_of_lines): row = [] for attr_index in range(len(selected_attrs) + 1): try: row.append(attr_values[attr_index][line]) except IndexError: row.append("") rows.append(row) if has_list_attrs: rows.append(ROW_SEPARATOR) if environ.get("BW_TABLE_STYLE") == 'grep': rows = rows[2:] page_lines(render_table( rows[:-1] if has_list_attrs else rows, # remove trailing ROW_SEPARATOR )) def bw_nodes(repo, args): if args['target'] is not None: nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) else: nodes = repo.nodes if not args['attrs']: for node in nodes: io.stdout(node.name) else: _attribute_table( nodes, bold(_("node")), args['attrs'], NODE_ATTRS, NODE_ATTRS_LISTS, args['inline'], ) bundlewrap-3.8.0/bundlewrap/cmdline/parser.py000066400000000000000000000745401360562404000213230ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from argparse import ArgumentParser, SUPPRESS from os import environ, getcwd from .. 
import VERSION_STRING
from ..utils.text import mark_for_translation as _
from .apply import bw_apply
from .debug import bw_debug
from .groups import bw_groups
from .hash import bw_hash
from .items import bw_items
from .lock import bw_lock_add, bw_lock_remove, bw_lock_show
from .metadata import bw_metadata
from .nodes import bw_nodes
from .plot import bw_plot_group, bw_plot_node, bw_plot_node_groups
from .repo import bw_repo_bundle_create, bw_repo_create, bw_repo_plugin_install, \
    bw_repo_plugin_list, bw_repo_plugin_search, bw_repo_plugin_remove, bw_repo_plugin_update
from .run import bw_run
from .stats import bw_stats
from .test import bw_test
from .verify import bw_verify
from .zen import bw_zen


def build_parser_bw():
    parser = ArgumentParser(
        prog="bw",
        description=_("BundleWrap - Config Management with Python"),
    )
    parser.add_argument(
        "-a",
        "--add-host-keys",
        action='store_true',
        default=False,
        dest='add_ssh_host_keys',
        help=_("set StrictHostKeyChecking=no instead of yes for SSH"),
    )
    parser.add_argument(
        "-A",
        "--adhoc-nodes",
        action='store_true',
        default=False,
        dest='adhoc_nodes',
        help=_(
            "treat unknown node names as adhoc 'virtual' nodes that receive configuration only "
            "through groups whose member_patterns match the node name given on the command line "
            "(which also has to be a resolvable hostname)"),
    )
    parser.add_argument(
        "-d",
        "--debug",
        action='store_true',
        default=False,
        dest='debug',
        help=_("print debugging info"),
    )
    parser.add_argument(
        "-r",
        "--repo-path",
        default=environ.get('BW_REPO_PATH', getcwd()),
        dest='repo_path',
        help=_("Look for repository at this path (defaults to current working directory)"),
        metavar=_("DIRECTORY"),
        type=str,
    )
    # hidden option to dump profiling info, can be inspected with
    # SnakeViz or whatever
    parser.add_argument(
        "--profile",
        default=None,
        dest='profile',
        help=SUPPRESS,
        metavar=_("FILE"),
        type=str,
    )
    parser.add_argument(
        "--version",
        action='version',
        version=VERSION_STRING,
    )
    subparsers = parser.add_subparsers(
        title=_("subcommands"),
        help=_("use 'bw <subcommand> --help' for more info"),
    )

    # bw apply
    help_apply = _("Applies the configuration defined in your repository to your nodes")
    parser_apply = subparsers.add_parser("apply", description=help_apply, help=help_apply)
    parser_apply.set_defaults(func=bw_apply)
    parser_apply.add_argument(
        'target',
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        type=str,
        help=_("target nodes, groups and/or bundle selectors"),
    )
    parser_apply.add_argument(
        "-f",
        "--force",
        action='store_true',
        default=False,
        dest='force',
        help=_("ignore existing hard node locks"),
    )
    parser_apply.add_argument(
        "-i",
        "--interactive",
        action='store_true',
        default=False,
        dest='interactive',
        help=_("ask before applying each item"),
    )
    parser_apply.add_argument(
        "-o",
        "--only",
        default="",
        dest='autoonly',
        help=_(
            "e.g.
    # bw debug
    help_debug = _("Start an interactive Python shell for this repository")
    parser_debug = subparsers.add_parser("debug", description=help_debug, help=help_debug)
    parser_debug.set_defaults(func=bw_debug)
    parser_debug.add_argument(
        "-c",
        "--command",
        default=None,
        dest='command',
        metavar=_("COMMAND"),
        required=False,
        type=str,
        help=_("command to execute in lieu of REPL"),
    )
    parser_debug.add_argument(
        "-n",
        "--node",
        default=None,
        dest='node',
        metavar=_("NODE"),
        required=False,
        type=str,
        help=_("name of node to inspect"),
    )

    # bw groups
    help_groups = _("Lists groups in this repository")
    parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups)
    parser_groups.set_defaults(func=bw_groups)
    parser_groups.add_argument(
        "-i",
        "--inline",
        action='store_true',
        dest='inline',
        help=_("keep lists on a single line (for grep)"),
    )
    parser_groups.add_argument(
        'groups',
        default=None,
        metavar=_("GROUP1,GROUP2..."),
        nargs='?',
        type=str,
        help=_("show the given groups and their subgroups"),
    )
    parser_groups.add_argument(
        'attrs',
        default=None,
        metavar=_("ATTR1,ATTR2..."),
        nargs='?',
        type=str,
        help=_("show table with the given attributes for each group "
               "(e.g. 'all', 'members', 'os', ...)"),
    )

    # bw hash
    help_hash = _("Shows a SHA1 hash that summarizes the entire configuration for this repo, "
                  "node, group, or item.")
    parser_hash = subparsers.add_parser("hash", description=help_hash, help=help_hash)
    parser_hash.set_defaults(func=bw_hash)
    parser_hash.add_argument(
        "-d",
        "--dict",
        action='store_true',
        default=False,
        dest='dict',
        help=_("instead show the data this hash is derived from"),
    )
    parser_hash.add_argument(
        "-g",
        "--group",
        action='store_true',
        default=False,
        dest='group_membership',
        help=_("hash group membership instead of configuration"),
    )
    parser_hash.add_argument(
        "-m",
        "--metadata",
        action='store_true',
        default=False,
        dest='metadata',
        help=_("hash metadata instead of configuration (not available for items)"),
    )
    parser_hash.add_argument(
        'node_or_group',
        metavar=_("NODE|GROUP"),
        type=str,
        nargs='?',
        help=_("show config hash for this node or group"),
    )
    parser_hash.add_argument(
        'item',
        metavar=_("ITEM"),
        type=str,
        nargs='?',
        help=_("show config hash for this item on the given node"),
    )
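
    # Hypothetical example invocations (not part of the parser), based on the
    # options defined above:
    #
    #   bw hash              # one hash for the whole repo's configuration
    #   bw hash -d mynode    # show the data the node's hash is derived from
    #   bw hash -m mygroup   # hash metadata instead of configuration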
    # bw items
    help_items = _("List and preview items for a specific node")
    parser_items = subparsers.add_parser("items", description=help_items, help=help_items)
    parser_items.set_defaults(func=bw_items)
    parser_items.add_argument(
        'node',
        metavar=_("NODE"),
        type=str,
        help=_("list items for this node"),
    )
    parser_items.add_argument(
        'item',
        metavar=_("ITEM"),
        nargs='?',
        type=str,
        help=_("show configuration for this item"),
    )
    parser_items.add_argument(
        'attr',
        metavar=_("ATTRIBUTE"),
        nargs='?',
        type=str,
        help=_("show only this item attribute"),
    )
    parser_items.add_argument(
        "-f",
        "--preview",
        "--file-preview",  # TODO 4.0 remove
        action='store_true',
        dest='preview',
        help=_("print preview of given ITEM"),
    )
    parser_items.add_argument(
        "-w",
        "--write-file-previews",
        default=None,
        dest='file_preview_path',
        metavar=_("DIRECTORY"),
        required=False,
        type=str,
        help=_("create DIRECTORY and fill it with rendered file previews"),
    )
    parser_items.add_argument(
        "--repr",
        action='store_true',
        dest='show_repr',
        help=_("show more verbose representation of each item"),
    )
    parser_items.add_argument(
        "--state",
        action='store_true',
        dest='show_sdict',
        help=_("show actual item status on node instead of should-be configuration"),
    )

    # bw lock
    help_lock = _("Manage locks on nodes used to prevent collisions between BundleWrap users")
    parser_lock = subparsers.add_parser("lock", description=help_lock, help=help_lock)
    parser_lock_subparsers = parser_lock.add_subparsers()

    # bw lock add
    help_lock_add = _("Add a new lock to one or more nodes")
    parser_lock_add = parser_lock_subparsers.add_parser(
        "add",
        description=help_lock_add,
        help=help_lock_add,
    )
    parser_lock_add.set_defaults(func=bw_lock_add)
    parser_lock_add.add_argument(
        'target',
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        type=str,
        help=_("target nodes, groups and/or bundle selectors"),
    )
    parser_lock_add.add_argument(
        "-c",
        "--comment",
        default="",
        dest='comment',
        help=_("brief description of the purpose of the lock"),
        type=str,
    )
    bw_lock_add_e_default = environ.get("BW_SOFTLOCK_EXPIRY", "8h")
    parser_lock_add.add_argument(
        "-e",
        "--expires-in",
        default=bw_lock_add_e_default,
        dest='expiry',
        help=_("how long before the lock is ignored and removed automatically "
               "(defaults to \"{}\")").format(bw_lock_add_e_default),
        type=str,
    )
    parser_lock_add.add_argument(
        "-i",
        "--items",
        default="*",
        dest='items',
        help=_("comma-separated list of item selectors the lock applies to "
               "(defaults to \"*\" meaning all)"),
        type=str,
    )
    bw_lock_add_p_default = int(environ.get("BW_NODE_WORKERS", "4"))
    parser_lock_add.add_argument(
        "-p",
        "--parallel-nodes",
        default=bw_lock_add_p_default,
        dest='node_workers',
        help=_("number of nodes to lock simultaneously "
               "(defaults to {})").format(bw_lock_add_p_default),
        type=int,
    )
    # bw lock remove
    help_lock_remove = _("Remove a lock from a node")
    parser_lock_remove = parser_lock_subparsers.add_parser(
        "remove",
        description=help_lock_remove,
        help=help_lock_remove,
    )
    parser_lock_remove.set_defaults(func=bw_lock_remove)
    parser_lock_remove.add_argument(
        'target',
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        type=str,
        help=_("target nodes, groups and/or bundle selectors"),
    )
    parser_lock_remove.add_argument(
        'lock_id',
        metavar=_("LOCK_ID"),
        type=str,
        help=_("ID of the lock to remove (obtained with `bw lock show`)"),
    )
    bw_lock_remove_p_default = int(environ.get("BW_NODE_WORKERS", "4"))
    parser_lock_remove.add_argument(
        "-p",
        "--parallel-nodes",
        default=bw_lock_remove_p_default,
        dest='node_workers',
        help=_("number of nodes to remove lock from simultaneously "
               "(defaults to {})").format(bw_lock_remove_p_default),
        type=int,
    )

    # bw lock show
    help_lock_show = _("Show details of locks present on a node")
    parser_lock_show = parser_lock_subparsers.add_parser(
        "show",
        description=help_lock_show,
        help=help_lock_show,
    )
    parser_lock_show.set_defaults(func=bw_lock_show)
    parser_lock_show.add_argument(
        'target',
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        type=str,
        help=_("target node"),
    )
    bw_lock_show_p_default = int(environ.get("BW_NODE_WORKERS", "4"))
    parser_lock_show.add_argument(
        "-p",
        "--parallel-nodes",
        default=bw_lock_show_p_default,
        dest='node_workers',
        help=_("number of nodes to retrieve locks from simultaneously "
               "(defaults to {})").format(bw_lock_show_p_default),
        type=int,
    )

    # bw metadata
    help_metadata = _("View a JSON representation of a node's metadata")
    parser_metadata = subparsers.add_parser(
        "metadata",
        description=help_metadata,
        help=help_metadata,
    )
    parser_metadata.set_defaults(func=bw_metadata)
    parser_metadata.add_argument(
        'target',
        metavar=_("NODE"),
        type=str,
        help=_("node to print JSON-formatted metadata for"),
    )
    parser_metadata.add_argument(
        'keys',
        default=[],
        metavar=_("KEY"),
        nargs='*',
        type=str,
        help=_("print only partial metadata from the given space-separated key path "
               "(e.g. `bw metadata mynode users jdoe` to show "
               "`mynode.metadata['users']['jdoe']`)"),
    )
    parser_metadata.add_argument(
        "--blame",
        action='store_true',
        dest='blame',
        help=_("show where each piece of metadata comes from"),
    )
    parser_metadata.add_argument(
        "-t",
        "--table",
        action='store_true',
        dest='table',
        help=_(
            "show a table of selected metadata values from multiple nodes instead; "
            "allows for multiple comma-separated paths in KEY; "
            "allows for node selectors in NODE (e.g. 'NODE1,NODE2,GROUP1,bundle:BUNDLE1...')"
        ),
    )
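
    # Hypothetical example invocations (not part of the parser); the key-path
    # example is taken from the help text above:
    #
    #   bw metadata mynode users jdoe    # mynode.metadata['users']['jdoe']
    #   bw metadata mynode --blame       # show where each key comes from
    #   bw metadata -t mygroup os        # table of one key across many nodes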
    # bw nodes
    help_nodes = _("List nodes in this repository")
    parser_nodes = subparsers.add_parser("nodes", description=help_nodes, help=help_nodes)
    parser_nodes.set_defaults(func=bw_nodes)
    parser_nodes.add_argument(
        "-i",
        "--inline",
        action='store_true',
        dest='inline',
        help=_("keep lists on a single line (for grep)"),
    )
    parser_nodes.add_argument(
        'target',
        default=None,
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        nargs='?',
        type=str,
        help=_("filter according to nodes, groups and/or bundle selectors"),
    )
    parser_nodes.add_argument(
        'attrs',
        default=None,
        metavar=_("ATTR1,ATTR2..."),
        nargs='?',
        type=str,
        help=_("show table with the given attributes for each node "
               "(e.g. 'all', 'groups', 'bundles', 'hostname', 'os', ...)"),
    )

    # bw plot
    help_plot = _("Generates DOT output that can be piped into `dot -Tsvg -ooutput.svg`. "
                  "The resulting output.svg can be viewed using most browsers.")
    parser_plot = subparsers.add_parser("plot", description=help_plot, help=help_plot)
    parser_plot_subparsers = parser_plot.add_subparsers()

    # bw plot group
    help_plot_group = _("Plot subgroups and node members for the given group "
                        "or the entire repository")
    parser_plot_subparsers_group = parser_plot_subparsers.add_parser(
        "group",
        description=help_plot_group,
        help=help_plot_group,
    )
    parser_plot_subparsers_group.set_defaults(func=bw_plot_group)
    parser_plot_subparsers_group.add_argument(
        'group',
        default=None,
        metavar=_("GROUP"),
        nargs='?',
        type=str,
        help=_("group to plot"),
    )
    parser_plot_subparsers_group.add_argument(
        "-N",
        "--no-nodes",
        action='store_false',
        dest='show_nodes',
        help=_("do not include nodes in output"),
    )

    # bw plot node
    help_plot_node = _("Plot items and their dependencies for the given node")
    parser_plot_subparsers_node = parser_plot_subparsers.add_parser(
        "node",
        description=help_plot_node,
        help=help_plot_node,
    )
    parser_plot_subparsers_node.set_defaults(func=bw_plot_node)
    parser_plot_subparsers_node.add_argument(
        'node',
        metavar=_("NODE"),
        type=str,
        help=_("node to plot"),
    )
    parser_plot_subparsers_node.add_argument(
        "--no-cluster",
        action='store_false',
        dest='cluster',
        help=_("do not cluster items by bundle"),
    )
    parser_plot_subparsers_node.add_argument(
        "--no-depends-auto",
        action='store_false',
        dest='depends_auto',
        help=_("do not show auto-generated dependencies and items"),
    )
    parser_plot_subparsers_node.add_argument(
        "--no-depends-conc",
        action='store_false',
        dest='depends_concurrency',
        help=_("do not show concurrency blocker dependencies"),
    )
    parser_plot_subparsers_node.add_argument(
        "--no-depends-regular",
        action='store_false',
        dest='depends_regular',
        help=_("do not show regular user-defined dependencies"),
    )
    parser_plot_subparsers_node.add_argument(
        "--no-depends-reverse",
        action='store_false',
        dest='depends_reverse',
        help=_("do not show reverse dependencies ('needed_by')"),
    )
    parser_plot_subparsers_node.add_argument(
        "--no-depends-static",
        action='store_false',
        dest='depends_static',
        help=_("do not show static dependencies"),
    )

    # bw plot groups-for-node
    help_plot_node_groups = _("Show where a specific node gets its groups from")
    parser_plot_subparsers_node_groups = parser_plot_subparsers.add_parser(
        "groups-for-node",
        description=help_plot_node_groups,
        help=help_plot_node_groups,
    )
    parser_plot_subparsers_node_groups.set_defaults(func=bw_plot_node_groups)
    parser_plot_subparsers_node_groups.add_argument(
        'node',
        metavar=_("NODE"),
        type=str,
        help=_("node to plot"),
    )
    # bw repo
    help_repo = _("Various subcommands to manipulate your repository")
    parser_repo = subparsers.add_parser("repo", description=help_repo, help=help_repo)
    parser_repo_subparsers = parser_repo.add_subparsers()

    # bw repo bundle
    parser_repo_subparsers_bundle = parser_repo_subparsers.add_parser("bundle")
    parser_repo_subparsers_bundle_subparsers = parser_repo_subparsers_bundle.add_subparsers()

    # bw repo bundle create
    parser_repo_subparsers_bundle_create = \
        parser_repo_subparsers_bundle_subparsers.add_parser("create")
    parser_repo_subparsers_bundle_create.set_defaults(func=bw_repo_bundle_create)
    parser_repo_subparsers_bundle_create.add_argument(
        'bundle',
        metavar=_("BUNDLE"),
        type=str,
        help=_("name of bundle to create"),
    )

    # bw repo create
    parser_repo_subparsers_create = parser_repo_subparsers.add_parser("create")
    parser_repo_subparsers_create.set_defaults(func=bw_repo_create)

    # bw repo plugin
    parser_repo_subparsers_plugin = parser_repo_subparsers.add_parser("plugin")
    parser_repo_subparsers_plugin_subparsers = parser_repo_subparsers_plugin.add_subparsers()

    # bw repo plugin install
    parser_repo_subparsers_plugin_install = parser_repo_subparsers_plugin_subparsers.add_parser("install")
    parser_repo_subparsers_plugin_install.set_defaults(func=bw_repo_plugin_install)
    parser_repo_subparsers_plugin_install.add_argument(
        'plugin',
        metavar=_("PLUGIN_NAME"),
        type=str,
        help=_("name of plugin to install"),
    )
    parser_repo_subparsers_plugin_install.add_argument(
        "-f",
        "--force",
        action='store_true',
        dest='force',
        help=_("overwrite existing files when installing"),
    )

    # bw repo plugin list
    parser_repo_subparsers_plugin_list = parser_repo_subparsers_plugin_subparsers.add_parser("list")
    parser_repo_subparsers_plugin_list.set_defaults(func=bw_repo_plugin_list)

    # bw repo plugin remove
    parser_repo_subparsers_plugin_remove = parser_repo_subparsers_plugin_subparsers.add_parser("remove")
    parser_repo_subparsers_plugin_remove.set_defaults(func=bw_repo_plugin_remove)
    parser_repo_subparsers_plugin_remove.add_argument(
        'plugin',
        metavar=_("PLUGIN_NAME"),
        type=str,
        help=_("name of plugin to remove"),
    )
    parser_repo_subparsers_plugin_remove.add_argument(
        "-f",
        "--force",
        action='store_true',
        dest='force',
        help=_("remove files even if locally modified"),
    )

    # bw repo plugin search
    parser_repo_subparsers_plugin_search = parser_repo_subparsers_plugin_subparsers.add_parser("search")
    parser_repo_subparsers_plugin_search.set_defaults(func=bw_repo_plugin_search)
    parser_repo_subparsers_plugin_search.add_argument(
        'term',
        metavar=_("SEARCH_STRING"),
        nargs='?',
        type=str,
        help=_("look for this string in plugin names and descriptions"),
    )

    # bw repo plugin update
    parser_repo_subparsers_plugin_update = parser_repo_subparsers_plugin_subparsers.add_parser("update")
    parser_repo_subparsers_plugin_update.set_defaults(func=bw_repo_plugin_update)
    parser_repo_subparsers_plugin_update.add_argument(
        'plugin',
        default=None,
        metavar=_("PLUGIN_NAME"),
        nargs='?',
        type=str,
        help=_("name of plugin to update"),
    )
    parser_repo_subparsers_plugin_update.add_argument(
        "-c",
        "--check-only",
        action='store_true',
        dest='check_only',
        help=_("only show what would be updated"),
    )
    parser_repo_subparsers_plugin_update.add_argument(
        "-f",
        "--force",
        action='store_true',
        dest='force',
        help=_("overwrite local modifications when updating"),
    )

    # bw run
    help_run = _("Run a one-off command on a number of nodes")
    parser_run = subparsers.add_parser("run", description=help_run, help=help_run)
    parser_run.set_defaults(func=bw_run)
    parser_run.add_argument(
        'target',
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        type=str,
        help=_("target nodes, groups and/or bundle selectors"),
    )
    parser_run.add_argument(
        'command',
        metavar=_("COMMAND"),
        type=str,
        help=_("command to run"),
    )
    parser_run.add_argument(
        "--stderr-table",
        action='store_true',
        dest='stderr_table',
        help=_("include command stderr in stats table"),
    )
    parser_run.add_argument(
        "--stdout-table",
        action='store_true',
        dest='stdout_table',
        help=_("include command stdout in stats table"),
    )
    bw_run_p_default = int(environ.get("BW_NODE_WORKERS", "1"))
    parser_run.add_argument(
        "-p",
        "--parallel-nodes",
        default=bw_run_p_default,
        dest='node_workers',
        help=_("number of nodes to run command on simultaneously "
               "(defaults to {})").format(bw_run_p_default),
        type=int,
    )
    parser_run.add_argument(
        "-r",
        "--resume-file",
        default=None,
        dest='resume_file',
        help=_(
            "path to a file that a list of completed nodes will be added to; "
            "if the file already exists, any nodes therein will be skipped"
        ),
        metavar=_("PATH"),
        type=str,
    )
    parser_run.add_argument(
        "-S",
        "--no-summary",
        action='store_false',
        dest='summary',
        help=_("don't show stats summary"),
    )

    # bw stats
    help_stats = _("Show some statistics about your repository")
    parser_stats = subparsers.add_parser("stats", description=help_stats, help=help_stats)
    parser_stats.set_defaults(func=bw_stats)

    # bw test
    help_test = _("Test your repository for consistency "
                  "(you can use this with a CI tool like Jenkins). "
                  "If *any* options other than -i are given, *only* the "
                  "tests selected by those options will be run. Otherwise, a "
                  "default selection of tests will be run (that selection may "
                  "change in future releases). Currently, the default is -IJKM "
                  "if specific nodes are given and -HIJKMS if testing the "
                  "entire repo.")
    parser_test = subparsers.add_parser("test", description=help_test, help=help_test)
    parser_test.set_defaults(func=bw_test)
    parser_test.add_argument(
        'target',
        default=None,
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        nargs='?',
        type=str,
        help=_("target nodes, groups and/or bundle selectors (defaults to all)"),
    )
    parser_test.add_argument(
        "-c",
        "--plugin-conflicts",
        action='store_true',
        dest='plugin_conflicts',
        help=_("check for local modifications to files installed by plugins"),
    )
    parser_test.add_argument(
        "-d",
        "--config-determinism",
        default=0,
        dest='determinism_config',
        help=_("verify determinism of configuration by running `bw hash` N times "
               "and checking for consistent results (with N > 1)"),
        metavar="N",
        type=int,
    )
    parser_test.add_argument(
        "-e",
        "--empty-groups",
        action='store_true',
        dest='empty_groups',
        help=_("check for empty groups"),
    )
    parser_test.add_argument(
        "-H",
        "--hooks-repo",
        action='store_true',
        dest='hooks_repo',
        help=_("run repo-level test hooks"),
    )
    parser_test.add_argument(
        "-i",
        "--ignore-missing-faults",
        action='store_true',
        dest='ignore_missing_faults',
        help=_("do not fail when encountering a missing Fault"),
    )
    parser_test.add_argument(
        "-I",
        "--items",
        action='store_true',
        dest='items',
        help=_("run item-level tests (like rendering templates)"),
    )
    parser_test.add_argument(
        "-J",
        "--hooks-node",
        action='store_true',
        dest='hooks_node',
        help=_("run node-level test hooks"),
    )
    parser_test.add_argument(
        "-K",
        "--metadata-keys",
        action='store_true',
        dest='metadata_keys',
        help=_("validate metadata keys"),
    )
    parser_test.add_argument(
        "-m",
        "--metadata-determinism",
        default=0,
        dest='determinism_metadata',
        help=_("verify determinism of metadata by running `bw hash -m` N times "
               "and checking for consistent results (with N > 1)"),
        metavar="N",
        type=int,
    )
    parser_test.add_argument(
        "-M",
        "--metadata-collisions",
        action='store_true',
        dest='metadata_collisions',
        help=_("check for conflicting metadata keys in group metadata"),
    )
    parser_test.add_argument(
        "-o",
        "--orphaned-bundles",
        action='store_true',
        dest='orphaned_bundles',
        help=_("check for bundles not assigned to any node"),
    )
    parser_test.add_argument(
        "-s",
        "--secret-rotation",
        default=None,
        dest='ignore_secret_identifiers',
        help=_("ensure every string passed to repo.vault.[human_]password_for() is used at least "
               "twice (using it only once means you're probably managing only one end of an "
               "authentication, making it dangerous to rotate your .secrets.cfg); PATTERNS is a "
               "comma-separated list of regex patterns for strings to ignore in this check "
               "(just pass an empty string if you don't need to ignore anything)"),
        metavar="PATTERNS",
        type=str,
    )
    parser_test.add_argument(
        "-S",
        "--subgroup-loops",
        action='store_true',
        dest='subgroup_loops',
        help=_("check for loops in subgroup hierarchies"),
    )
    # bw verify
    help_verify = _("Inspect the health or 'correctness' of a node without changing it")
    parser_verify = subparsers.add_parser("verify", description=help_verify, help=help_verify)
    parser_verify.set_defaults(func=bw_verify)
    parser_verify.add_argument(
        'target',
        metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
        type=str,
        help=_("target nodes, groups and/or bundle selectors"),
    )
    parser_verify.add_argument(
        "-a",
        "--show-all",
        action='store_true',
        dest='show_all',
        help=_("show correct items as well as incorrect ones"),
    )
    bw_verify_p_default = int(environ.get("BW_NODE_WORKERS", "4"))
    parser_verify.add_argument(
        "-p",
        "--parallel-nodes",
        default=bw_verify_p_default,
        dest='node_workers',
        help=_("number of nodes to verify simultaneously "
               "(defaults to {})").format(bw_verify_p_default),
        type=int,
    )
    bw_verify_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4"))
    parser_verify.add_argument(
        "-P",
        "--parallel-items",
        default=bw_verify_p_items_default,
        dest='item_workers',
        help=_("number of items to verify simultaneously on each node "
               "(defaults to {})").format(bw_verify_p_items_default),
        type=int,
    )
    parser_verify.add_argument(
        "-S",
        "--no-summary",
        action='store_false',
        dest='summary',
        help=_("don't show stats summary"),
    )

    # bw zen
    parser_zen = subparsers.add_parser("zen")
    parser_zen.set_defaults(func=bw_zen)

    return parser


# bundlewrap-3.8.0/bundlewrap/cmdline/plot.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from ..deps import prepare_dependencies
from ..utils.plot import graph_for_items, plot_group, plot_node_groups
from ..utils.cmdline import get_group, get_node
from ..utils.ui import io


def bw_plot_group(repo, args):
    group = get_group(repo, args['group']) if args['group'] else None
    if args['show_nodes']:
        nodes = group.nodes if group else repo.nodes
    else:
        nodes = []
    if group:
        groups = [group]
        groups.extend(group.subgroups)
    else:
        groups = repo.groups
    for line in plot_group(groups, nodes, args['show_nodes']):
        io.stdout(line)


def bw_plot_node(repo, args):
    node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
    for line in graph_for_items(
        node.name,
        prepare_dependencies(node.items, node.os, node.os_version),
        cluster=args['cluster'],
        concurrency=args['depends_concurrency'],
        static=args['depends_static'],
        regular=args['depends_regular'],
        reverse=args['depends_reverse'],
        auto=args['depends_auto'],
    ):
        io.stdout(line)


def bw_plot_node_groups(repo, args):
    node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
    for line in plot_node_groups(node):
        io.stdout(line)
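

# Illustrative usage (from the `bw plot` help text): pipe the DOT output
# produced by the functions above into Graphviz, e.g.
#
#   bw plot node mynode | dot -Tsvg -ooutput.svg
#   bw plot group mygroup | dot -Tsvg -ooutput.svg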
# bundlewrap-3.8.0/bundlewrap/cmdline/repo.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from sys import exit

from ..exceptions import NoSuchPlugin, PluginLocalConflict
from ..plugins import PluginManager
from ..repo import Repository
from ..utils.text import blue, bold, mark_for_translation as _, red
from ..utils.ui import io


def bw_repo_bundle_create(repo, args):
    repo.create_bundle(args['bundle'])


def bw_repo_create(path, args):
    Repository.create(path)


def bw_repo_plugin_install(repo, args):
    pm = PluginManager(repo.path)
    try:
        manifest = pm.install(args['plugin'], force=args['force'])
        io.stdout(_("{x} Installed '{plugin}' (v{version})").format(
            x=blue("i"),
            plugin=args['plugin'],
            version=manifest['version'],
        ))
        if 'help' in manifest:
            io.stdout("")
            for line in manifest['help'].split("\n"):
                io.stdout(line)
    except NoSuchPlugin:
        io.stderr(_("{x} No such plugin: {plugin}").format(x=red("!!!"), plugin=args['plugin']))
        exit(1)
    except PluginLocalConflict as e:
        io.stderr(_("{x} Plugin installation failed: {reason}").format(
            reason=e.message,
            x=red("!!!"),
        ))
        exit(1)


def bw_repo_plugin_list(repo, args):
    pm = PluginManager(repo.path)
    for plugin, version in pm.list():
        io.stdout(_("{plugin} (v{version})").format(plugin=plugin, version=version))


def bw_repo_plugin_remove(repo, args):
    pm = PluginManager(repo.path)
    try:
        pm.remove(args['plugin'], force=args['force'])
    except NoSuchPlugin:
        io.stdout(_("{x} Plugin '{plugin}' is not installed").format(
            x=red("!!!"),
            plugin=args['plugin'],
        ))
        exit(1)


def bw_repo_plugin_search(repo, args):
    pm = PluginManager(repo.path)
    for plugin, desc in pm.search(args['term']):
        io.stdout(_("{plugin} {desc}").format(desc=desc, plugin=bold(plugin)))


def bw_repo_plugin_update(repo, args):
    pm = PluginManager(repo.path)
    if args['plugin']:
        old_version, new_version = pm.update(
            args['plugin'],
            check_only=args['check_only'],
            force=args['force'],
        )
        if old_version != new_version:
            io.stdout(_("{plugin} {old_version} → {new_version}").format(
                new_version=new_version,
                old_version=old_version,
                plugin=bold(args['plugin']),
            ))
    else:
        for plugin, version in pm.list():
            old_version, new_version = pm.update(
                plugin,
                check_only=args['check_only'],
                force=args['force'],
            )
            if old_version != new_version:
                io.stdout(_("{plugin} {old_version} → {new_version}").format(
                    new_version=new_version,
                    old_version=old_version,
                    plugin=bold(plugin),
                ))
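

# Hypothetical example invocations (not part of this module) for the plugin
# commands above; the plugin name is made up:
#
#   bw repo plugin search git     # search plugin names and descriptions
#   bw repo plugin install foo    # install a plugin called "foo"
#   bw repo plugin update -c      # only show what would be updated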
# bundlewrap-3.8.0/bundlewrap/cmdline/run.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from datetime import datetime
try:
    from itertools import zip_longest
except ImportError:  # Python 2
    from itertools import izip_longest as zip_longest
from sys import exit

from ..concurrency import WorkerPool
from ..exceptions import SkipNode
from ..utils import SkipList
from ..utils.cmdline import get_target_nodes
from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import (
    blue,
    bold,
    error_summary,
    format_duration,
    green,
    mark_for_translation as _,
    red,
    yellow,
)
from ..utils.ui import io


def run_on_node(node, command, skip_list):
    if node.dummy:
        io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("»")))
        return None
    if node.name in skip_list:
        io.stdout(_("{x} {node} skipped by --resume-file").format(node=bold(node.name), x=yellow("»")))
        return None

    try:
        node.repo.hooks.node_run_start(
            node.repo,
            node,
            command,
        )
    except SkipNode as exc:
        io.stdout(_("{x} {node} skipped by hook ({reason})").format(
            node=bold(node.name),
            reason=str(exc) or _("no reason given"),
            x=yellow("»"),
        ))
        return None

    with io.job(_("{} running command...").format(bold(node.name))):
        result = node.run(
            command,
            may_fail=True,
            log_output=True,
        )

    node.repo.hooks.node_run_end(
        node.repo,
        node,
        command,
        duration=result.duration,
        return_code=result.return_code,
        stdout=result.stdout,
        stderr=result.stderr,
    )

    return result


def stats_summary(results, include_stdout, include_stderr):
    rows = [[
        bold(_("node")),
        bold(_("return code")),
        bold(_("time")),
    ], ROW_SEPARATOR]
    if include_stdout:
        rows[0].append(bold(_("stdout")))
    if include_stderr:
        rows[0].append(bold(_("stderr")))

    for node_name, result in sorted(results.items()):
        row = [node_name]
        if result is None:
            # node has been skipped
            continue
        elif result.return_code == 0:
            row.append(green(str(result.return_code)))
        else:
            row.append(red(str(result.return_code)))
        row.append(format_duration(result.duration, msec=True))
        rows.append(row)

        if include_stdout or include_stderr:
            stdout = result.stdout.decode('utf-8', errors='replace').strip().split("\n")
            stderr = result.stderr.decode('utf-8', errors='replace').strip().split("\n")
            if include_stdout:
                row.append(stdout[0])
            if include_stderr:
                row.append(stderr[0])
            for stdout_line, stderr_line in list(zip_longest(stdout, stderr, fillvalue=""))[1:]:
                continuation_row = ["", "", ""]
                if include_stdout:
                    continuation_row.append(stdout_line)
                if include_stderr:
                    continuation_row.append(stderr_line)
                rows.append(continuation_row)
            rows.append(ROW_SEPARATOR)

    if include_stdout or include_stderr:
        # remove last ROW_SEPARATOR
        rows = rows[:-1]

    if len(rows) > 2:  # table might be empty if all nodes have been skipped
        for line in render_table(rows, alignments={1: 'right', 2: 'right'}):
            io.stdout("{x} {line}".format(x=blue("i"), line=line))


def bw_run(repo, args):
    errors = []
    target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
    pending_nodes = target_nodes[:]
    io.progress_set_total(len(pending_nodes))

    repo.hooks.run_start(
        repo,
        args['target'],
        target_nodes,
        args['command'],
    )
    start_time = datetime.now()

    results = {}
    skip_list = SkipList(args['resume_file'])

    def tasks_available():
        return bool(pending_nodes)

    def next_task():
        node = pending_nodes.pop()
        return {
            'target': run_on_node,
            'task_id': node.name,
            'args': (
                node,
                args['command'],
                skip_list,
            ),
        }

    def handle_result(task_id, return_value, duration):
        io.progress_advance()
        results[task_id] = return_value
        if return_value is None or return_value.return_code == 0:
            skip_list.add(task_id)

    def handle_exception(task_id, exception, traceback):
        io.progress_advance()
        msg = "{} {}".format(bold(task_id), exception)
        io.stderr(traceback)
        io.stderr(repr(exception))
        io.stderr("{} {}".format(red("!"), msg))
        errors.append(msg)

    worker_pool = WorkerPool(
        tasks_available,
        next_task,
        handle_result=handle_result,
        handle_exception=handle_exception,
        cleanup=skip_list.dump,
        pool_id="run",
        workers=args['node_workers'],
    )
    worker_pool.run()

    if args['summary']:
        stats_summary(results, args['stdout_table'], args['stderr_table'])
    error_summary(errors)

    repo.hooks.run_end(
        repo,
        args['target'],
        target_nodes,
        args['command'],
        duration=datetime.now() - start_time,
    )

    exit(1 if errors else 0)
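

# Illustrative sketch (not part of BundleWrap): how stats_summary() above
# pairs stdout and stderr lines of unequal length, using the zip_longest
# imported at the top of this file. Sample data is made up.
def _demo_zip_longest():
    stdout = ["line 1", "line 2", "line 3"]
    stderr = ["oops"]
    # fillvalue="" pads the shorter stream so every row has both columns
    return list(zip_longest(stdout, stderr, fillvalue=""))
    # -> [('line 1', 'oops'), ('line 2', ''), ('line 3', '')]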
# bundlewrap-3.8.0/bundlewrap/cmdline/stats.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter

from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import bold, mark_for_translation as _
from ..utils.ui import page_lines


def bw_stats(repo, args):
    items = {}
    metaprocs = set()
    for node in repo.nodes:
        for metadata_processor_name, metadata_processor in node.metadata_processors:
            metaprocs.add(metadata_processor_name)
        for item in node.items:
            items.setdefault(item.ITEM_TYPE_NAME, 0)
            items[item.ITEM_TYPE_NAME] += 1

    rows = [
        [
            bold(_("count")),
            bold(_("type")),
        ],
        ROW_SEPARATOR,
        [str(len(repo.nodes)), _("nodes")],
        [str(len(repo.groups)), _("groups")],
        [str(len(repo.bundle_names)), _("bundles")],
        [str(len(metaprocs)), _("metadata processors")],
        [str(sum([len(list(node.items)) for node in repo.nodes])), _("items")],
        ROW_SEPARATOR,
    ]
    for item_type, count in sorted(items.items(), key=itemgetter(1), reverse=True):
        rows.append([str(count), item_type])
    page_lines(render_table(rows, alignments={0: 'right'}))
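

# Illustrative sketch (not part of BundleWrap): sorting the item-type counters
# the way bw_stats() above does, most frequent type first, using the
# itemgetter imported at the top of this file. Sample counts are made up.
def _demo_sort_by_count():
    items = {'file': 12, 'pkg_apt': 30, 'svc_systemd': 5}
    return sorted(items.items(), key=itemgetter(1), reverse=True)
    # -> [('pkg_apt', 30), ('file', 12), ('svc_systemd', 5)]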
# bundlewrap-3.8.0/bundlewrap/cmdline/test.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from copy import copy
from re import compile as compile_regex
from sys import exit

from ..deps import DummyItem
from ..exceptions import FaultUnavailable, ItemDependencyLoop
from ..itemqueue import ItemTestQueue
from ..metadata import check_for_unsolvable_metadata_key_conflicts, check_metadata_keys
from ..plugins import PluginManager
from ..repo import Repository
from ..utils.cmdline import count_items, get_target_nodes
from ..utils.plot import explain_item_dependency_loop
from ..utils.text import bold, green, mark_for_translation as _, red, yellow
from ..utils.ui import io, QUIT_EVENT


def test_items(nodes, ignore_missing_faults):
    io.progress_set_total(count_items(nodes))
    for node in nodes:
        if QUIT_EVENT.is_set():
            break
        if not node.items:
            io.stdout(_("{x} {node} has no items").format(node=bold(node.name), x=yellow("!")))
            continue
        item_queue = ItemTestQueue(node.items, node.os, node.os_version)
        while not QUIT_EVENT.is_set():
            try:
                item = item_queue.pop()
            except IndexError:  # no items left
                break
            if isinstance(item, DummyItem):
                continue
            try:
                item._test()
            except FaultUnavailable:
                if ignore_missing_faults:
                    io.progress_advance()
                    io.stderr(_("{x} {node} {bundle} {item} ({msg})").format(
                        bundle=bold(item.bundle.name),
                        item=item.id,
                        msg=yellow(_("Fault unavailable")),
                        node=bold(node.name),
                        x=yellow("»"),
                    ))
                else:
                    io.stderr(_("{x} {node} {bundle} {item} missing Fault:").format(
                        bundle=bold(item.bundle.name),
                        item=item.id,
                        node=bold(node.name),
                        x=red("!"),
                    ))
                    raise
            except Exception:
                io.stderr(_("{x} {node} {bundle} {item}").format(
                    bundle=bold(item.bundle.name),
                    item=item.id,
                    node=bold(node.name),
                    x=red("!"),
                ))
                raise
            else:
                if item.id.count(":") < 2:
                    # don't count canned actions
                    io.progress_advance()
                io.stdout("{x} {node} {bundle} {item}".format(
                    bundle=bold(item.bundle.name),
                    item=item.id,
                    node=bold(node.name),
                    x=green("✓"),
                ))
        if item_queue.items_with_deps and not QUIT_EVENT.is_set():
            exception = ItemDependencyLoop(item_queue.items_with_deps)
            for line in explain_item_dependency_loop(exception, node.name):
                io.stderr(line)
            exit(1)
    io.progress_set_total(0)


def test_subgroup_loops(repo):
    checked_groups = []
    for group in repo.groups:
        if QUIT_EVENT.is_set():
            break
        if group in checked_groups:
            continue
        with io.job(_("{group} checking for subgroup loops").format(group=bold(group.name))):
            checked_groups.extend(group.subgroups)  # the subgroups property has the check built in
        io.stdout(_("{x} {group} has no subgroup loops").format(
            x=green("✓"),
            group=bold(group.name),
        ))


def test_metadata_collisions(node):
    with io.job(_("{node} checking for metadata collisions").format(node=bold(node.name))):
        check_for_unsolvable_metadata_key_conflicts(node)
    io.stdout(_("{x} {node} has no metadata collisions").format(
        x=green("✓"),
        node=bold(node.name),
    ))


def test_metadata_keys(node):
    with io.job(_("{node} checking metadata keys").format(node=bold(node.name))):
        check_metadata_keys(node)
    io.stdout(_("{x} {node} has valid metadata keys").format(
        x=green("✓"),
        node=bold(node.name),
    ))


def test_orphaned_bundles(repo):
    orphaned_bundles = set(repo.bundle_names)
    for node in repo.nodes:
        if QUIT_EVENT.is_set():
            break
        for bundle in node.bundles:
            if QUIT_EVENT.is_set():
                break
            orphaned_bundles.discard(bundle.name)
    for bundle in sorted(orphaned_bundles):
        io.stderr(_("{x} {bundle} is an unused bundle").format(
            bundle=bold(bundle),
            x=red("✘"),
        ))
    if orphaned_bundles:
        exit(1)


def test_secret_identifiers(repo, ignore_patterns):
    # create a new object to make sure we don't double-count any calls
    # from previous tests
    pristine_repo = Repository(repo.path)
    pristine_repo.hash()  # shortest way to create all configuration
    patterns = set()
    for raw_pattern in ignore_patterns.split(","):
        if raw_pattern:
            patterns.add(compile_regex(raw_pattern))
    for identifier, call_count in pristine_repo.vault._call_log.items():
        if call_count == 1:
            for pattern in patterns:
                if pattern.search(identifier):
                    break
            else:
                io.stderr(_(
                    "{x} identifier passed only once to repo.vault.[human_]password_for(): {i}"
                ).format(
                    i=bold(identifier),
                    x=red("✘"),
                ))
                exit(1)
    io.stdout(_(
        "{x} all arguments to repo.vault.[human_]password_for() used at least twice"
    ).format(x=green("✓")))


def test_empty_groups(repo):
    empty_groups = set()
    for group in repo.groups:
        if QUIT_EVENT.is_set():
            break
        if not group.nodes:
            empty_groups.add(group)
    for group in sorted(empty_groups):
        io.stderr(_("{x} {group} is an empty group").format(
            group=bold(group),
            x=red("✘"),
        ))
    if empty_groups:
        exit(1)


def test_plugin_conflicts(repo):
    pm = PluginManager(repo.path)
    for plugin, version in pm.list():
        if QUIT_EVENT.is_set():
            break
        local_changes = pm.local_modifications(plugin)
        if local_changes:
            io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format(
                plugin=plugin,
                x=red("✘"),
            ))
            for path, actual_checksum, should_checksum in local_changes:
                io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format(
                    actual_checksum=actual_checksum,
                    path=path,
                    should_checksum=should_checksum,
                ))
            exit(1)
        else:
            io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format(
                plugin=plugin,
                x=green("✓"),
            ))


def test_determinism_config(repo, nodes, iterations):
    """
    Generate configuration a couple of times for every node and see if
    anything changes between iterations
    """
    hashes = {}
    io.progress_set_total(len(nodes) * iterations)
    for i in range(iterations):
        if QUIT_EVENT.is_set():
            break
        if i == 0:
            # optimization: for the first iteration, just use the repo
            # we already have
            iteration_repo = repo
        else:
            iteration_repo = Repository(repo.path)
        iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes]
        for node in iteration_nodes:
            if QUIT_EVENT.is_set():
                break
            with io.job(_("{node} generating configuration ({i}/{n})").format(
                i=i + 1,
                n=iterations,
                node=bold(node.name),
            )):
                result = node.hash()
            hashes.setdefault(node.name, result)
            if hashes[node.name] != result:
                io.stderr(_(
                    "{x} Configuration for node {node} changed when generated repeatedly "
                    "(use `bw hash -d {node}` to debug)"
                ).format(node=node.name, x=red("✘")))
                exit(1)
            io.progress_advance()
    io.progress_set_total(0)
    io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
        n=iterations,
        x=green("✓"),
    ))
def test_determinism_metadata(repo, nodes, iterations):
    """
    Generate metadata a couple of times for every node and see if
    anything changes between iterations
    """
    hashes = {}
    io.progress_set_total(len(nodes) * iterations)
    for i in range(iterations):
        if QUIT_EVENT.is_set():
            break
        if i == 0:
            # optimization: for the first iteration, just use the repo
            # we already have
            iteration_repo = repo
        else:
            iteration_repo = Repository(repo.path)
        iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes]
        for node in iteration_nodes:
            if QUIT_EVENT.is_set():
                break
            with io.job(_("{node} generating metadata ({i}/{n})").format(
                i=i + 1,
                n=iterations,
                node=bold(node.name),
            )):
                result = node.metadata_hash()
            hashes.setdefault(node.name, result)
            if hashes[node.name] != result:
                io.stderr(_(
                    "{x} Metadata for node {node} changed when generated repeatedly "
                    "(use `bw hash -d {node}` to debug)"
                ).format(node=node.name, x=red("✘")))
                exit(1)
            io.progress_advance()
    io.progress_set_total(0)
    io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
        n=iterations,
        x=green("✓"),
    ))


def bw_test(repo, args):
    options_selected = (
        args['determinism_config'] > 1 or
        args['determinism_metadata'] > 1 or
        args['hooks_node'] or
        args['hooks_repo'] or
        args['ignore_secret_identifiers'] is not None or
        args['items'] or
        args['metadata_keys'] or
        args['metadata_collisions'] or
        args['orphaned_bundles'] or
        args['empty_groups'] or
        args['plugin_conflicts'] or
        args['subgroup_loops']
    )

    if args['target']:
        nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
        if not options_selected:
            args['hooks_node'] = True
            args['items'] = True
            args['metadata_collisions'] = True
            args['metadata_keys'] = True
    else:
        nodes = copy(list(repo.nodes))
        if not options_selected:
            args['hooks_node'] = True
            args['hooks_repo'] = True
            args['items'] = True
            args['metadata_collisions'] = True
            args['metadata_keys'] = True
            args['subgroup_loops'] = True

    if args['ignore_secret_identifiers'] is not None and not QUIT_EVENT.is_set():
        test_secret_identifiers(repo, args['ignore_secret_identifiers'])

    if args['plugin_conflicts'] and not QUIT_EVENT.is_set():
        test_plugin_conflicts(repo)

    if args['subgroup_loops'] and not QUIT_EVENT.is_set():
        test_subgroup_loops(repo)

    if args['empty_groups'] and not QUIT_EVENT.is_set():
        test_empty_groups(repo)

    if args['orphaned_bundles'] and not QUIT_EVENT.is_set():
        test_orphaned_bundles(repo)

    if args['metadata_keys'] and not QUIT_EVENT.is_set():
        io.progress_set_total(len(nodes))
        for node in nodes:
            if QUIT_EVENT.is_set():
                break
            test_metadata_keys(node)
            io.progress_advance()
        io.progress_set_total(0)

    if args['metadata_collisions'] and not QUIT_EVENT.is_set():
        io.progress_set_total(len(nodes))
        for node in nodes:
            if QUIT_EVENT.is_set():
                break
            test_metadata_collisions(node)
            io.progress_advance()
        io.progress_set_total(0)

    if args['items']:
        test_items(nodes, args['ignore_missing_faults'])

    if args['determinism_metadata'] > 1 and not QUIT_EVENT.is_set():
        test_determinism_metadata(repo, nodes, args['determinism_metadata'])

    if args['determinism_config'] > 1 and not QUIT_EVENT.is_set():
        test_determinism_config(repo, nodes, args['determinism_config'])

    if args['hooks_node'] and not QUIT_EVENT.is_set():
        io.progress_set_total(len(nodes))
        for node in nodes:
            if QUIT_EVENT.is_set():
                break
            repo.hooks.test_node(repo, node)
            io.progress_advance()
        io.progress_set_total(0)

    if args['hooks_repo'] and not QUIT_EVENT.is_set():
        repo.hooks.test(repo)
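

# Hypothetical example invocations (not part of this module) for the
# determinism checks above; N must be greater than 1 per the help text:
#
#   bw test -d 3 mynode    # generate config 3 times and compare hashes
#   bw test -m 3 mynode    # same, but for metadata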
# bundlewrap-3.8.0/bundlewrap/cmdline/verify.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from datetime import datetime
from sys import exit

from ..concurrency import WorkerPool
from ..utils.cmdline import count_items, get_target_nodes
from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import (
    blue,
    bold,
    cyan,
    cyan_unless_zero,
    error_summary,
    format_duration,
    green,
    green_unless_zero,
    mark_for_translation as _,
    red,
    red_unless_zero,
)
from ..utils.ui import io


def stats_summary(node_stats, total_duration):
    for node in node_stats.keys():
        node_stats[node]['total'] = sum([
            node_stats[node]['good'],
            node_stats[node]['bad'],
            node_stats[node]['unknown'],
        ])
        try:
            node_stats[node]['health'] = \
                (node_stats[node]['good'] / float(node_stats[node]['total'])) * 100.0
        except ZeroDivisionError:
            node_stats[node]['health'] = 0

    totals = {
        'items': 0,
        'good': 0,
        'bad': 0,
        'unknown': 0,
    }
    node_ranking = []

    for node_name, stats in node_stats.items():
        totals['items'] += stats['total']
        totals['good'] += stats['good']
        totals['bad'] += stats['bad']
        totals['unknown'] += stats['unknown']
        node_ranking.append((
            stats['health'],
            node_name,
            stats['total'],
            stats['good'],
            stats['bad'],
            stats['unknown'],
            stats['duration'],
        ))

    node_ranking = sorted(node_ranking, reverse=True)

    try:
        totals['health'] = (totals['good'] / float(totals['items'])) * 100.0
    except ZeroDivisionError:
        totals['health'] = 0

    rows = [[
        bold(_("node")),
        _("items"),
        green(_("good")),
        red(_("bad")),
        cyan(_("unknown")),
        _("health"),
        _("duration"),
    ], ROW_SEPARATOR]

    for health, node_name, items, good, bad, unknown, duration in node_ranking:
        rows.append([
            node_name,
            str(items),
            green_unless_zero(good),
            red_unless_zero(bad),
            cyan_unless_zero(unknown),
            "{0:.1f}%".format(health),
            format_duration(duration),
        ])

    if len(node_ranking) > 1:
        rows.append(ROW_SEPARATOR)
        rows.append([
            bold(_("total ({} nodes)").format(len(node_stats.keys()))),
            str(totals['items']),
            green_unless_zero(totals['good']),
            red_unless_zero(totals['bad']),
            cyan_unless_zero(totals['unknown']),
            "{0:.1f}%".format(totals['health']),
            format_duration(total_duration),
        ])

    alignments = {
        1: 'right',
        2: 'right',
        3: 'right',
        4: 'right',
        5: 'right',
        6: 'right',
        7: 'right',
    }

    for line in render_table(rows, alignments=alignments):
        io.stdout("{x} {line}".format(x=blue("i"), line=line))


def bw_verify(repo, args):
    errors = []
    node_stats = {}
    pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
    start_time = datetime.now()
    io.progress_set_total(count_items(pending_nodes))

    def tasks_available():
        return bool(pending_nodes)

    def next_task():
        node = pending_nodes.pop()
        return {
            'target': node.verify,
            'task_id': node.name,
            'kwargs': {
                'show_all': args['show_all'],
                'workers': args['item_workers'],
            },
        }

    def handle_result(task_id, return_value, duration):
        node_stats[task_id] = return_value

    def handle_exception(task_id, exception, traceback):
        msg = "{}: {}".format(
            task_id,
            exception,
        )
        io.stderr(traceback)
        io.stderr(repr(exception))
        io.stderr(msg)
        errors.append(msg)

    worker_pool = WorkerPool(
        tasks_available,
        next_task,
        handle_result=handle_result,
        handle_exception=handle_exception,
        pool_id="verify",
        workers=args['node_workers'],
    )
    worker_pool.run()

    if args['summary'] and node_stats:
        stats_summary(node_stats, datetime.now() - start_time)
    error_summary(errors)

    exit(1 if errors else 0)
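

# Illustrative sketch (not part of BundleWrap): the callback protocol that
# WorkerPool expects, modeled on bw_verify() above but with made-up tasks.
def _demo_worker_pool():
    pending = ["node1", "node2", "node3"]
    results = {}

    def tasks_available():
        return bool(pending)

    def next_task():
        name = pending.pop()
        return {
            'target': lambda n: n.upper(),  # stand-in for real work
            'task_id': name,
            'args': (name,),
        }

    def handle_result(task_id, return_value, duration):
        results[task_id] = return_value

    WorkerPool(
        tasks_available,
        next_task,
        handle_result=handle_result,
        pool_id="demo",
        workers=2,
    ).run()
    return results  # e.g. {'node3': 'NODE3', 'node2': 'NODE2', 'node1': 'NODE1'}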
# bundlewrap-3.8.0/bundlewrap/cmdline/zen.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from ..utils.text import mark_for_translation as _
from ..utils.ui import io


ZEN = _("""
, @@ @@@@ @@@@@ @@@@@ @@@@@ @@@@@ @@@@@ @@@@@
'@@@@@@, .@@@@@@+ +@@@@@@. @@@@@@, `@@@@@@@ +@@@@@@, `@@@@@@#
@@@@@@@@+ :@@@@@@' `@@@@@@@ ;@@@@@@: @@@@@@@@@@@` #@@@@@@. :@@@@@@' @@@@@@@` @@@@@
;@@@@@@; .@@@@@@# #@@@@@@` ,@@@@@@+ @@@@@ `@@@@@@#'@@@@@@: .@@@@@@+ +@@@@@@. @@@@@
+@@@@@@@@@ +@@@@@@, `@@@@@@# @@@@@ ,@@@@@@+ `@@@@@@@@@` ;@@@@@@: @@@@@
@@@@@@@` :@@@@@@'@@@@@@' @@@@@@@` @@@@@ ;@@@@@@#@@@@@@` `@@@@@@@@@@@@@+
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@# +@@@@@@@@. @@@@@@@@@@@@@@@@@@@@@@@@@@@, .@@@#

The Zen of BundleWrap
─────────────────────

BundleWrap is a tool, not a solution.
BundleWrap will not write your configuration for you.
BundleWrap is Python all the way down.
BundleWrap will adapt rather than grow.
BundleWrap is the single point of truth.
""")


def bw_zen(repo, args):
    io.stdout(ZEN)
# bundlewrap-3.8.0/bundlewrap/concurrency.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED
from datetime import datetime
from random import randint
from sys import exit
from traceback import format_tb

from .utils.text import mark_for_translation as _
from .utils.ui import io, QUIT_EVENT


JOIN_TIMEOUT = 5  # seconds


class WorkerPool(object):
    """
    Manages a bunch of worker threads.
    """
    def __init__(
        self,
        tasks_available,
        next_task,
        handle_result=None,
        handle_exception=None,
        cleanup=None,
        pool_id=None,
        workers=4,
    ):
        if workers < 1:
            raise ValueError(_("at least one worker is required"))

        self.tasks_available = tasks_available
        self.next_task = next_task
        self.handle_result = handle_result
        self.handle_exception = handle_exception
        self.cleanup = cleanup

        self.number_of_workers = workers
        self.idle_workers = set(range(self.number_of_workers))

        self.pool_id = "unnamed_pool_{}".format(randint(1, 99999)) if pool_id is None else pool_id
        self.pending_futures = {}

    def _get_result(self):
        """
        Blocks until a result from a worker is received.
        """
        io.debug(_("worker pool {pool} waiting for next task to complete").format(
            pool=self.pool_id,
        ))
        while True:
            # we must use a timeout here to allow Python <3.3 to call
            # its SIGINT handler
            # see also http://stackoverflow.com/q/25676835
            completed, pending = wait(
                self.pending_futures.keys(),
                return_when=FIRST_COMPLETED,
                timeout=0.1,
            )
            if completed:
                break

        future = completed.pop()

        start_time = self.pending_futures[future]['start_time']
        task_id = self.pending_futures[future]['task_id']
        worker_id = self.pending_futures[future]['worker_id']

        del self.pending_futures[future]
        self.idle_workers.add(worker_id)

        exception = future.exception()
        if exception:
            io.debug(_(
                "exception raised while executing task {task} on worker #{worker} "
                "of worker pool {pool}"
            ).format(
                pool=self.pool_id,
                task=task_id,
                worker=worker_id,
            ))
            if not hasattr(exception, '__traceback__'):  # Python 2
                exception.__traceback__ = future.exception_info()[1]
            exception.__task_id = task_id
            raise exception
        else:
            io.debug(_(
                "worker pool {pool} delivering result of {task} on worker #{worker}"
            ).format(
                pool=self.pool_id,
                task=task_id,
                worker=worker_id,
            ))
            return (task_id, future.result(), datetime.now() - start_time)

    def start_task(self, target=None, task_id=None, args=None, kwargs=None):
        """
        target      any callable (includes bound methods)
        task_id     something to remember this worker by
        args        list of positional arguments passed to target
        kwargs      dictionary of keyword arguments passed to target
        """
        if args is None:
            args = []
        else:
            args = list(args)
        if kwargs is None:
            kwargs = {}

        task_id = "unnamed_task_{}".format(randint(1, 99999)) if task_id is None else task_id
        worker_id = self.idle_workers.pop()

        io.debug(_("worker pool {pool} is starting task {task} on worker #{worker}").format(
            pool=self.pool_id,
            task=task_id,
            worker=worker_id,
        ))

        self.pending_futures[self.executor.submit(target, *args, **kwargs)] = {
            'start_time': datetime.now(),
            'task_id': task_id,
            'worker_id': worker_id,
        }

    def run(self):
        io.debug(_("spinning up worker pool {pool}").format(pool=self.pool_id))
        processed_results = []
        exit_code = None
        self.executor = ThreadPoolExecutor(max_workers=self.number_of_workers)
        try:
            while (
                (self.tasks_available() and not QUIT_EVENT.is_set()) or
                self.workers_are_running
            ):
                while (
                    self.tasks_available() and
                    self.workers_are_available and
                    not QUIT_EVENT.is_set()
                ):
                    task = self.next_task()
                    if task is not None:
                        self.start_task(**task)
                if self.workers_are_running:
                    try:
                        result = self._get_result()
                    except SystemExit as exc:
                        if exit_code is None:
                            # Don't overwrite exit code if it has already been set.
                            # This may be a worker exiting with 0 only because
                            # a previous worker raised SystemExit with 1.
                            # We must preserve that original exit code.
                            exit_code = exc.code
                        # just make sure QUIT_EVENT is set and continue
                        # waiting for pending results
                        QUIT_EVENT.set()
                    except Exception as exc:
                        traceback = "".join(format_tb(exc.__traceback__))
                        if self.handle_exception is None:
                            raise exc
                        else:
                            processed_results.append(
                                self.handle_exception(exc.__task_id, exc, traceback)
                            )
                    else:
                        if self.handle_result is not None:
                            processed_results.append(self.handle_result(*result))
            if QUIT_EVENT.is_set():
                # we have reaped all our workers, let's stop this thread
                # before it does anything else
                exit(0 if exit_code is None else exit_code)
            return processed_results
        finally:
            io.debug(_("shutting down worker pool {pool}").format(pool=self.pool_id))
            if self.cleanup:
                self.cleanup()
            self.executor.shutdown()
            io.debug(_("worker pool {pool} has been shut down").format(pool=self.pool_id))

    @property
    def workers_are_available(self):
        return bool(self.idle_workers)

    @property
    def workers_are_running(self):
        return bool(self.pending_futures)


# bundlewrap-3.8.0/bundlewrap/deps.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from .exceptions import BundleError, ItemDependencyError, NoSuchItem
from .items import Item
from .items.actions import Action
from .utils.text import bold, mark_for_translation as _
from .utils.ui import io


class DummyItem(object):
    bundle = None
    triggered = False

    def __init__(self, *args, **kwargs):
        self.needed_by = []
        self.needs = []
        self.preceded_by = []
        self.precedes = []
        self.tags = []
        self.triggered_by = []
        self.triggers = []
        self._deps = []
        self._precedes_items = []

    def __lt__(self, other):
        return self.id < other.id

    def _precedes_incorrect_item(self):
        return False

    def apply(self, *args, **kwargs):
        return (Item.STATUS_OK, [])

    def test(self):
        pass


class BundleItem(DummyItem):
    """
    Represents a dependency on all items in a certain bundle.
    """
    ITEM_TYPE_NAME = 'bundle'

    def __init__(self, bundle):
        self.bundle = bundle
        super(BundleItem, self).__init__()

    def __repr__(self):
        return "<BundleItem: {}>".format(self.bundle.name)

    @property
    def id(self):
        return "bundle:{}".format(self.bundle.name)


class TagItem(DummyItem):
    """
    This item depends on all items with the given tag.
    """
    ITEM_TYPE_NAME = 'tag'

    def __init__(self, tag_name):
        self.tag_name = tag_name
        super(TagItem, self).__init__()

    def __repr__(self):
        return "<TagItem: {}>".format(self.tag_name)

    @property
    def id(self):
        return "tag:{}".format(self.tag_name)


class TypeItem(DummyItem):
    """
    Represents a dependency on all items of a certain type.
    """
    ITEM_TYPE_NAME = 'type'

    def __init__(self, item_type):
        self.item_type = item_type
        super(TypeItem, self).__init__()

    def __repr__(self):
        return "<TypeItem: {}>".format(self.item_type)

    @property
    def id(self):
        return "{}:".format(self.item_type)


def find_item(item_id, items):
    """
    Returns the first item with the given ID within the given list of
    items.
    """
    try:
        item = list(filter(lambda item: item.id == item_id, items))[0]
    except IndexError:
        raise NoSuchItem(_("item not found: {}").format(item_id))
    return item


def _find_items_of_types(item_types, items, include_dummy=False):
    """
    Returns a subset of items with any of the given types.
    """
    for item_id, item in items.items():
        if item_id.split(":", 1)[0] in item_types and (
            include_dummy or not isinstance(item, DummyItem)
        ):
            yield item


def _flatten_dependencies(items):
    """
    This will cause all dependencies - direct AND inherited - to be
    listed in item._flattened_deps.
    """
    for item in items.values():
        if not hasattr(item, '_flattened_deps'):
            _flatten_deps_for_item(item, items)
    for item in list(items.values()):
        item._incoming_deps = set()
        for other_item in list(items.values()):
            if isinstance(other_item, DummyItem):
                continue
            if item.id in other_item._flattened_deps:
                item._incoming_deps.add(other_item)
    return items


def _flatten_deps_for_item(item, items):
    """
    Recursively retrieves and returns a list of all inherited
    dependencies of the given item.

    This can handle loops, but will ignore them.
    """
    item._flattened_deps = set(item._deps)

    for dep in item._deps:
        try:
            dep_item = items[dep]
        except KeyError:
            raise ItemDependencyError(_(
                "'{item}' in bundle '{bundle}' has a dependency (needs) "
                "on '{dep}', which doesn't exist"
            ).format(
                item=item.id,
                bundle=item.bundle.name,
                dep=dep,
            ))
        # Don't recurse if we have already resolved nested dependencies
        # for this item. Also serves as a guard against infinite
        # recursion when there are loops.
        if not hasattr(dep_item, '_flattened_deps'):
            _flatten_deps_for_item(dep_item, items)

        item._flattened_deps |= set(dep_item._flattened_deps)

    item._flattened_deps = sorted(item._flattened_deps)
chain_groups = [] for item_type in item_types: block_concurrent = [item_type.ITEM_TYPE_NAME] block_concurrent.extend(item_type.block_concurrent(node_os, node_os_version)) for blocked_types in chain_groups: for blocked_type in block_concurrent: if blocked_type in blocked_types: blocked_types.extend(block_concurrent) break else: chain_groups.append(block_concurrent) # daisy-chain all items of the chain group while respecting existing # dependencies between them for blocked_types in chain_groups: blocked_types = set(blocked_types) type_items = list(_find_items_of_types( blocked_types, items, )) processed_items = [] for item in type_items: # disregard deps to items of other types item.__deps = list(filter( lambda dep: dep.split(":", 1)[0] in blocked_types, item._flattened_deps, )) previous_item = None while len(processed_items) < len(type_items): # find the first item without same-type deps we haven't # processed yet try: item = list(filter( lambda item: not item.__deps and item not in processed_items, type_items, ))[0] except IndexError: # this can happen if the flattened deps of all items of # this type already contain a dependency on another # item of this type break if previous_item is not None: # unless we're at the first item # add dep to previous item -- unless it's already in there if previous_item.id not in item._deps: item._deps.append(previous_item.id) item._concurrency_deps.append(previous_item.id) item._flattened_deps.append(previous_item.id) previous_item = item processed_items.append(item) # Now remove all deps on the processed item. This frees up # items depending *only* on the processed item to be # eligible for the next iteration of this loop. for other_item in type_items: try: other_item.__deps.remove(item.id) except ValueError: pass return items def _inject_tag_items(items): """ Takes a list of items and adds tag items depending on each type of item in the list. Returns the appended list. """ tag_items = {} for item in items.values(): for tag in item.tags: if tag not in tag_items: tag_items[tag] = TagItem(tag) tag_items[tag]._deps.append(item.id) items.update({item.id: item for item in tag_items.values()}) return items def _inject_type_items(items): """ Takes a list of items and adds dummy items depending on each type of item in the list. Returns the appended list. """ type_items = {} for item in items.values(): # create dummy items that depend on each item of their type item_type = item.id.split(":")[0] if item_type not in type_items: type_items[item_type] = TypeItem(item_type) type_items[item_type]._deps.append(item.id) # create DummyItem for every type for dep in item._deps: item_type = dep.split(":")[0] if item_type not in type_items: type_items[item_type] = TypeItem(item_type) items.update({item.id: item for item in type_items.values()}) return items def _inject_reverse_dependencies(items): """ Looks for 'needed_by' deps and creates standard dependencies accordingly. 
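
    Hypothetical example (item IDs invented): an item declaring

        needed_by = ["file:/etc/foo.conf", "tag:webserver", "pkg_apt:"]

    gets its own ID appended to the _deps of that one file item, of
    every item tagged "webserver", and of every pkg_apt item.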
""" def add_dep(item, dep): if dep not in item._deps: item._deps.append(dep) item._reverse_deps.append(dep) for item in items.values(): item._reverse_deps = [] for item in items.values(): for depending_item_id in item.needed_by: # bundle items if depending_item_id.startswith("bundle:"): depending_bundle_name = depending_item_id.split(":")[1] for depending_item in items.values(): if isinstance(depending_item, DummyItem): # Type and TagItems don't have bundles continue if depending_item.bundle.name == depending_bundle_name: add_dep(depending_item, item.id) # tag items if depending_item_id.startswith("tag:"): tag_name = depending_item_id.split(":")[1] for depending_item in items.values(): if tag_name in depending_item.tags: add_dep(depending_item, item.id) # type items if depending_item_id.endswith(":"): target_type = depending_item_id[:-1] for depending_item in _find_items_of_types([target_type], items): add_dep(depending_item, item.id) # single items else: try: depending_item = items[depending_item_id] except KeyError: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a reverse dependency (needed_by) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=depending_item_id, )) add_dep(depending_item, item.id) return items def _inject_reverse_triggers(items): """ Looks for 'triggered_by' and 'precedes' attributes and turns them into standard triggers (defined on the opposing end). """ for item in items.values(): for triggering_item_id in item.triggered_by: try: triggering_item = items[triggering_item_id] except KeyError: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a reverse trigger (triggered_by) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=triggering_item_id, )) if triggering_item.id.startswith("bundle:"): # bundle items bundle_name = triggering_item.id.split(":")[1] for actual_triggering_item in items.values(): if triggering_item.bundle.name == bundle_name: actual_triggering_item.triggers.append(item.id) elif triggering_item.id.startswith("tag:"): # tag items tag_name = triggering_item.id.split(":")[1] for actual_triggering_item in items.values(): if tag_name in triggering_item.tags: actual_triggering_item.triggers.append(item.id) elif triggering_item.id.endswith(":"): # type items target_type = triggering_item.id[:-1] for actual_triggering_item in _find_items_of_types([target_type], items): actual_triggering_item.triggers.append(item.id) else: triggering_item.triggers.append(item.id) for preceded_item_id in item.precedes: try: preceded_item = items[preceded_item_id] except KeyError: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a reverse trigger (precedes) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=preceded_item_id, )) if preceded_item.id.startswith("bundle:"): # bundle items bundle_name = preceded_item.id.split(":")[1] for actual_preceded_item in items.values(): if actual_preceded_item.bundle.name == bundle_name: actual_preceded_item.preceded_by.append(item.id) elif preceded_item.id.startswith("tag:"): # tag items tag_name = preceded_item.id.split(":")[1] for actual_preceded_item in items.values(): if tag_name in actual_preceded_item.tags: actual_preceded_item.preceded_by.append(item.id) elif preceded_item.id.endswith(":"): # type items target_type = preceded_item.id[:-1] for actual_preceded_item in _find_items_of_types([target_type], items): actual_preceded_item.preceded_by.append(item.id) else: 
preceded_item.preceded_by.append(item.id) return items def _inject_trigger_dependencies(items): """ Injects dependencies from all triggered items to their triggering items. """ for item in items.values(): for triggered_item_id in item.triggers: try: triggered_item = items[triggered_item_id] except KeyError: raise BundleError(_( "unable to find definition of '{item1}' triggered " "by '{item2}' in bundle '{bundle}'" ).format( bundle=item.bundle.name, item1=triggered_item_id, item2=item.id, )) if not triggered_item.triggered: raise BundleError(_( "'{item1}' in bundle '{bundle1}' triggered " "by '{item2}' in bundle '{bundle2}', " "but missing 'triggered' attribute" ).format( item1=triggered_item.id, bundle1=triggered_item.bundle.name, item2=item.id, bundle2=item.bundle.name, )) triggered_item._deps.append(item.id) return items def _inject_preceded_by_dependencies(items): """ Injects dependencies from all triggering items to their preceded_by items and attaches triggering items to preceding items. """ for item in items.values(): if item.preceded_by and item.triggered: raise BundleError(_( "triggered item '{item}' in bundle '{bundle}' must not use " "'preceded_by' (use chained triggers instead)".format( bundle=item.bundle.name, item=item.id, ), )) for triggered_item_id in item.preceded_by: try: triggered_item = items[triggered_item_id] except KeyError: raise BundleError(_( "unable to find definition of '{item1}' preceding " "'{item2}' in bundle '{bundle}'" ).format( bundle=item.bundle.name, item1=triggered_item_id, item2=item.id, )) if not triggered_item.triggered: raise BundleError(_( "'{item1}' in bundle '{bundle1}' precedes " "'{item2}' in bundle '{bundle2}', " "but missing 'triggered' attribute" ).format( item1=triggered_item.id, bundle1=triggered_item.bundle.name, item2=item.id, bundle2=item.bundle.name if item.bundle else "N/A", )) triggered_item._precedes_items.append(item) item._deps.append(triggered_item.id) return items @io.job_wrapper(_("{} processing dependencies").format(bold("{1}"))) def prepare_dependencies(items, node_os, node_os_version): """ Performs all dependency preprocessing on a list of items. """ for item in items: item._check_bundle_collisions(items) item._check_loopback_dependency() item._prepare_deps(items) # transform items into a dict to prevent repeated item.id lookups items = {item.id: item for item in items} items = _inject_bundle_items(items) items = _inject_tag_items(items) items = _inject_type_items(items) items = _inject_canned_actions(items) items = _inject_reverse_triggers(items) items = _inject_reverse_dependencies(items) items = _inject_trigger_dependencies(items) items = _inject_preceded_by_dependencies(items) items = _flatten_dependencies(items) items = _inject_concurrency_blockers(items, node_os, node_os_version) for item in items.values(): if not isinstance(item, DummyItem): item._check_redundant_dependencies() return list(items.values()) def remove_dep_from_items(items, dep): """ Removes the given item id (dep) from the temporary list of dependencies of all items in the given list. """ for item in items: try: item._deps.remove(dep) except ValueError: pass return items def remove_item_dependents(items, dep_item, skipped=False): """ Removes the items depending on the given item from the list of items. 
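
    Sketch of a call (IDs invented): if file:/etc/a.conf depends on
    pkg_apt:foo and pkg_apt:foo was skipped,

        items, removed = remove_item_dependents(items, pkg_foo_item)

    removes the file item (and, recursively, its own dependents) from
    `items` and returns them in the second element of the tuple.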
""" removed_items = [] for item in items: if dep_item.id in item._deps: if _has_trigger_path(items, dep_item, item.id): # triggered items cannot be removed here since they # may yet be triggered by another item and will be # skipped anyway if they aren't item._deps.remove(dep_item.id) elif skipped and isinstance(item, DummyItem) and \ dep_item.triggered and not dep_item.has_been_triggered: # don't skip dummy items because of untriggered members # see issue #151; separate elif for clarity item._deps.remove(dep_item.id) elif dep_item.id in item._concurrency_deps: # don't skip items just because of concurrency deps # separate elif for clarity item._deps.remove(dep_item.id) else: removed_items.append(item) for item in removed_items: items.remove(item) if removed_items: io.debug( "skipped these items because they depend on {item}, which was " "skipped previously: {skipped}".format( item=dep_item.id, skipped=", ".join([item.id for item in removed_items]), ) ) all_recursively_removed_items = [] for removed_item in removed_items: items, recursively_removed_items = \ remove_item_dependents(items, removed_item, skipped=skipped) all_recursively_removed_items += recursively_removed_items return (items, removed_items + all_recursively_removed_items) def split_items_without_deps(items): """ Takes a list of items and extracts the ones that don't have any dependencies. The extracted deps are returned as a list. """ remaining_items = [] removed_items = [] for item in items: if item._deps: remaining_items.append(item) else: removed_items.append(item) return (remaining_items, removed_items) bundlewrap-3.8.0/bundlewrap/exceptions.py000066400000000000000000000066151360562404000205730ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from sys import version_info class UnicodeException(Exception): def __init__(self, msg=""): if version_info >= (3, 0): super(UnicodeException, self).__init__(msg) else: super(UnicodeException, self).__init__(msg.encode('utf-8')) class ActionFailure(UnicodeException): """ Raised when an action failes to meet the expected rcode/output. """ pass class DontCache(Exception): """ Used in the cached_property decorator to temporily prevent caching the returned result """ def __init__(self, obj): self.obj = obj class FaultUnavailable(UnicodeException): """ Raised when a Fault object cannot be resolved. """ pass class GracefulApplyException(UnicodeException): """ Raised when a problem has been encountered in `bw apply`, but a more verbose error has already been printed. """ pass class NoSuchBundle(UnicodeException): """ Raised when a bundle of unknown name is requested. """ pass class NoSuchGroup(UnicodeException): """ Raised when a group of unknown name is requested. """ pass class NoSuchItem(UnicodeException): """ Raised when an item of unknown name is requested. """ pass class NoSuchNode(UnicodeException): """ Raised when a node of unknown name is requested. """ pass class NoSuchPlugin(UnicodeException): """ Raised when a plugin of unknown name is requested. """ pass class RemoteException(UnicodeException): """ Raised when a shell command on a node fails. """ pass class RepositoryError(UnicodeException): """ Indicates that somethings is wrong with the current repository. """ pass class BundleError(RepositoryError): """ Indicates an error in a bundle. """ pass class ItemDependencyError(RepositoryError): """ Indicates a problem with item dependencies (e.g. loops). 
""" pass class ItemDependencyLoop(ItemDependencyError): """ Raised when there is a loop in item dependencies. """ def __init__(self, items): self.items = items def __repr__(self): return "".format(len(self.items)) def __str__(self): return "".format(len(self.items)) class NoSuchRepository(RepositoryError): """ Raised when trying to get a Repository object from a directory that is not in fact a repository. """ pass class MissingRepoDependency(RepositoryError): """ Raised when a dependency from requirements.txt is missing. """ pass class PluginError(RepositoryError): """ Indicates an error related to a plugin. """ pass class PluginLocalConflict(PluginError): """ Raised when a plugin tries to overwrite locally-modified files. """ pass class SkipNode(UnicodeException): """ Can be raised by hooks to skip a node. """ pass class TemplateError(RepositoryError): """ Raised when an error occurs while rendering a template. """ pass class UsageException(UnicodeException): """ Raised when command line options don't make sense. """ pass class NodeLockedException(Exception): """ Raised when a node is already locked during an 'apply' run. """ pass bundlewrap-3.8.0/bundlewrap/group.py000066400000000000000000000171011360562404000175360ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals import re from .exceptions import NoSuchGroup, NoSuchNode, RepositoryError from .utils import cached_property, names from .utils.dicts import hash_statedict from .utils.text import mark_for_translation as _, validate_name GROUP_ATTR_DEFAULTS = { 'cmd_wrapper_inner': "export LANG=C; {}", 'cmd_wrapper_outer': "sudo sh -c {}", 'dummy': False, 'kubectl_context': None, 'os': 'linux', # Setting os_version to 0 by default will probably yield less # surprises than setting it to max_int. Users will probably # start at a certain version and then gradually update their # systems, adding conditions like this: # # if node.os_version >= (2,): # new_behavior() # else: # old_behavior() # # If we set os_version to max_int, nodes without an explicit # os_version would automatically adopt the new_behavior() as # soon as it appears in the repo - which is probably not what # people want. 'os_version': (0,), 'use_shadow_passwords': True, } def _build_error_chain(loop_node, last_node, nodes_in_between): """ Used to illustrate subgroup loop paths in error messages. loop_node: name of node that loops back to itself last_node: name of last node pointing back to loop_node, causing the loop nodes_in_between: names of nodes traversed during loop detection, does include loop_node if not a direct loop, but not last_node """ error_chain = [] for visited in nodes_in_between: if (loop_node in error_chain) != (loop_node == visited): error_chain.append(visited) error_chain.append(last_node) error_chain.append(loop_node) return error_chain class Group(object): """ A group of nodes. 
""" def __init__(self, group_name, infodict=None): if infodict is None: infodict = {} if not validate_name(group_name): raise RepositoryError(_("'{}' is not a valid group name.").format(group_name)) self.name = group_name self.bundle_names = infodict.get('bundles', []) self.immediate_subgroup_names = infodict.get('subgroups', []) self.immediate_subgroup_patterns = infodict.get('subgroup_patterns', []) self.members_add = infodict.get('members_add', None) self.members_remove = infodict.get('members_remove', None) self.metadata = infodict.get('metadata', {}) self.node_patterns = infodict.get('member_patterns', []) self.static_member_names = infodict.get('members', []) for attr in GROUP_ATTR_DEFAULTS: # defaults are applied in node.py setattr(self, attr, infodict.get(attr)) def __lt__(self, other): return self.name < other.name def __repr__(self): return "".format(self.name) def __str__(self): return self.name @cached_property def cdict(self): group_dict = {} for node in self.nodes: group_dict[node.name] = node.hash() return group_dict def group_membership_hash(self): return hash_statedict(sorted(names(self.nodes))) def hash(self): return hash_statedict(self.cdict) def metadata_hash(self): group_dict = {} for node in self.nodes: group_dict[node.name] = node.metadata_hash() return hash_statedict(group_dict) @cached_property def nodes(self): for node in self.repo.nodes: if node.in_group(self.name): yield node @cached_property def _static_nodes(self): result = set() result.update(self._nodes_from_members) result.update(self._nodes_from_patterns) return result @property def _subgroup_names_from_patterns(self): for pattern in self.immediate_subgroup_patterns: compiled_pattern = re.compile(pattern) for group in self.repo.groups: if compiled_pattern.search(group.name) is not None and group != self: yield group.name @property def _nodes_from_members(self): for node_name in self.static_member_names: try: yield self.repo.get_node(node_name) except NoSuchNode: raise RepositoryError(_( "Group '{group}' has '{node}' listed as a member in groups.py, " "but no such node could be found." ).format( group=self.name, node=node_name, )) @property def _nodes_from_patterns(self): for pattern in self.node_patterns: compiled_pattern = re.compile(pattern) for node in self.repo.nodes: if not compiled_pattern.search(node.name) is None: yield node def _check_subgroup_names(self, visited_names): """ Recursively finds subgroups and checks for loops. """ for name in set( list(self.immediate_subgroup_names) + list(self._subgroup_names_from_patterns) ): if name not in visited_names: try: group = self.repo.get_group(name) except NoSuchGroup: raise RepositoryError(_( "Group '{group}' has '{subgroup}' listed as a subgroup in groups.py, " "but no such group could be found." ).format( group=self.name, subgroup=name, )) for group_name in group._check_subgroup_names( visited_names + [self.name], ): yield group_name else: error_chain = _build_error_chain( name, self.name, visited_names, ) raise RepositoryError(_( "Group '{group}' can't be a subgroup of itself. " "({chain})" ).format( group=name, chain=" -> ".join(error_chain), )) if self.name not in visited_names: yield self.name @cached_property def parent_groups(self): for group in self.repo.groups: if self in group.subgroups: yield group @cached_property def immediate_parent_groups(self): for group in self.repo.groups: if self in group.immediate_subgroups: yield group @cached_property def subgroups(self): """ Iterator over all subgroups as group objects. 
""" for group_name in set(self._check_subgroup_names([self.name])): yield self.repo.get_group(group_name) @cached_property def immediate_subgroups(self): """ Iterator over all immediate subgroups as group objects. """ for group_name in set( list(self.immediate_subgroup_names) + list(self._subgroup_names_from_patterns) ): try: yield self.repo.get_group(group_name) except NoSuchGroup: raise RepositoryError(_( "Group '{group}' has '{subgroup}' listed as a subgroup in groups.py, " "but no such group could be found." ).format( group=self.name, subgroup=group_name, )) bundlewrap-3.8.0/bundlewrap/itemqueue.py000066400000000000000000000100731360562404000204060ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from .deps import ( DummyItem, find_item, prepare_dependencies, remove_item_dependents, remove_dep_from_items, split_items_without_deps, ) from .exceptions import NoSuchItem from .utils.text import mark_for_translation as _ from .utils.ui import io class BaseQueue(object): def __init__(self, items, node_os, node_os_version): self.items_with_deps = prepare_dependencies(items, node_os, node_os_version) self.items_without_deps = [] self._split() self.pending_items = [] def _split(self): self.items_with_deps, self.items_without_deps = \ split_items_without_deps(self.all_items) @property def all_items(self): return self.items_with_deps + self.items_without_deps class ItemQueue(BaseQueue): def item_failed(self, item): """ Called when an item could not be fixed. Yields all items that have been skipped as a result by cascading. """ for skipped_item in self.item_skipped(item, _skipped=False): yield skipped_item def item_fixed(self, item): """ Called when an item has successfully been fixed. """ self.item_ok(item) self._fire_triggers_for_item(item) def item_ok(self, item): """ Called when an item didn't need to be fixed. """ self.pending_items.remove(item) # if an item is applied successfully, all dependencies on it can # be removed from the remaining items self.items_with_deps = remove_dep_from_items( self.items_with_deps, item.id, ) self._split() def item_skipped(self, item, _skipped=True): """ Called when an item has been skipped. Yields all items that have been skipped as a result by cascading. """ self.pending_items.remove(item) if item.cascade_skip: # if an item fails or is skipped, all items that depend on # it shall be removed from the queue self.items_with_deps, skipped_items = remove_item_dependents( self.items_with_deps, item, skipped=_skipped, ) # since we removed them from further processing, we # fake the status of the removed items so they still # show up in the result statistics for skipped_item in skipped_items: if not isinstance(skipped_item, DummyItem): yield skipped_item else: self.items_with_deps = remove_dep_from_items( self.items_with_deps, item.id, ) self._split() def pop(self): """ Gets the next item available for processing and moves it into self.pending_items. Will raise IndexError if no item is available. """ if not self.items_without_deps: raise IndexError item = self.items_without_deps.pop() self.pending_items.append(item) return item def _fire_triggers_for_item(self, item): for triggered_item_id in item.triggers: try: triggered_item = find_item( triggered_item_id, self.all_items, ) triggered_item.has_been_triggered = True except NoSuchItem: io.debug(_( "{item} tried to trigger {triggered_item}, " "but it wasn't available. It must have been skipped previously." 
).format( item=item.id, triggered_item=triggered_item_id, )) class ItemTestQueue(BaseQueue): """ A simpler variation of ItemQueue that is used by `bw test` to check for circular dependencies. """ def pop(self): item = self.items_without_deps.pop() self.items_with_deps = remove_dep_from_items(self.items_with_deps, item.id) self._split() return item bundlewrap-3.8.0/bundlewrap/items/000077500000000000000000000000001360562404000171515ustar00rootroot00000000000000bundlewrap-3.8.0/bundlewrap/items/__init__.py000066400000000000000000000700171360562404000212670ustar00rootroot00000000000000# -*- coding: utf-8 -*- """ Note that modules in this package have to use absolute imports because Repository.item_classes loads them as files. """ from __future__ import unicode_literals from copy import copy from datetime import datetime from inspect import cleandoc from os.path import join from textwrap import TextWrapper from bundlewrap.exceptions import BundleError, ItemDependencyError, FaultUnavailable from bundlewrap.utils import cached_property from bundlewrap.utils.dicts import diff_keys, diff_value, hash_statedict, validate_statedict from bundlewrap.utils.text import force_text, mark_for_translation as _ from bundlewrap.utils.text import blue, bold, italic, wrap_question from bundlewrap.utils.ui import io BUILTIN_ITEM_ATTRIBUTES = { 'cascade_skip': None, 'comment': None, 'needed_by': [], 'needs': [], 'preceded_by': [], 'precedes': [], 'error_on_missing_fault': False, 'tags': [], 'triggered': False, 'triggered_by': [], 'triggers': [], 'unless': "", 'when_creating': {}, } wrapper = TextWrapper( break_long_words=False, break_on_hyphens=False, expand_tabs=False, replace_whitespace=False, ) def format_comment(comment): result = "\n\n" for line in wrapper.wrap(cleandoc(comment)): for inlineline in line.split("\n"): result += "# {}\n".format(italic(inlineline)) return result class ItemStatus(object): """ Holds information on a particular Item such as whether it needs fixing and what's broken. """ def __init__(self, cdict, sdict, display_dicts): self.cdict = cdict self.sdict = sdict self.keys_to_fix = [] self.must_be_deleted = (self.sdict is not None and self.cdict is None) self.must_be_created = (self.cdict is not None and self.sdict is None) if not self.must_be_deleted and not self.must_be_created: self.keys_to_fix = diff_keys(cdict, sdict) self.display_cdict, self.display_sdict, self.display_keys_to_fix = display_dicts( copy(cdict), copy(sdict), copy(self.keys_to_fix), ) def __repr__(self): return "".format(self.correct) @property def correct(self): return not self.must_be_deleted and not self.must_be_created and not bool(self.keys_to_fix) def make_normalize(attribute_default): """ This is to ensure you can pass filter() results and such in place of lists and have them converted to the proper type automatically. """ if type(attribute_default) in (list, dict): return type(attribute_default) else: return copy class Item(object): """ A single piece of configuration (e.g. a file, a package, a service). 
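
    A minimal sketch of a custom item type (names invented; the real
    implementations live in the bundlewrap.items.* modules):

        class Thing(Item):
            BUNDLE_ATTRIBUTE_NAME = "things"
            ITEM_TYPE_NAME = "thing"
            ITEM_ATTRIBUTES = {'installed': True}

            def sdict(self):
                # MUST be overridden: probe the node, return None if
                # the thing does not exist there
                ...

            def fix(self, status):
                # MUST be overridden: change the node to match cdict()
                ...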
""" BLOCK_CONCURRENT = [] BUNDLE_ATTRIBUTE_NAME = None ITEM_ATTRIBUTES = {} ITEM_TYPE_NAME = None REQUIRED_ATTRIBUTES = [] SKIP_REASON_CMDLINE = 1 SKIP_REASON_DEP_FAILED = 2 SKIP_REASON_FAULT_UNAVAILABLE = 3 SKIP_REASON_INTERACTIVE = 4 SKIP_REASON_INTERACTIVE_ONLY = 5 SKIP_REASON_NO_TRIGGER = 6 SKIP_REASON_SOFTLOCK = 7 SKIP_REASON_UNLESS = 8 SKIP_REASON_DEP_SKIPPED = 9 SKIP_REASON_DESC = { SKIP_REASON_CMDLINE: _("cmdline"), SKIP_REASON_DEP_FAILED: _("dependency failed"), SKIP_REASON_FAULT_UNAVAILABLE: _("Fault unavailable"), SKIP_REASON_INTERACTIVE: _("declined interactively"), SKIP_REASON_INTERACTIVE_ONLY: _("interactive only"), SKIP_REASON_NO_TRIGGER: _("not triggered"), SKIP_REASON_SOFTLOCK: _("soft locked"), SKIP_REASON_UNLESS: _("unless"), SKIP_REASON_DEP_SKIPPED: _("dependency skipped"), } STATUS_OK = 1 STATUS_FIXED = 2 STATUS_FAILED = 3 STATUS_SKIPPED = 4 STATUS_ACTION_SUCCEEDED = 5 WHEN_CREATING_ATTRIBUTES = {} @classmethod def block_concurrent(cls, node_os, node_os_version): """ Return a list of item types that cannot be applied in parallel with this item type. """ return [] def __init__( self, bundle, name, attributes, has_been_triggered=False, skip_validation=False, skip_name_validation=False, ): self.attributes = {} self.bundle = bundle self.has_been_triggered = has_been_triggered self.item_dir = join(bundle.bundle_dir, self.BUNDLE_ATTRIBUTE_NAME) self.item_data_dir = join(bundle.bundle_data_dir, self.BUNDLE_ATTRIBUTE_NAME) self.name = name self.node = bundle.node self.when_creating = {} self._faults_missing_for_attributes = set() self._precedes_items = [] if not skip_validation: if not skip_name_validation: self._validate_name(bundle, name) self.validate_name(bundle, name) self._validate_attribute_names(bundle, self.id, attributes) self._validate_required_attributes(bundle, self.id, attributes) self.validate_attributes(bundle, self.id, attributes) try: attributes = self.patch_attributes(attributes) except FaultUnavailable: self._faults_missing_for_attributes.add(_("unknown")) for attribute_name, attribute_default in BUILTIN_ITEM_ATTRIBUTES.items(): normalize = make_normalize(attribute_default) try: setattr(self, attribute_name, force_text(normalize(attributes.get( attribute_name, copy(attribute_default), )))) except FaultUnavailable: self._faults_missing_for_attributes.add(attribute_name) setattr(self, attribute_name, BUILTIN_ITEM_ATTRIBUTES[attribute_name]) for attribute_name, attribute_default in self.ITEM_ATTRIBUTES.items(): if attribute_name not in BUILTIN_ITEM_ATTRIBUTES: normalize = make_normalize(attribute_default) try: self.attributes[attribute_name] = force_text(normalize(attributes.get( attribute_name, copy(attribute_default), ))) except FaultUnavailable: self._faults_missing_for_attributes.add(attribute_name) for attribute_name, attribute_default in self.WHEN_CREATING_ATTRIBUTES.items(): normalize = make_normalize(attribute_default) try: self.when_creating[attribute_name] = force_text(normalize( attributes.get('when_creating', {}).get( attribute_name, copy(attribute_default), ) )) except FaultUnavailable: self._faults_missing_for_attributes.add('when_creating/' + attribute_name) if self.cascade_skip is None: self.cascade_skip = not (self.unless or self.triggered) if self.id in self.triggers: raise BundleError(_( "item {item} in bundle '{bundle}' can't trigger itself" ).format( bundle=self.bundle.name, item=self.id, )) def __lt__(self, other): return self.id < other.id def __str__(self): return self.id def __repr__(self): return "".format(self.id) def 
_check_bundle_collisions(self, items): for item in items: if item == self: continue if item.id == self.id: raise BundleError(_( "duplicate definition of {item} in bundles '{bundle1}' and '{bundle2}'" ).format( item=item.id, bundle1=item.bundle.name, bundle2=self.bundle.name, )) def _check_loopback_dependency(self): """ Alerts the user if they have an item depend on itself. """ if self.id in self.needs or self.id in self.needed_by: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' on node '{node}' cannot depend on itself" ).format( item=self.id, bundle=self.bundle.name, node=self.node.name, )) def _check_redundant_dependencies(self): """ Alerts the user if they have defined a redundant dependency (such as settings 'needs' on a triggered item pointing to the triggering item). """ for dep in self._deps: if self._deps.count(dep) > 1: raise ItemDependencyError(_( "redundant dependency of {item1} in bundle '{bundle}' on {item2}" ).format( bundle=self.bundle.name, item1=self.id, item2=dep, )) @cached_property def cached_cdict(self): if self._faults_missing_for_attributes: self._raise_for_faults() cdict = self.cdict() try: validate_statedict(cdict) except ValueError as e: raise ValueError(_( "{item} from bundle '{bundle}' returned invalid cdict: {msg}" ).format( bundle=self.bundle.name, item=self.id, msg=repr(e), )) return cdict @cached_property def cached_sdict(self): status = self.sdict() try: validate_statedict(status) except ValueError as e: raise ValueError(_( "{item} from bundle '{bundle}' returned invalid status: {msg}" ).format( bundle=self.bundle.name, item=self.id, msg=repr(e), )) return status @cached_property def cached_status(self): return self.get_status() @cached_property def cached_unless_result(self): """ Returns True if 'unless' wants to skip this item. """ if self.unless and (self.ITEM_TYPE_NAME == 'action' or not self.cached_status.correct): unless_result = self.node.run(self.unless, may_fail=True) return unless_result.return_code == 0 else: return False def _triggers_preceding_items(self, interactive=False): """ Preceding items will execute this to figure out if they're triggered. """ if self.cached_unless_result: # 'unless' says we don't need to run return False if self.ITEM_TYPE_NAME == 'action': # so we have an action where 'unless' says it must be run # but the 'interactive' attribute might still override that if self.attributes['interactive'] and not interactive: return False else: return True return not self.cached_status.correct def _prepare_deps(self, items): # merge automatic and user-defined deps self._deps = list(self.needs) + list(self.get_auto_deps(items)) def _raise_for_faults(self): raise FaultUnavailable(_( "{item} on {node} is missing faults " "for these attributes: {attrs} " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( attrs=", ".join(sorted(self._faults_missing_for_attributes)), item=self.id, node=self.node.name, )) def _skip_with_soft_locks(self, mine, others): """ Returns True/False depending on whether the item should be skipped based on the given set of locks. 
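
        Each lock is expected to be a dict shaped roughly like this
        (values invented; only the 'id' and 'items' keys are accessed
        here):

            {'id': "A3BC12", 'items': ["bundle:mybundle", "tag:web"]}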
""" for lock in mine: for selector in lock['items']: if self.covered_by_autoskip_selector(selector): io.debug(_("{item} on {node} whitelisted by lock {lock}").format( item=self.id, lock=lock['id'], node=self.node.name, )) return False for lock in others: for selector in lock['items']: if self.covered_by_autoskip_selector(selector): io.debug(_("{item} on {node} blacklisted by lock {lock}").format( item=self.id, lock=lock['id'], node=self.node.name, )) return True return False def _test(self): with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): if self._faults_missing_for_attributes: self._raise_for_faults() return self.test() @classmethod def _validate_attribute_names(cls, bundle, item_id, attributes): if not isinstance(attributes, dict): raise BundleError(_( "invalid item '{item}' in bundle '{bundle}': not a dict" ).format( item=item_id, bundle=bundle.name, )) invalid_attributes = set(attributes.keys()).difference( set(cls.ITEM_ATTRIBUTES.keys()).union( set(BUILTIN_ITEM_ATTRIBUTES.keys()) ), ) if invalid_attributes: raise BundleError(_( "invalid attribute(s) for '{item}' in bundle '{bundle}': {attrs}" ).format( item=item_id, bundle=bundle.name, attrs=", ".join(invalid_attributes), )) invalid_attributes = set(attributes.get('when_creating', {}).keys()).difference( set(cls.WHEN_CREATING_ATTRIBUTES.keys()) ) if invalid_attributes: raise BundleError(_( "invalid when_creating attribute(s) for '{item}' in bundle '{bundle}': {attrs}" ).format( item=item_id, bundle=bundle.name, attrs=", ".join(invalid_attributes), )) @classmethod def _validate_name(cls, bundle, name): if ":" in name: raise BundleError(_( "invalid name for {type} in bundle '{bundle}': {name} (must not contain colon)" ).format( bundle=bundle.name, name=name, type=cls.ITEM_TYPE_NAME, )) def _validate_required_attributes(cls, bundle, item_id, attributes): missing = [] for attrname in cls.REQUIRED_ATTRIBUTES: if attrname not in attributes: missing.append(attrname) if missing: raise BundleError(_( "{item} in bundle '{bundle}' missing required attribute(s): {attrs}" ).format( item=item_id, bundle=bundle.name, attrs=", ".join(missing), )) def apply( self, autoskip_selector="", autoonly_selector="", my_soft_locks=(), other_peoples_soft_locks=(), interactive=False, interactive_default=True, ): self.node.repo.hooks.item_apply_start( self.node.repo, self.node, self, ) status_code = None status_before = None status_after = None start_time = datetime.now() if not self.covered_by_autoonly_selector(autoonly_selector): io.debug(_( "autoonly does not match {item} on {node}" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_CMDLINE if self.covered_by_autoskip_selector(autoskip_selector): io.debug(_( "autoskip matches {item} on {node}" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_CMDLINE if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks): status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_SOFTLOCK for item in self._precedes_items: if item._triggers_preceding_items(interactive=interactive): io.debug(_( "preceding item {item} on {node} has been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) self.has_been_triggered = True break else: io.debug(_( "preceding item {item} on {node} has NOT been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) 
if self.triggered and not self.has_been_triggered and status_code is None: io.debug(_( "skipping {item} on {node} because it wasn't triggered" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_NO_TRIGGER if status_code is None and self.cached_unless_result and status_code is None: io.debug(_( "'unless' for {item} on {node} succeeded, not fixing" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_UNLESS if self._faults_missing_for_attributes and status_code is None: if self.error_on_missing_fault: self._raise_for_faults() else: io.debug(_( "skipping {item} on {node} because it is missing faults " "for these attributes: {attrs} " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( attrs=", ".join(sorted(self._faults_missing_for_attributes)), item=self.id, node=self.node.name, )) status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_FAULT_UNAVAILABLE if status_code is None: try: status_before = self.cached_status except FaultUnavailable: if self.error_on_missing_fault: self._raise_for_faults() else: io.debug(_( "skipping {item} on {node} because it is missing Faults " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( item=self.id, node=self.node.name, )) status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_FAULT_UNAVAILABLE else: if status_before.correct: status_code = self.STATUS_OK if status_code is None: if not interactive: with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): self.fix(status_before) else: if status_before.must_be_created: question_text = _("Doesn't exist. Will be created.") elif status_before.must_be_deleted: question_text = _("Found on node. Will be removed.") else: question_text = self.ask( status_before.display_cdict, status_before.display_sdict, status_before.display_keys_to_fix, ) if self.comment: question_text += format_comment(self.comment) question = wrap_question( self.id, question_text, _("Fix {}?").format(bold(self.id)), prefix="{x} {node} ".format( node=bold(self.node.name), x=blue("?"), ), ) answer = io.ask( question, interactive_default, epilogue="{x} {node}".format( node=bold(self.node.name), x=blue("?"), ), ) if answer: with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): self.fix(status_before) else: status_code = self.STATUS_SKIPPED skip_reason = self.SKIP_REASON_INTERACTIVE if status_code is None: status_after = self.get_status(cached=False) status_code = self.STATUS_FIXED if status_after.correct else self.STATUS_FAILED if status_code == self.STATUS_OK: details = None elif status_code == self.STATUS_SKIPPED: details = skip_reason elif status_before.must_be_created: details = True elif status_before.must_be_deleted: details = False elif status_code == self.STATUS_FAILED: details = status_after.display_keys_to_fix else: details = status_before.display_keys_to_fix self.node.repo.hooks.item_apply_end( self.node.repo, self.node, self, duration=datetime.now() - start_time, status_code=status_code, status_before=status_before, status_after=status_after, ) return (status_code, details) def ask(self, status_should, status_actual, relevant_keys): """ Returns a string asking the user if this item should be implemented. 
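
        Rough sketch of what the default implementation produces for a
        single differing key (values invented; the exact rendering is
        whatever utils.dicts.diff_value() returns):

            mode  "0644" -> "0755"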
""" result = [] for key in relevant_keys: result.append(diff_value(key, status_actual[key], status_should[key])) return "\n\n".join(result) def cdict(self): """ Return a statedict that describes the target state of this item as configured in the repo. Returning `None` instead means that the item should not exist. MAY be overridden by subclasses. """ return self.attributes def covered_by_autoskip_selector(self, autoskip_selector): """ True if this item should be skipped based on the given selector string (e.g. "tag:foo,bundle:bar"). """ components = [c.strip() for c in autoskip_selector.split(",")] if ( "*" in components or self.id in components or "bundle:{}".format(self.bundle.name) in components or "{}:".format(self.ITEM_TYPE_NAME) in components ): return True for tag in self.tags: if "tag:{}".format(tag) in components: return True return False def covered_by_autoonly_selector(self, autoonly_selector): """ True if this item should be NOT skipped based on the given selector string (e.g. "tag:foo,bundle:bar"). """ if not autoonly_selector: return True components = [c.strip() for c in autoonly_selector.split(",")] if ( self.id in components or "bundle:{}".format(self.bundle.name) in components or "{}:".format(self.ITEM_TYPE_NAME) in components ): return True for tag in self.tags: if "tag:{}".format(tag) in components: return True for depending_item in self._incoming_deps: if ( depending_item.id in components or "bundle:{}".format(depending_item.bundle.name) in components or "{}:".format(depending_item.ITEM_TYPE_NAME) in components ): return True for tag in depending_item.tags: if "tag:{}".format(tag) in components: return True return False def fix(self, status): """ This is supposed to actually implement stuff on the target node. MUST be overridden by subclasses. """ raise NotImplementedError() def get_auto_deps(self, items): """ Return a list of item IDs this item should have dependencies on. Be very careful when using this. There are few circumstances where this is really necessary. Only use this if you really need to examine the actual list of items in order to figure out your dependencies. MAY be overridden by subclasses. """ return [] def get_canned_actions(self): """ Return a dictionary of action definitions (mapping action names to dicts of action attributes, as in bundles). MAY be overridden by subclasses. """ return {} def get_status(self, cached=True): """ Returns an ItemStatus instance describing the current status of the item on the actual node. """ with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): if not cached: del self._cache['cached_sdict'] return ItemStatus(self.cached_cdict, self.cached_sdict, self.display_dicts) def hash(self): return hash_statedict(self.cached_cdict) @property def id(self): if self.ITEM_TYPE_NAME == 'action' and ":" in self.name: # canned actions don't have an "action:" prefix return self.name return "{}:{}".format(self.ITEM_TYPE_NAME, self.name) def verify(self): return self.cached_unless_result, self.cached_status def display_dicts(self, cdict, sdict, keys): """ Given cdict and sdict as implemented above, modify them to better suit interactive presentation. The keys parameter is a list of keys whose values differ between cdict and sdict. MAY be overridden by subclasses. """ return (cdict, sdict, keys) def patch_attributes(self, attributes): """ Allows an item to preprocess the attributes it is initialized with. Returns the modified attributes dictionary. 
MAY be overridden by subclasses. """ return attributes def preview(self): """ Can return a preview of this item as a Unicode string. BundleWrap will NOT add a trailing newline. MAY be overridden by subclasses. """ raise NotImplementedError() def sdict(self): """ Return a statedict that describes the actual state of this item on the node. Returning `None` instead means that the item does not exist on the node. For the item to validate as correct, the values for all keys in self.cdict() have to match this statedict. MUST be overridden by subclasses. """ raise NotImplementedError() def test(self): """ Used by `bw repo test`. Should do as much as possible to detect what would become a runtime error during a `bw apply`. Files will attempt to render their templates for example. SHOULD be overridden by subclasses """ pass @classmethod def validate_attributes(cls, bundle, item_id, attributes): """ Raises BundleError if something is amiss with the user-specified attributes. SHOULD be overridden by subclasses. """ pass @classmethod def validate_name(cls, bundle, name): """ Raise BundleError if the given name is not valid (e.g. contains invalid characters for this kind of item. MAY be overridden by subclasses. """ pass bundlewrap-3.8.0/bundlewrap/items/actions.py000066400000000000000000000175521360562404000211750ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime from bundlewrap.exceptions import ActionFailure, BundleError from bundlewrap.items import format_comment, Item from bundlewrap.utils import Fault from bundlewrap.utils.ui import io from bundlewrap.utils.text import mark_for_translation as _ from bundlewrap.utils.text import blue, bold, wrap_question class Action(Item): """ A command that is run on a node. 
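
    Hypothetical bundle definition (attribute syntax as used in bundle
    files; names and command invented):

        actions = {
            'reload_nginx': {
                'command': "systemctl reload nginx",
                'triggered': True,
            },
        }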
""" BUNDLE_ATTRIBUTE_NAME = 'actions' ITEM_ATTRIBUTES = { 'command': None, 'data_stdin': None, 'expected_stderr': None, 'expected_stdout': None, 'expected_return_code': 0, 'interactive': None, } ITEM_TYPE_NAME = 'action' REQUIRED_ATTRIBUTES = ['command'] def _get_result( self, autoonly_selector="", autoskip_selector="", my_soft_locks=(), other_peoples_soft_locks=(), interactive=False, interactive_default=True, ): if self._faults_missing_for_attributes: if self.error_on_missing_fault: self._raise_for_faults() else: io.debug(_( "skipping {item} on {node} because it is missing faults " "for these attributes: {attrs} " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( attrs=", ".join(sorted(self._faults_missing_for_attributes)), item=self.id, node=self.node.name, )) return (self.STATUS_SKIPPED, self.SKIP_REASON_FAULT_UNAVAILABLE) if not self.covered_by_autoonly_selector(autoonly_selector): io.debug(_( "autoonly does not match {item} on {node}" ).format(item=self.id, node=self.node.name)) return (self.STATUS_SKIPPED, self.SKIP_REASON_CMDLINE) if self.covered_by_autoskip_selector(autoskip_selector): io.debug(_( "autoskip matches {item} on {node}" ).format(item=self.id, node=self.node.name)) return (self.STATUS_SKIPPED, self.SKIP_REASON_CMDLINE) if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks): return (self.STATUS_SKIPPED, self.SKIP_REASON_SOFTLOCK) if interactive is False and self.attributes['interactive'] is True: return (self.STATUS_SKIPPED, self.SKIP_REASON_INTERACTIVE_ONLY) for item in self._precedes_items: if item._triggers_preceding_items(interactive=interactive): io.debug(_( "preceding item {item} on {node} has been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) self.has_been_triggered = True break else: io.debug(_( "preceding item {item} on {node} has NOT been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) if self.triggered and not self.has_been_triggered: io.debug(_("skipping {} because it wasn't triggered").format(self.id)) return (self.STATUS_SKIPPED, self.SKIP_REASON_NO_TRIGGER) if self.unless: with io.job(_("{node} {bundle} {item} checking 'unless' condition").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): unless_result = self.bundle.node.run( self.unless, may_fail=True, ) if unless_result.return_code == 0: io.debug(_("{node}:{bundle}:action:{name}: failed 'unless', not running").format( bundle=self.bundle.name, name=self.name, node=self.bundle.node.name, )) return (self.STATUS_SKIPPED, self.SKIP_REASON_UNLESS) question_body = "" if self.attributes['data_stdin'] is not None: question_body += "<" + _("data") + "> | " question_body += self.attributes['command'] if self.comment: question_body += format_comment(self.comment) if ( interactive and self.attributes['interactive'] is not False and not io.ask( wrap_question( self.id, question_body, _("Run action {}?").format( bold(self.name), ), prefix="{x} {node} ".format( node=bold(self.node.name), x=blue("?"), ), ), interactive_default, epilogue="{x} {node}".format( node=bold(self.node.name), x=blue("?"), ), ) ): return (self.STATUS_SKIPPED, self.SKIP_REASON_INTERACTIVE) try: self.run() return (self.STATUS_ACTION_SUCCEEDED, None) except ActionFailure as exc: return (self.STATUS_FAILED, [str(exc)]) def apply(self, *args, **kwargs): return self.get_result(*args, **kwargs) def cdict(self): raise AttributeError(_("actions don't have cdicts")) def 
get_result(self, *args, **kwargs): self.node.repo.hooks.action_run_start( self.node.repo, self.node, self, ) start_time = datetime.now() status_code = self._get_result(*args, **kwargs) self.node.repo.hooks.action_run_end( self.node.repo, self.node, self, duration=datetime.now() - start_time, status=status_code[0], ) return status_code def run(self): if self.attributes['data_stdin'] is not None: data_stdin = self.attributes['data_stdin'] # Allow users to use either a string/unicode object or raw # bytes -- or Faults. if isinstance(data_stdin, Fault): data_stdin = data_stdin.value if type(data_stdin) is not bytes: data_stdin = data_stdin.encode('UTF-8') else: data_stdin = None with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): result = self.bundle.node.run( self.attributes['command'], data_stdin=data_stdin, may_fail=True, ) if self.attributes['expected_return_code'] is not None and \ not result.return_code == self.attributes['expected_return_code']: raise ActionFailure(_("wrong return code: {}").format(result.return_code)) if self.attributes['expected_stderr'] is not None and \ result.stderr_text != self.attributes['expected_stderr']: raise ActionFailure(_("wrong stderr")) if self.attributes['expected_stdout'] is not None and \ result.stdout_text != self.attributes['expected_stdout']: raise ActionFailure(_("wrong stdout")) return result @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('interactive', None) not in (True, False, None): raise BundleError(_( "invalid interactive setting for action '{item}' in bundle '{bundle}'" ).format(item=item_id, bundle=bundle.name)) def verify(self): if self.unless and self.cached_unless_result: return self.cached_unless_result, None else: raise NotImplementedError bundlewrap-3.8.0/bundlewrap/items/directories.py000066400000000000000000000242751360562404000220510ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from collections import defaultdict from os.path import normpath from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.remote import PathInfo from bundlewrap.utils.text import mark_for_translation as _ from bundlewrap.utils.text import is_subdirectory from bundlewrap.utils.ui import io UNMANAGED_PATH_DESC = _("unmanaged subpaths") def validator_mode(item_id, value): if value is None: return value = str(value) if not value.isdigit(): raise BundleError( _("mode for {item} should be written as digits, got: '{value}'" "").format(item=item_id, value=value) ) for digit in value: if int(digit) > 7 or int(digit) < 0: raise BundleError(_( "invalid mode for {item}: '{value}'" ).format(item=item_id, value=value)) if not len(value) == 3 and not len(value) == 4: raise BundleError(_( "mode for {item} should be three or four digits long, was: '{value}'" ).format(item=item_id, value=value)) ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) ATTRIBUTE_VALIDATORS.update({ 'mode': validator_mode, }) class Directory(Item): """ A directory. 
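
    Hypothetical bundle definition (path invented; attributes as in
    ITEM_ATTRIBUTES below):

        directories = {
            '/var/lib/myapp': {
                'owner': "myapp",
                'group': "myapp",
                'mode': "0750",
                'purge': False,
            },
        }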
""" BUNDLE_ATTRIBUTE_NAME = "directories" ITEM_ATTRIBUTES = { 'group': "root", 'mode': "0755", 'owner': "root", 'purge': False, } ITEM_TYPE_NAME = "directory" def __repr__(self): return "".format( quote(self.name), ) def cdict(self): cdict = { 'paths_to_purge': [], 'type': 'directory', } for optional_attr in ('group', 'mode', 'owner'): if self.attributes[optional_attr] is not None: cdict[optional_attr] = self.attributes[optional_attr] return cdict def display_dicts(self, cdict, sdict, keys): try: keys.remove('paths_to_purge') except ValueError: pass else: keys.append(UNMANAGED_PATH_DESC) cdict[UNMANAGED_PATH_DESC] = cdict['paths_to_purge'] sdict[UNMANAGED_PATH_DESC] = sdict['paths_to_purge'] del cdict['paths_to_purge'] del sdict['paths_to_purge'] return (cdict, sdict, keys) def fix(self, status): if status.must_be_created or 'type' in status.keys_to_fix: # fixing the type fixes everything self._fix_type(status) return for path in status.sdict.get('paths_to_purge', []): self.node.run("rm -rf -- {}".format(quote(path))) for fix_type in ('mode', 'owner', 'group'): if fix_type in status.keys_to_fix: if fix_type == 'group' and 'owner' in status.keys_to_fix: # owner and group are fixed with a single chown continue getattr(self, "_fix_" + fix_type)(status) def _fix_mode(self, status): if self.node.os in self.node.OS_FAMILY_BSD: chmod_command = "chmod {} {}" else: chmod_command = "chmod {} -- {}" self.node.run(chmod_command.format( self.attributes['mode'], quote(self.name), )) if self.node.os not in self.node.OS_FAMILY_BSD: # The bits S_ISUID and S_ISGID are special. POSIX says, # if they are NOT set, the implementation of "chmod" may or # may not clear them. This means that "chmod 0755 foodir" # does not necessarily clear the S_ISUID and/or S_ISGID bit, # while a "chmod 6755 foodir" will always set them. # # GNU coreutils have decided to actually behave this way. # You can't clear a S_ISUID or S_ISGID bit by issuing "chmod # 0755 foodir". You must explicitly do a "chmod u-s foodir" # or "chmod g-s foodir". # # This does not apply to regular files, nor to the sticky # bit (S_ISVTX). Also, FreeBSD, NetBSD, and OpenBSD do clear # these bits on "chmod 0755 foodir". # We only want to run these extra commands if we have found # one of the two special bits to be set. 
if status.sdict is not None and int(status.sdict['mode'], 8) & 0o6000: if not int(self.attributes['mode'], 8) & 0o4000: self.node.run("chmod u-s {}".format(quote(self.name))) if not int(self.attributes['mode'], 8) & 0o2000: self.node.run("chmod g-s {}".format(quote(self.name))) def _fix_owner(self, status): group = self.attributes['group'] or "" if group: group = ":" + quote(group) if self.node.os in self.node.OS_FAMILY_BSD: command = "chown {}{} {}" else: command = "chown {}{} -- {}" self.node.run(command.format( quote(self.attributes['owner'] or ""), group, quote(self.name), )) _fix_group = _fix_owner def _fix_type(self, status): self.node.run("rm -rf -- {}".format(quote(self.name))) self.node.run("mkdir -p -- {}".format(quote(self.name))) if self.attributes['mode']: self._fix_mode(status) if self.attributes['owner'] or self.attributes['group']: self._fix_owner(status) def _get_paths_to_purge(self): result = self.node.run("find {} -maxdepth 1 -print0".format(quote(self.name))) for line in result.stdout.split(b"\0"): line = line.decode('utf-8') for item_type in ('directory', 'file', 'symlink'): for item in self.node.items: if ( item.id == "{}:{}".format(item_type, line) or item.id.startswith("{}:{}/".format(item_type, line)) ): break else: continue break else: # this file or directory is not managed io.debug(( "found unmanaged path below {dirpath} on {node}, " "marking for removal: {path}" ).format( dirpath=self.name, node=self.node.name, path=line, )) yield line def get_auto_deps(self, items): deps = [] for item in items: if item == self: continue if (( item.ITEM_TYPE_NAME == "file" and is_subdirectory(item.name, self.name) ) or ( item.ITEM_TYPE_NAME in ("file", "symlink") and item.name == self.name )): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME in ("directory", "symlink"): if is_subdirectory(item.name, self.name): deps.append(item.id) return deps def sdict(self): path_info = PathInfo(self.node, self.name) if not path_info.exists: return None else: paths_to_purge = [] if self.attributes['purge']: paths_to_purge = list(self._get_paths_to_purge()) return { 'type': 'directory' if path_info.is_directory else path_info.stat['type'], 'mode': path_info.mode, 'owner': path_info.owner, 'group': path_info.group, 'paths_to_purge': paths_to_purge, } def patch_attributes(self, attributes): if 'mode' in attributes and attributes['mode'] is not None: attributes['mode'] = str(attributes['mode']).zfill(4) if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: # BSD doesn't have a root group, so we have to use a # different default value here attributes['group'] = 'wheel' return 
attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): for key, value in attributes.items(): ATTRIBUTE_VALIDATORS[key](item_id, value) @classmethod def validate_name(cls, bundle, name): if normpath(name) != name: raise BundleError(_( "'{path}' is an invalid directory path, " "should be '{normpath}' (bundle '{bundle}')" ).format( bundle=bundle.name, normpath=normpath(name), path=name, )) bundlewrap-3.8.0/bundlewrap/items/files.py000066400000000000000000000427051360562404000206350ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from base64 import b64decode from collections import defaultdict from contextlib import contextmanager from datetime import datetime from os.path import basename, dirname, exists, join, normpath from pipes import quote from subprocess import call from sys import exc_info from traceback import format_exception from jinja2 import Environment, FileSystemLoader from mako.lookup import TemplateLookup from mako.template import Template from bundlewrap.exceptions import BundleError, FaultUnavailable, TemplateError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.items.directories import validator_mode from bundlewrap.utils import cached_property, hash_local_file, sha1, tempfile from bundlewrap.utils.remote import PathInfo from bundlewrap.utils.text import force_text, mark_for_translation as _ from bundlewrap.utils.text import is_subdirectory from bundlewrap.utils.ui import io DIFF_MAX_FILE_SIZE = 1024 * 1024 * 5 # bytes def content_processor_base64(item): # .encode() is required for pypy3 only return b64decode(item._template_content.encode()) def content_processor_jinja2(item): loader = FileSystemLoader(searchpath=[item.item_data_dir, item.item_dir]) env = Environment(loader=loader) template = env.from_string(item._template_content) io.debug("{node}:{bundle}:{item}: rendering with Jinja2...".format( bundle=item.bundle.name, item=item.id, node=item.node.name, )) start = datetime.now() try: content = template.render( item=item, bundle=item.bundle, node=item.node, repo=item.node.repo, **item.attributes['context'] ) except FaultUnavailable: raise except Exception as e: io.debug("".join(format_exception(*exc_info()))) raise TemplateError(_( "Error while rendering template for {node}:{bundle}:{item}: {error}" ).format( bundle=item.bundle.name, error=e, item=item.id, node=item.node.name, )) duration = datetime.now() - start io.debug("{node}:{bundle}:{item}: rendered in {time}s".format( bundle=item.bundle.name, item=item.id, node=item.node.name, time=duration.total_seconds(), )) return content.encode(item.attributes['encoding']) def content_processor_mako(item): template = Template( item._template_content.encode('utf-8'), input_encoding='utf-8', lookup=TemplateLookup(directories=[item.item_data_dir, item.item_dir]), output_encoding=item.attributes['encoding'], ) io.debug("{node}:{bundle}:{item}: rendering with Mako...".format( bundle=item.bundle.name, item=item.id, node=item.node.name, )) start = datetime.now() try: content = template.render( item=item, bundle=item.bundle, node=item.node, repo=item.node.repo, **item.attributes['context'] ) except FaultUnavailable: raise except Exception as e: io.debug("".join(format_exception(*exc_info()))) if isinstance(e, NameError) and str(e) == "Undefined": # Mako isn't very verbose here. Try to give a more useful # error message - even though we can't pinpoint the exact # location of the error. 
:/ e = _("Undefined variable (look for '${...}')") elif isinstance(e, KeyError): e = _("KeyError: {}").format(str(e)) raise TemplateError(_( "Error while rendering template for {node}:{bundle}:{item}: {error}" ).format( bundle=item.bundle.name, error=e, item=item.id, node=item.node.name, )) duration = datetime.now() - start io.debug("{node}:{bundle}:{item}: rendered in {time}s".format( bundle=item.bundle.name, item=item.id, node=item.node.name, time=duration.total_seconds(), )) return content def content_processor_text(item): return item._template_content.encode(item.attributes['encoding']) CONTENT_PROCESSORS = { 'any': lambda item: b"", 'base64': content_processor_base64, 'binary': None, 'jinja2': content_processor_jinja2, 'mako': content_processor_mako, 'text': content_processor_text, } def get_remote_file_contents(node, path): """ Returns the contents of the given path as a string. """ with tempfile() as tmp_file: node.download(path, tmp_file) with open(tmp_file, 'rb') as f: content = f.read() return content def validator_content_type(item_id, value): if value not in CONTENT_PROCESSORS: raise BundleError(_( "invalid content_type for {item}: '{value}'" ).format(item=item_id, value=value)) ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) ATTRIBUTE_VALIDATORS.update({ 'content_type': validator_content_type, 'mode': validator_mode, }) class File(Item): """ A file. """ BUNDLE_ATTRIBUTE_NAME = "files" ITEM_ATTRIBUTES = { 'content': None, 'content_type': 'text', 'context': None, 'delete': False, 'encoding': "utf-8", 'group': "root", 'mode': "0644", 'owner': "root", 'source': None, 'verify_with': None, } ITEM_TYPE_NAME = "file" def __repr__(self): return "".format(quote(self.name)) @property def _template_content(self): if self.attributes['source'] is not None: filename = join(self.item_data_dir, self.attributes['source']) if not exists(filename): filename = join(self.item_dir, self.attributes['source']) with open(filename, 'rb') as f: return force_text(f.read()) else: return force_text(self.attributes['content']) @cached_property def content(self): return CONTENT_PROCESSORS[self.attributes['content_type']](self) @cached_property def content_hash(self): if self.attributes['content_type'] == 'binary': return hash_local_file(self.template) else: return sha1(self.content) @cached_property def template(self): data_template = join(self.item_data_dir, self.attributes['source']) if exists(data_template): return data_template return join(self.item_dir, self.attributes['source']) def cdict(self): if self.attributes['delete']: return None cdict = {'type': 'file'} if self.attributes['content_type'] != 'any': cdict['content_hash'] = self.content_hash for optional_attr in ('group', 'mode', 'owner'): if self.attributes[optional_attr] is not None: cdict[optional_attr] = self.attributes[optional_attr] return cdict def fix(self, status): if status.must_be_created or status.must_be_deleted or 'type' in status.keys_to_fix: self._fix_type(status) else: for fix_type in ('content_hash', 'mode', 'owner', 'group'): if fix_type in status.keys_to_fix: if fix_type == 'group' and \ 'owner' in status.keys_to_fix: # owner and group are fixed with a single chown continue if fix_type in ('mode', 'owner', 'group') and \ 'content' in status.keys_to_fix: # fixing content implies settings mode and owner/group continue getattr(self, "_fix_" + fix_type)(status) def _fix_content_hash(self, status): with self._write_local_file() as local_path: self.node.upload( local_path, self.name, mode=self.attributes['mode'], 
owner=self.attributes['owner'] or "", group=self.attributes['group'] or "", may_fail=True, ) def _fix_mode(self, status): if self.node.os in self.node.OS_FAMILY_BSD: command = "chmod {} {}" else: command = "chmod {} -- {}" self.node.run(command.format( self.attributes['mode'], quote(self.name), )) def _fix_owner(self, status): group = self.attributes['group'] or "" if group: group = ":" + quote(group) if self.node.os in self.node.OS_FAMILY_BSD: command = "chown {}{} {}" else: command = "chown {}{} -- {}" self.node.run(command.format( quote(self.attributes['owner'] or ""), group, quote(self.name), )) _fix_group = _fix_owner def _fix_type(self, status): if status.sdict: self.node.run("rm -rf -- {}".format(quote(self.name))) if not status.must_be_deleted: self.node.run("mkdir -p -- {}".format(quote(dirname(self.name)))) self._fix_content_hash(status) def get_auto_deps(self, items): deps = [] for item in items: if item.ITEM_TYPE_NAME == "file" and is_subdirectory(item.name, self.name): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME in ("directory", "symlink"): if is_subdirectory(item.name, self.name): deps.append(item.id) return deps def sdict(self): path_info = PathInfo(self.node, self.name) if not path_info.exists: return None else: return { 'type': 'file' if path_info.is_file else path_info.stat['type'], 'content_hash': path_info.sha1 if path_info.is_file else None, 'mode': path_info.mode, 'owner': path_info.owner, 'group': path_info.group, 'size': path_info.size, } def display_dicts(self, cdict, sdict, keys): if ( 'content_hash' in keys and self.attributes['content_type'] not in ('base64', 'binary') and sdict['size'] < DIFF_MAX_FILE_SIZE and len(self.content) < DIFF_MAX_FILE_SIZE and PathInfo(self.node, self.name).is_text_file ): keys.remove('content_hash') keys.append('content') del cdict['content_hash'] del sdict['content_hash'] cdict['content'] = self.content sdict['content'] = get_remote_file_contents(self.node, self.name) if 'type' in keys: try: keys.remove('content_hash') except ValueError: pass return (cdict, sdict, keys) def patch_attributes(self, attributes): if ( 'content' not in attributes and 'source' not in attributes and attributes.get('content_type', 'text') != 'any' and attributes.get('delete', False) is False ): attributes['source'] = basename(self.name) if 'context' not in attributes: attributes['context'] = {} if 'mode' in attributes and attributes['mode'] is not None: attributes['mode'] = str(attributes['mode']).zfill(4) if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: # BSD doesn't have a root group, so we have to use a # different default 
value here attributes['group'] = 'wheel' return attributes def preview(self): if ( self.attributes['content_type'] in ('any', 'base64', 'binary') or self.attributes['delete'] is True ): raise ValueError return self.content.decode(self.attributes['encoding']) def test(self): if self.attributes['source'] and not exists(self.template): raise BundleError(_( "{item} from bundle '{bundle}' refers to missing " "file '{path}' in its 'source' attribute" ).format( bundle=self.bundle.name, item=self.id, path=self.template, )) if not self.attributes['delete'] and not self.attributes['content_type'] == 'any': with self._write_local_file(): pass @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) if 'content' in attributes and 'source' in attributes: raise BundleError(_( "{item} from bundle '{bundle}' cannot have both 'content' and 'source'" ).format(item=item_id, bundle=bundle.name)) if 'content' in attributes and attributes.get('content_type') == 'binary': raise BundleError(_( "{item} from bundle '{bundle}' cannot have binary inline content " "(use content_type 'base64' instead)" ).format(item=item_id, bundle=bundle.name)) if 'encoding' in attributes and attributes.get('content_type') in ( 'any', 'base64', 'binary', ): raise BundleError(_( "content_type of {item} from bundle '{bundle}' cannot provide different encoding " "(remove the 'encoding' attribute)" ).format(item=item_id, bundle=bundle.name)) if ( attributes.get('content_type', None) == "any" and ( 'content' in attributes or 'encoding' in attributes or 'source' in attributes ) ): raise BundleError(_( "{item} from bundle '{bundle}' with content_type 'any' " "must not define 'content', 'encoding' and/or 'source'" ).format(item=item_id, bundle=bundle.name)) for key, value in attributes.items(): ATTRIBUTE_VALIDATORS[key](item_id, value) @classmethod def validate_name(cls, bundle, name): if normpath(name) == "/": raise BundleError(_("'/' cannot be a file")) if normpath(name) != name: raise BundleError(_( "'{path}' is an invalid file path, should be '{normpath}' (bundle '{bundle}')" ).format( bundle=bundle.name, normpath=normpath(name), path=name, )) @contextmanager def _write_local_file(self): """ Makes the file contents available at the returned temporary path and performs local verification if necessary or requested. The calling method is responsible for cleaning up the file at the returned path (only if not a binary). 
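A sketch of the internal call pattern (cf. _fix_content_hash()
and test() in this class):

    with self._write_local_file() as local_path:
        # local_path now holds the rendered content and has
        # already passed 'verify_with', if configured
        self.node.upload(local_path, self.name)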
""" with tempfile() as tmp_file: if self.attributes['content_type'] == 'binary': local_path = self.template else: local_path = tmp_file with open(local_path, 'wb') as f: f.write(self.content) if self.attributes['verify_with']: cmd = self.attributes['verify_with'].format(quote(local_path)) io.debug("calling local verify command for {i}: {c}".format(c=cmd, i=self.id)) if call(cmd, shell=True) == 0: io.debug("{i} passed local validation".format(i=self.id)) else: raise BundleError(_( "{i} failed local validation using: {c}" ).format(c=cmd, i=self.id)) yield local_path bundlewrap-3.8.0/bundlewrap/items/groups.py000066400000000000000000000100261360562404000210410ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from bundlewrap.exceptions import BundleError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.items.users import _USERNAME_VALID_CHARACTERS from bundlewrap.utils.text import mark_for_translation as _ def _parse_group_line(line): """ Parses a line from /etc/group and returns the information as a dictionary. """ result = dict(zip( ('groupname', 'password', 'gid', 'members'), line.strip().split(":"), )) result['gid'] = result['gid'] del result['password'] # nothing useful here return result class Group(Item): """ A group. """ BUNDLE_ATTRIBUTE_NAME = "groups" ITEM_ATTRIBUTES = { 'delete': False, 'gid': None, } ITEM_TYPE_NAME = "group" REQUIRED_ATTRIBUTES = [] @classmethod def block_concurrent(cls, node_os, node_os_version): # https://github.com/bundlewrap/bundlewrap/issues/367 if node_os == 'openbsd': return [cls.ITEM_TYPE_NAME] else: return [] def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None cdict = {} if self.attributes.get('gid') is not None: cdict['gid'] = self.attributes['gid'] return cdict def fix(self, status): if status.must_be_created: if self.attributes['gid'] is None: command = "groupadd {}".format(self.name) else: command = "groupadd -g {gid} {groupname}".format( gid=self.attributes['gid'], groupname=self.name, ) self.node.run(command, may_fail=True) elif status.must_be_deleted: self.node.run("groupdel {}".format(self.name), may_fail=True) else: self.node.run( "groupmod -g {gid} {groupname}".format( gid=self.attributes['gid'], groupname=self.name, ), may_fail=True, ) def sdict(self): # verify content of /etc/group grep_result = self.node.run( "grep -e '^{}:' /etc/group".format(self.name), may_fail=True, ) if grep_result.return_code != 0: return None else: return _parse_group_line(grep_result.stdout_text) def patch_attributes(self, attributes): if isinstance(attributes.get('gid'), int): attributes['gid'] = str(attributes['gid']) return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) @classmethod def validate_name(cls, bundle, name): for char in name: if char not in _USERNAME_VALID_CHARACTERS: raise BundleError(_( "Invalid character in group name '{name}': {char} (bundle '{bundle}')" ).format( char=char, bundle=bundle.name, name=name, )) if name.endswith("_") or name.endswith("-"): raise BundleError(_( "Group name '{name}' must not end in dash or underscore (bundle '{bundle}')" ).format( bundle=bundle.name, name=name, )) if len(name) > 30: raise 
BundleError(_( "Group name '{name}' is longer than 30 characters (bundle '{bundle}')" ).format( bundle=bundle.name, name=name, )) bundlewrap-3.8.0/bundlewrap/items/kubernetes.py000066400000000000000000000364471360562404000217100ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from abc import ABCMeta import json from os.path import exists, join import re from bundlewrap.exceptions import BundleError from bundlewrap.metadata import MetadataJSONEncoder from bundlewrap.operations import run_local from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.items.files import content_processor_jinja2, content_processor_mako from bundlewrap.utils.dicts import merge_dict, reduce_dict from bundlewrap.utils.ui import io from bundlewrap.utils.text import force_text, mark_for_translation as _ from six import add_metaclass import yaml def log_error(run_result): if run_result.return_code != 0: io.debug(run_result.stdout.decode('utf-8')) io.debug(run_result.stderr.decode('utf-8')) @add_metaclass(ABCMeta) class KubernetesItem(Item): """ A generic Kubernetes item. """ ITEM_ATTRIBUTES = { 'delete': False, 'encoding': "utf-8", # required by content processors 'manifest': None, 'manifest_file': None, 'manifest_processor': None, 'context': None, } KIND = None KUBERNETES_APIVERSION = "v1" NAME_REGEX = r"^[a-z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) def __init__(self, *args, **kwargs): super(KubernetesItem, self).__init__(*args, **kwargs) self.item_data_dir = join(self.bundle.bundle_data_dir, "manifests") self.item_dir = join(self.bundle.bundle_dir, "manifests") @property def _template_content(self): # required by content processors filename = join(self.item_data_dir, self.attributes['manifest_file']) if not exists(filename): filename = join(self.item_dir, self.attributes['manifest_file']) with open(filename, 'rb') as f: return force_text(f.read()) def cdict(self): if self.attributes['delete']: return None else: return {'manifest': self.manifest} def fix(self, status): if status.must_be_deleted: result = run_local(self._kubectl + ["delete", self.KIND, self.resource_name]) log_error(result) else: result = run_local( self._kubectl + ["apply", "-f", "-"], data_stdin=self.manifest.encode('utf-8'), ) log_error(result) def get_auto_deps(self, items, _secrets=True): deps = [] for item in items: if ( item.ITEM_TYPE_NAME == 'k8s_namespace' and item.name == self.namespace ): if item.attributes['delete'] and not self.attributes['delete']: raise BundleError(_( "{item} (bundle '{bundle}' on {node}) " "cannot exist in namespace marked for deletion" ).format( item=self.id, bundle=self.bundle.name, node=self.node.name, )) deps.append(item.id) elif ( _secrets and item.ITEM_TYPE_NAME == 'k8s_secret' and item.namespace == self.namespace ): deps.append(item.id) return deps @property def _kubectl(self): cmdline = [ "kubectl", "--context={}".format(self.node.kubectl_context), ] if self.namespace: cmdline.append("--namespace={}".format(self.namespace)) return cmdline @property def _manifest_dict(self): if self.attributes['manifest_processor'] == 'jinja2': content_processor = content_processor_jinja2 elif self.attributes['manifest_processor'] == 'mako': content_processor = content_processor_mako else: content_processor = lambda item: item._template_content.encode('utf-8') if self.attributes['manifest'] is not None or self.attributes['manifest_file'] is None: user_manifest = self.attributes['manifest'] or {} elif ( 
self.attributes['manifest_file'].endswith(".yaml") or self.attributes['manifest_file'].endswith(".yml") ): user_manifest = yaml.load(content_processor(self), Loader=yaml.SafeLoader) elif self.attributes['manifest_file'].endswith(".json"): user_manifest = json.loads(content_processor(self)) merged_manifest = merge_dict( { 'apiVersion': self.KUBERNETES_APIVERSION, 'kind': self.KIND, 'metadata': { 'name': self.name.split("/")[-1], }, }, user_manifest, ) if merged_manifest['apiVersion'] is None: raise BundleError(_( "{item} from bundle '{bundle}' needs an apiVersion in its manifest" ).format(item=self.id, bundle=self.bundle.name)) return merged_manifest @property def manifest(self): return json.dumps( self._manifest_dict, cls=MetadataJSONEncoder, indent=4, sort_keys=True, ) @property def namespace(self): return self.name.split("/", 1)[0] or None def patch_attributes(self, attributes): if 'context' not in attributes: attributes['context'] = {} return attributes def preview(self): if self.attributes['delete'] is True: raise ValueError return yaml.dump(json.loads(self.manifest), default_flow_style=False) @property def resource_name(self): return self._manifest_dict['metadata']['name'] def sdict(self): result = run_local(self._kubectl + ["get", "-o", "json", self.KIND, self.resource_name]) if result.return_code == 0: full_json_response = json.loads(result.stdout.decode('utf-8')) if full_json_response.get("status", {}).get("phase") == "Terminating": # this resource is currently being deleted, consider it gone return None return {'manifest': json.dumps(reduce_dict( full_json_response, json.loads(self.manifest), ), indent=4, sort_keys=True)} elif result.return_code == 1 and "NotFound" in result.stderr.decode('utf-8'): return None else: io.debug(result.stdout.decode('utf-8')) io.debug(result.stderr.decode('utf-8')) raise RuntimeError(_("error getting state of {}, check `bw --debug`".format(self.id))) @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) if attributes.get('manifest') and attributes.get('manifest_file'): raise BundleError(_( "{item} from bundle '{bundle}' cannot have both 'manifest' and 'manifest_file'" ).format(item=item_id, bundle=bundle.name)) if attributes.get('manifest_processor') not in (None, 'jinja2', 'mako'): raise BundleError(_( "{item} from bundle '{bundle}' has invalid manifest_processor " "(must be 'jinja2' or 'mako')" ).format(item=item_id, bundle=bundle.name)) @classmethod def validate_name(cls, bundle, name): if not cls.NAME_REGEX_COMPILED.match(name): raise BundleError(_( "name for {item_type}:{name} (bundle '{bundle}') " "on {node} doesn't match {regex}" ).format( item_type=cls.ITEM_TYPE_NAME, name=name, bundle=bundle.name, node=bundle.node.name, regex=cls.NAME_REGEX, )) class KubernetesRawItem(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_raw" ITEM_TYPE_NAME = "k8s_raw" KUBERNETES_APIVERSION = None NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-zA-Z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) def _check_bundle_collisions(self, items): super(KubernetesRawItem, self)._check_bundle_collisions(items) for item in items: if item == self or not isinstance(item, KubernetesItem): continue if item.KIND == self.KIND and item.resource_name == 
self.resource_name: raise BundleError(_( "duplicate definition of {item} (from bundle {bundle}) " "as {item2} (from bundle {bundle2}) on {node}" ).format( item=self.id, bundle=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) def get_auto_deps(self, items): deps = super(KubernetesRawItem, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME == 'k8s_crd' and item._manifest_dict.get('spec', {}).get('names', {}).get('kind') == self.KIND ): deps.append(item.id) return deps @property def KIND(self): return self.name.split("/", 2)[1] class KubernetesClusterRole(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_clusterroles" KIND = "ClusterRole" KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" ITEM_TYPE_NAME = "k8s_clusterrole" NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) @property def namespace(self): return None class KubernetesClusterRoleBinding(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_clusterrolebindings" KIND = "ClusterRoleBinding" KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" ITEM_TYPE_NAME = "k8s_clusterrolebinding" NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) def get_auto_deps(self, items): deps = super(KubernetesClusterRoleBinding, self).get_auto_deps(items) deps.append("k8s_clusterrole:") return deps @property def namespace(self): return None class KubernetesConfigMap(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_configmaps" KIND = "ConfigMap" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_configmap" class KubernetesCronJob(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_cronjobs" KIND = "CronJob" KUBERNETES_APIVERSION = "batch/v1beta1" ITEM_TYPE_NAME = "k8s_cronjob" class KubernetesCustomResourceDefinition(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_crd" KIND = "CustomResourceDefinition" KUBERNETES_APIVERSION = "apiextensions.k8s.io/v1beta1" ITEM_TYPE_NAME = "k8s_crd" NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) def get_auto_deps(self, items): return [] @property def namespace(self): return None class KubernetesDaemonSet(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_daemonsets" KIND = "DaemonSet" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_daemonset" def get_auto_deps(self, items): deps = super(KubernetesDaemonSet, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap') and item.namespace == self.namespace ): deps.append(item.id) return deps class KubernetesDeployment(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_deployments" KIND = "Deployment" KUBERNETES_APIVERSION = "extensions/v1beta1" ITEM_TYPE_NAME = "k8s_deployment" def get_auto_deps(self, items): deps = super(KubernetesDeployment, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap') and item.namespace == self.namespace ): deps.append(item.id) return deps class KubernetesIngress(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_ingresses" KIND = "Ingress" KUBERNETES_APIVERSION = "extensions/v1beta1" ITEM_TYPE_NAME = "k8s_ingress" def get_auto_deps(self, items): deps = super(KubernetesIngress, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME == 'k8s_service' and item.namespace == self.namespace ): deps.append(item.id) return deps class KubernetesNamespace(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_namespaces" KIND = "Namespace" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_namespace" NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = 
re.compile(NAME_REGEX) def get_auto_deps(self, items): return [] class KubernetesNetworkPolicy(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_networkpolicies" KIND = "NetworkPolicy" KUBERNETES_APIVERSION = "networking.k8s.io/v1" ITEM_TYPE_NAME = "k8s_networkpolicy" NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) class KubernetesPersistentVolumeClain(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_pvc" KIND = "PersistentVolumeClaim" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_pvc" class KubernetesRole(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_roles" KIND = "Role" KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" ITEM_TYPE_NAME = "k8s_role" class KubernetesRoleBinding(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_rolebindings" KIND = "RoleBinding" KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" ITEM_TYPE_NAME = "k8s_rolebinding" def get_auto_deps(self, items): deps = super(KubernetesRoleBinding, self).get_auto_deps(items) deps.append("k8s_role:") return deps class KubernetesSecret(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_secrets" KIND = "Secret" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_secret" def get_auto_deps(self, items): return super(KubernetesSecret, self).get_auto_deps(items, _secrets=False) class KubernetesService(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_services" KIND = "Service" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_service" class KubernetesServiceAccount(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_serviceaccounts" KIND = "ServiceAccount" KUBERNETES_APIVERSION = "v1" ITEM_TYPE_NAME = "k8s_serviceaccount" class KubernetesStatefulSet(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_statefulsets" KIND = "StatefulSet" KUBERNETES_APIVERSION = "apps/v1" ITEM_TYPE_NAME = "k8s_statefulset" def get_auto_deps(self, items): deps = super(KubernetesStatefulSet, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap') and item.namespace == self.namespace ): deps.append(item.id) return deps bundlewrap-3.8.0/bundlewrap/items/pkg.py000066400000000000000000000045361360562404000203140ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from abc import ABCMeta, abstractmethod from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ from six import add_metaclass @add_metaclass(ABCMeta) class Pkg(Item): """ A generic package. 
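Concrete subclasses supply the backend-specific commands. A minimal
sketch of a hypothetical backend (names are illustrative and assume
pipes.quote is imported, as in the real pkg_* modules):

    class FooPkg(Pkg):
        BUNDLE_ATTRIBUTE_NAME = "pkg_foo"
        ITEM_TYPE_NAME = "pkg_foo"

        def pkg_all_installed(self):
            result = self.node.run("foo list-installed")
            for line in result.stdout.decode('utf-8').strip().split("\n"):
                yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0])

        def pkg_install(self):
            self.node.run("foo install {}".format(quote(self.name)), may_fail=True)

        def pkg_installed(self):
            result = self.node.run("foo status {}".format(quote(self.name)), may_fail=True)
            return result.return_code == 0

        def pkg_remove(self):
            self.node.run("foo remove {}".format(quote(self.name)), may_fail=True)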
""" ITEM_ATTRIBUTES = { 'installed': True, } _pkg_install_cache = {} @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def __repr__(self): return "<{} name:{} installed:{}>".format( self.ITEM_TYPE_NAME, self.name, self.attributes['installed'], ) def fix(self, status): try: self._pkg_install_cache.get(self.node.name, set()).remove(self.id) except KeyError: pass if self.attributes['installed'] is False: self.pkg_remove() else: self.pkg_install() @abstractmethod def pkg_all_installed(self): raise NotImplementedError @abstractmethod def pkg_install(self): raise NotImplementedError @abstractmethod def pkg_installed(self): raise NotImplementedError def pkg_installed_cached(self): cache = self._pkg_install_cache.setdefault(self.node.name, set()) if not cache: cache.add(None) # make sure we don't run into this if again for pkgid in self.pkg_all_installed(): cache.add(pkgid) if self.pkg_in_cache(self.id, cache): return True return self.pkg_installed() @staticmethod def pkg_in_cache(pkgid, cache): """ pkg_apt needs to override this for multiarch support. """ return pkgid in cache @abstractmethod def pkg_remove(self): raise NotImplementedError def sdict(self): return { 'installed': self.pkg_installed_cached(), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/pkg_apt.py000066400000000000000000000046421360562404000211560ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items.pkg import Pkg from bundlewrap.utils.text import mark_for_translation as _ class AptPkg(Pkg): """ A package installed by apt. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_apt" ITEM_TYPE_NAME = "pkg_apt" WHEN_CREATING_ATTRIBUTES = { 'start_service': True, } def pkg_all_installed(self): result = self.node.run("dpkg -l | grep '^ii'") for line in result.stdout.decode('utf-8').strip().split("\n"): pkg_name = line[4:].split()[0].replace(":", "_") yield "{}:{}".format(self.ITEM_TYPE_NAME, pkg_name) def pkg_install(self): runlevel = "" if self.when_creating['start_service'] else "RUNLEVEL=1 " self.node.run( runlevel + "DEBIAN_FRONTEND=noninteractive " "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends " "install {}".format(quote(self.name.replace("_", ":"))), may_fail=True, ) def pkg_installed(self): result = self.node.run( "dpkg -s {} | grep '^Status: '".format(quote(self.name.replace("_", ":"))), may_fail=True, ) return result.return_code == 0 and " installed" in result.stdout_text @staticmethod def pkg_in_cache(pkgid, cache): pkgtype, pkgname = pkgid.split(":") if "_" in pkgname: return pkgid in cache else: for cached_pkgid in cache: if cached_pkgid is None: continue if cached_pkgid == pkgid or cached_pkgid.startswith(pkgid + ":"): return True return False def pkg_remove(self): self.node.run( "DEBIAN_FRONTEND=noninteractive " "apt-get -qy purge {}".format(quote(self.name.replace("_", ":"))) ) @classmethod def validate_attributes(cls, bundle, item_id, attributes): super(AptPkg, cls).validate_attributes(bundle, item_id, attributes) if not isinstance(attributes.get('when_creating', {}).get('start_service', True), bool): raise BundleError(_( "expected boolean for 'start_service' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/pkg_dnf.py000066400000000000000000000020611360562404000211320ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.items.pkg import Pkg class DnfPkg(Pkg): """ A package installed by dnf. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_dnf" ITEM_TYPE_NAME = "pkg_dnf" @classmethod def block_concurrent(cls, node_os, node_os_version): return ["pkg_dnf", "pkg_yum"] def pkg_all_installed(self): result = self.node.run("dnf -d0 -e0 list installed") for line in result.stdout.decode('utf-8').strip().split("\n"): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) def pkg_install(self): self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.node.run( "dnf -d0 -e0 list installed {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) bundlewrap-3.8.0/bundlewrap/items/pkg_openbsd.py000066400000000000000000000105021360562404000220140ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote import re from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ PKGSPEC_REGEX = re.compile(r"^(.+)-(\d.*)$") def parse_pkg_name(pkgname, line): matches = PKGSPEC_REGEX.match(line) assert matches != None, _("Unexpected OpenBSD package name: {line}").format(line=line) installed_package, installed_version_and_more = matches.groups() assert not installed_version_and_more.endswith("-"), \ _("Unexpected OpenBSD package name (ends in dash): {line}").format(line=line) if installed_package == pkgname: if "-" in installed_version_and_more: tokens = installed_version_and_more.split("-") installed_version = tokens[0] installed_flavor = "-".join(tokens[1:]) else: installed_version = installed_version_and_more installed_flavor = "" return True, installed_version, installed_flavor else: return False, None, None def pkg_install(node, pkgname, flavor, version): # Setting either flavor or version to None means "don't specify this # component". Setting flavor to the empty string means choosing the # "normal" flavor. # # flavor = "", version = None: "pkgname--" # flavor = "foo", version = None: "pkgname--foo" # flavor = None, version = None: "pkgname" (a) # flavor = "", version = "1.0": "pkgname-1.0" (b) # flavor = "foo", version = "1.0": "pkgname-1.0-foo" # flavor = None, version = "1.0": "pkgname-1.0" # flavor = None, version = "-foo": "pkgname--foo" (backwards compat) if flavor is None and version is None: # Case "(a)" full_name = pkgname elif flavor == "" and version is not None: # Case "(b)" full_name = "{}-{}".format(pkgname, version) else: version_part = "-" if version is None else "-{}".format(version) flavor_part = "" if flavor is None else "-{}".format(flavor) full_name = "{}{}{}".format(pkgname, version_part, flavor_part) return node.run("pkg_add -r -I {}".format(full_name), may_fail=True) def pkg_installed(node, pkgname): result = node.run( "pkg_info | cut -f 1 -d ' '", may_fail=True, ) for line in result.stdout.decode('utf-8').strip().splitlines(): found, installed_version, installed_flavor = parse_pkg_name(pkgname, line) if found: return installed_version, installed_flavor return False, None def pkg_remove(node, pkgname): return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname)), may_fail=True) class OpenBSDPkg(Item): """ A package installed by pkg_add/pkg_delete. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_openbsd" ITEM_ATTRIBUTES = { 'installed': True, 'flavor': "", 'version': None, } ITEM_TYPE_NAME = "pkg_openbsd" def __repr__(self): return "".format( self.name, self.attributes['installed'], ) def cdict(self): cdict = self.attributes.copy() if not cdict['installed']: del cdict['flavor'] if cdict['version'] is None or not cdict['installed']: del cdict['version'] return cdict def fix(self, status): if self.attributes['installed'] is False: pkg_remove(self.node, self.name) else: pkg_install( self.node, self.name, self.attributes['flavor'], self.attributes['version'] ) def sdict(self): version, flavor = pkg_installed(self.node, self.name) return { 'installed': bool(version), 'flavor': flavor if flavor is not None else _("none"), 'version': version if version else _("none"), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/pkg_opkg.py000066400000000000000000000020551360562404000213260ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.items.pkg import Pkg class OpkgPkg(Pkg): """ A package installed by opkg. """ BUNDLE_ATTRIBUTE_NAME = "pkg_opkg" ITEM_TYPE_NAME = "pkg_opkg" @classmethod def block_concurrent(cls, node_os, node_os_version): return ["pkg_opkg"] def pkg_all_installed(self): result = self.node.run("opkg list-installed") for line in result.stdout.decode('utf-8').strip().split("\n"): if line: yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0]) def pkg_install(self): self.node.run("opkg install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.node.run( "opkg status {} | grep ^Status: | grep installed".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.node.run("opkg remove {}".format(quote(self.name)), may_fail=True) bundlewrap-3.8.0/bundlewrap/items/pkg_pacman.py000066400000000000000000000032631360562404000216270ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import basename, join from pipes import quote from bundlewrap.items.pkg import Pkg class PacmanPkg(Pkg): """ A package installed by pacman. """ BUNDLE_ATTRIBUTE_NAME = "pkg_pacman" ITEM_ATTRIBUTES = { 'installed': True, 'tarball': None, } ITEM_TYPE_NAME = "pkg_pacman" def cdict(self): # TODO/FIXME: this is bad because it ignores tarball # (However, that's not part of the node's state, so bw won't # "fix" it anyway, so ... I guess we can live with that.) 
return {'installed': self.attributes['installed']} def pkg_all_installed(self): pkgs = self.node.run("pacman -Qq").stdout.decode('utf-8') for line in pkgs.splitlines(): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0]) def pkg_install(self): if self.attributes['tarball']: local_file = join(self.item_dir, self.attributes['tarball']) remote_file = "/tmp/{}".format(basename(local_file)) self.node.upload(local_file, remote_file) self.node.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True) self.node.run("rm -- {}".format(quote(remote_file))) else: self.node.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.node.run( "pacman -Q {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True) bundlewrap-3.8.0/bundlewrap/items/pkg_pip.py000066400000000000000000000067651360562404000211650ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import join, split from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def pkg_install(node, pkgname, version=None): if version: pkgname = "{}=={}".format(pkgname, version) pip_path, pkgname = split_path(pkgname) return node.run("{} install -U {}".format(quote(pip_path), quote(pkgname)), may_fail=True) def pkg_installed(node, pkgname): pip_path, pkgname = split_path(pkgname) result = node.run( "{} freeze | grep -i '^{}=='".format(quote(pip_path), pkgname), may_fail=True, ) if result.return_code != 0: return False else: return result.stdout_text.split("=")[-1].strip() def pkg_remove(node, pkgname): pip_path, pkgname = split_path(pkgname) return node.run("{} uninstall -y {}".format(quote(pip_path), quote(pkgname)), may_fail=True) class PipPkg(Item): """ A package installed by pip. 
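Example bundle usage (a sketch; names, versions and paths are
illustrative):

    pkg_pip = {
        "requests": {
            "version": "2.22.0",  # optional version pin
        },
        "/opt/venv/jinja2": {},   # uses /opt/venv/bin/pip, see split_path()
    }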
""" BUNDLE_ATTRIBUTE_NAME = "pkg_pip" ITEM_ATTRIBUTES = { 'installed': True, 'version': None, } ITEM_TYPE_NAME = "pkg_pip" def __repr__(self): return "".format( self.name, self.attributes['installed'], ) def cdict(self): cdict = {'installed': self.attributes['installed']} if self.attributes.get('version') is not None: cdict['version'] = self.attributes['version'] return cdict def get_auto_deps(self, items): for item in items: if item == self: continue if ( item.ITEM_TYPE_NAME == self.ITEM_TYPE_NAME and item.name.lower() == self.name.lower() ): raise BundleError(_( "{item1} (from bundle '{bundle1}') has name collision with " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) return [] def fix(self, status): if self.attributes['installed'] is False: pkg_remove(self.node, self.name) else: pkg_install(self.node, self.name, version=self.attributes['version']) def sdict(self): install_status = pkg_installed(self.node, self.name) return { 'installed': bool(install_status), 'version': None if install_status is False else install_status, } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) if 'version' in attributes and attributes.get('installed', True) is False: raise BundleError(_( "cannot set version for uninstalled package on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) def split_path(pkgname): virtualenv, pkgname = split(pkgname) pip_path = join(virtualenv, "bin", "pip") if virtualenv else "pip" return pip_path, pkgname bundlewrap-3.8.0/bundlewrap/items/pkg_snap.py000066400000000000000000000016161360562404000213310ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.items.pkg import Pkg class SnapPkg(Pkg): """ A package installed by snap. """ BUNDLE_ATTRIBUTE_NAME = "pkg_snap" ITEM_TYPE_NAME = "pkg_snap" def pkg_all_installed(self): result = self.node.run("snap list") for line in result.stdout.decode('utf-8').strip().split("\n"): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(" ")[0]) def pkg_install(self): self.node.run("snap install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.node.run( "snap list {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.node.run("snap remove {}".format(quote(self.name)), may_fail=True) bundlewrap-3.8.0/bundlewrap/items/pkg_yum.py000066400000000000000000000020611360562404000211750ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.items.pkg import Pkg class YumPkg(Pkg): """ A package installed by yum. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_yum" ITEM_TYPE_NAME = "pkg_yum" @classmethod def block_concurrent(cls, node_os, node_os_version): return ["pkg_dnf", "pkg_yum"] def pkg_all_installed(self): result = self.node.run("yum -d0 -e0 list installed") for line in result.stdout.decode('utf-8').strip().split("\n"): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) def pkg_install(self): self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.node.run( "yum -d0 -e0 list installed {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) bundlewrap-3.8.0/bundlewrap/items/pkg_zypper.py000066400000000000000000000037741360562404000217300ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ ZYPPER_OPTS = "--non-interactive " + \ "--non-interactive-include-reboot-patches " + \ "--quiet" def pkg_install(node, pkgname): return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True) def pkg_installed(node, pkgname): result = node.run( "zypper search --match-exact --installed-only " "--type package {}".format(quote(pkgname)), may_fail=True, ) if result.return_code != 0: return False else: return True def pkg_remove(node, pkgname): return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True) class ZypperPkg(Item): """ A package installed by zypper. """ BUNDLE_ATTRIBUTE_NAME = "pkg_zypper" ITEM_ATTRIBUTES = { 'installed': True, } ITEM_TYPE_NAME = "pkg_zypper" @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def __repr__(self): return "".format( self.name, self.attributes['installed'], ) def fix(self, status): if self.attributes['installed'] is False: pkg_remove(self.node, self.name) else: pkg_install(self.node, self.name) def sdict(self): return { 'installed': pkg_installed(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/postgres_dbs.py000066400000000000000000000064741360562404000222340ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import force_text, mark_for_translation as _ def create_db(node, name, owner, when_creating): template = None cmd = "sudo -u postgres createdb -wO {} ".format(owner) if when_creating.get('collation') is not None: cmd += "--lc-collate={} ".format(when_creating['collation']) template = "template0" if when_creating.get('ctype') is not None: cmd += "--lc-ctype={} ".format(when_creating['ctype']) template = "template0" if when_creating.get('encoding') is not None: cmd += "--encoding={} ".format(when_creating['encoding']) template = "template0" if template is not None: cmd += "--template={} ".format(template) cmd += name return node.run(cmd) def drop_db(node, name): return node.run("sudo -u postgres dropdb -w {}".format(quote(name))) def 
get_databases(node): output = node.run("echo '\\l' | sudo -u postgres psql -Anqt -F '|' | grep '|'").stdout result = {} for line in force_text(output).strip().split("\n"): db, owner = line.strip().split("|", 2)[:2] result[db] = { 'owner': owner, } return result def set_owner(node, name, owner): return node.run( "echo 'ALTER DATABASE \"{name}\" OWNER TO \"{owner}\"' | " "sudo -u postgres psql -nqw".format( name=name, owner=owner, ), ) class PostgresDB(Item): """ A postgres database. """ BUNDLE_ATTRIBUTE_NAME = "postgres_dbs" ITEM_ATTRIBUTES = { 'delete': False, 'owner': "postgres", } ITEM_TYPE_NAME = "postgres_db" WHEN_CREATING_ATTRIBUTES = { 'collation': None, 'ctype': None, 'encoding': None, } def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None else: return {'owner': self.attributes['owner']} def fix(self, status): if status.must_be_deleted: drop_db(self.node, self.name) elif status.must_be_created: create_db(self.node, self.name, self.attributes['owner'], self.when_creating) elif 'owner' in status.keys_to_fix: set_owner(self.node, self.name, self.attributes['owner']) else: raise AssertionError("this shouldn't happen") def get_auto_deps(self, items): deps = [] for item in items: if item.ITEM_TYPE_NAME == "postgres_role" and item.name == self.attributes['owner']: deps.append(item.id) return deps def sdict(self): databases = get_databases(self.node) if self.name not in databases: return None else: return {'owner': databases[self.name]['owner']} @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('delete', True), bool): raise BundleError(_( "expected boolean for 'delete' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/postgres_roles.py000066400000000000000000000076171360562404000226100ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from passlib.apps import postgres_context from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import force_text, mark_for_translation as _ AUTHID_COLUMNS = { "rolcanlogin": 'can_login', "rolsuper": 'superuser', "rolpassword": 'password_hash', } def delete_role(node, role): node.run("sudo -u postgres dropuser -w {}".format(role)) def fix_role(node, role, attrs, create=False): password = " PASSWORD '{}'".format(attrs['password_hash']) node.run( "echo \"{operation} ROLE \\\"{role}\\\" WITH LOGIN {superuser}SUPERUSER{password}\" " "| sudo -u postgres psql -nqw".format( operation="CREATE" if create else "ALTER", password="" if attrs['password_hash'] is None else password, role=role, superuser="" if attrs['superuser'] is True else "NO", ) ) def get_role(node, role): result = node.run("echo \"SELECT rolcanlogin, rolsuper, rolpassword from pg_authid " "WHERE rolname='{}'\" " "| sudo -u postgres psql -Anqwx -F '|'".format(role)) role_attrs = {} for line in force_text(result.stdout).strip().split("\n"): try: key, value = line.split("|") except ValueError: pass else: role_attrs[AUTHID_COLUMNS[key]] = value for bool_attr in ('can_login', 'superuser'): if bool_attr in role_attrs: role_attrs[bool_attr] = role_attrs[bool_attr] == "t" return role_attrs if role_attrs else None class PostgresRole(Item): """ A postgres role. 
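Example bundle usage (a sketch; role name and credentials are
illustrative — define either 'password' or 'password_hash'):

    postgres_roles = {
        "webapp": {
            "password": "correct horse battery staple",
            "superuser": False,
        },
        "olduser": {
            "delete": True,
        },
    }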
""" BUNDLE_ATTRIBUTE_NAME = "postgres_roles" ITEM_ATTRIBUTES = { 'can_login': True, 'delete': False, 'password': None, 'password_hash': None, 'superuser': False, } ITEM_TYPE_NAME = "postgres_role" def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None cdict = self.attributes.copy() del cdict['delete'] del cdict['password'] return cdict def fix(self, status): if status.must_be_deleted: delete_role(self.node, self.name) elif status.must_be_created: fix_role(self.node, self.name, self.attributes, create=True) else: fix_role(self.node, self.name, self.attributes) def sdict(self): return get_role(self.node, self.name) def patch_attributes(self, attributes): if 'password' in attributes: attributes['password_hash'] = postgres_context.encrypt( force_text(attributes['password']), user=self.name, ) return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not attributes.get('delete', False): if attributes.get('password') is None and attributes.get('password_hash') is None: raise BundleError(_( "expected either 'password' or 'password_hash' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) if attributes.get('password') is not None and attributes.get('password_hash') is not None: raise BundleError(_( "can't define both 'password' and 'password_hash' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) if not isinstance(attributes.get('delete', True), bool): raise BundleError(_( "expected boolean for 'delete' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/svc_openbsd.py000066400000000000000000000053011360562404000220270ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("/etc/rc.d/{} start".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run("/etc/rc.d/{} check".format(quote(svcname)), may_fail=True) return "ok" in result.stdout_text def svc_stop(node, svcname): return node.run("/etc/rc.d/{} stop".format(quote(svcname)), may_fail=True) def svc_enable(node, svcname): return node.run("rcctl set {} status on".format(quote(svcname)), may_fail=True) def svc_enabled(node, svcname): result = node.run( "rcctl ls on | grep '^{}$'".format(svcname), may_fail=True, ) return result.return_code == 0 def svc_disable(node, svcname): return node.run("rcctl set {} status off".format(quote(svcname)), may_fail=True) class SvcOpenBSD(Item): """ A service managed by OpenBSD rc.d. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_openbsd" ITEM_ATTRIBUTES = { 'running': True, 'enabled': True } ITEM_TYPE_NAME = "svc_openbsd" def __repr__(self): return "".format( self.name, self.attributes['running'], self.attributes['enabled'], ) def fix(self, status): if 'enabled' in status.keys_to_fix: if self.attributes['enabled'] is False: svc_disable(self.node, self.name) else: svc_enable(self.node, self.name) if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'restart': { 'command': "/etc/rc.d/{} restart".format(self.name), 'needs': [self.id], }, 'stopstart': { 'command': "/etc/rc.d/{0} stop && /etc/rc.d/{0} start".format(self.name), 'needs': [self.id], }, } def sdict(self): return { 'enabled': svc_enabled(self.node, self.name), 'running': svc_running(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/svc_systemd.py000066400000000000000000000062341360562404000220730ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import force_text, mark_for_translation as _ def svc_start(node, svcname): return node.run("systemctl start -- {}".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run( "systemctl status -- {}".format(quote(svcname)), may_fail=True, ) return result.return_code == 0 def svc_stop(node, svcname): return node.run("systemctl stop -- {}".format(quote(svcname)), may_fail=True) def svc_enable(node, svcname): return node.run("systemctl enable -- {}".format(quote(svcname)), may_fail=True) def svc_enabled(node, svcname): result = node.run( "systemctl is-enabled -- {}".format(quote(svcname)), may_fail=True, ) return ( result.return_code == 0 and force_text(result.stdout).strip() != "runtime-enabled" ) def svc_disable(node, svcname): return node.run("systemctl disable -- {}".format(quote(svcname)), may_fail=True) class SvcSystemd(Item): """ A service managed by systemd. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_systemd" ITEM_ATTRIBUTES = { 'enabled': True, 'running': True, } ITEM_TYPE_NAME = "svc_systemd" def __repr__(self): return "".format( self.name, self.attributes['enabled'], self.attributes['running'], ) def cdict(self): cdict = {} for option, value in self.attributes.items(): if value is not None: cdict[option] = value return cdict def fix(self, status): if 'enabled' in status.keys_to_fix: if self.attributes['enabled']: svc_enable(self.node, self.name) else: svc_disable(self.node, self.name) if 'running' in status.keys_to_fix: if self.attributes['running']: svc_start(self.node, self.name) else: svc_stop(self.node, self.name) def get_canned_actions(self): return { 'reload': { 'command': "systemctl reload -- {}".format(self.name), 'needs': [self.id], }, 'restart': { 'command': "systemctl restart -- {}".format(self.name), 'needs': [self.id], }, } def sdict(self): return { 'enabled': svc_enabled(self.node, self.name), 'running': svc_running(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): for attribute in ('enabled', 'running'): if attributes.get(attribute, None) not in (True, False, None): raise BundleError(_( "expected boolean or None for '{attribute}' on {item} in bundle '{bundle}'" ).format( attribute=attribute, bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/svc_systemv.py000066400000000000000000000037201360562404000221120ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("/etc/init.d/{} start".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run( "/etc/init.d/{} status".format(quote(svcname)), may_fail=True, ) return result.return_code == 0 def svc_stop(node, svcname): return node.run("/etc/init.d/{} stop".format(quote(svcname)), may_fail=True) class SvcSystemV(Item): """ A service managed by traditional System V init scripts. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_systemv" ITEM_ATTRIBUTES = { 'running': True, } ITEM_TYPE_NAME = "svc_systemv" def __repr__(self): return "".format( self.name, self.attributes['running'], ) def fix(self, status): if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'reload': { 'command': "/etc/init.d/{} reload".format(self.name), 'needs': [self.id], }, 'restart': { 'command': "/etc/init.d/{} restart".format(self.name), 'needs': [self.id], }, } def sdict(self): return {'running': svc_running(self.node, self.name)} @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/svc_upstart.py000066400000000000000000000041511360562404000221010ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("initctl start --no-wait -- {}".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run("initctl status -- {}".format(quote(svcname)), may_fail=True) if result.return_code != 0: return False return " start/" in result.stdout_text def svc_stop(node, svcname): return node.run("initctl stop --no-wait -- {}".format(quote(svcname)), may_fail=True) class SvcUpstart(Item): """ A service managed by Upstart. """ BUNDLE_ATTRIBUTE_NAME = "svc_upstart" ITEM_ATTRIBUTES = { 'running': True, } ITEM_TYPE_NAME = "svc_upstart" def __repr__(self): return "".format( self.name, self.attributes['running'], ) def fix(self, status): if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'reload': { 'command': "reload {}".format(self.name), 'needs': [self.id], }, 'restart': { 'command': "restart {}".format(self.name), 'needs': [self.id], }, 'stopstart': { 'command': "stop {0} && start {0}".format(self.name), 'needs': [self.id], }, } def sdict(self): return {'running': svc_running(self.node, self.name)} @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-3.8.0/bundlewrap/items/symlinks.py000066400000000000000000000146071360562404000214040ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from collections import defaultdict from os.path import dirname, normpath from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.remote import PathInfo from bundlewrap.utils.text import mark_for_translation as _ from bundlewrap.utils.text import is_subdirectory ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) class Symlink(Item): """ A symbolic link. 
""" BUNDLE_ATTRIBUTE_NAME = "symlinks" ITEM_ATTRIBUTES = { 'group': "root", 'owner': "root", 'target': None, } ITEM_TYPE_NAME = "symlink" REQUIRED_ATTRIBUTES = ['target'] def __repr__(self): return "".format( quote(self.name), self.attributes['target'], ) def cdict(self): cdict = { 'target': self.attributes['target'], 'type': 'symlink', } for optional_attr in ('group', 'owner'): if self.attributes[optional_attr] is not None: cdict[optional_attr] = self.attributes[optional_attr] return cdict def fix(self, status): if status.must_be_created or 'type' in status.keys_to_fix: # fixing the type fixes everything self._fix_type(status) return for fix_type in ('target', 'owner', 'group'): if fix_type in status.keys_to_fix: if fix_type == 'group' and 'owner' in status.keys_to_fix: # owner and group are fixed with a single chown continue getattr(self, "_fix_" + fix_type)(status) def _fix_owner(self, status): group = self.attributes['group'] or "" if group: group = ":" + quote(group) if self.node.os in self.node.OS_FAMILY_BSD: command = "chown -h {}{} {}" else: command = "chown -h {}{} -- {}" self.node.run(command.format( quote(self.attributes['owner'] or ""), group, quote(self.name), )) _fix_group = _fix_owner def _fix_target(self, status): if self.node.os in self.node.OS_FAMILY_BSD: self.node.run("ln -sfh -- {} {}".format( quote(self.attributes['target']), quote(self.name), )) else: self.node.run("ln -sfT -- {} {}".format( quote(self.attributes['target']), quote(self.name), )) def _fix_type(self, status): self.node.run("rm -rf -- {}".format(quote(self.name))) self.node.run("mkdir -p -- {}".format(quote(dirname(self.name)))) self.node.run("ln -s -- {} {}".format( quote(self.attributes['target']), quote(self.name), )) if self.attributes['owner'] or self.attributes['group']: self._fix_owner(status) def get_auto_deps(self, items): deps = [] for item in items: if item == self: continue if item.ITEM_TYPE_NAME == "file" and ( is_subdirectory(item.name, self.name) or item.name == self.name ): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME in ("directory", "symlink"): if is_subdirectory(item.name, self.name): deps.append(item.id) return deps def patch_attributes(self, attributes): if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: # BSD doesn't have a root group, so we have to use a # different default value here attributes['group'] = 'wheel' return attributes def sdict(self): path_info = PathInfo(self.node, self.name) if not path_info.exists: return None else: return { 'target': path_info.symlink_target if path_info.is_symlink else "", 'type': 'symlink' if path_info.is_symlink else 
path_info.stat['type'], 'owner': path_info.owner, 'group': path_info.group, } @classmethod def validate_attributes(cls, bundle, item_id, attributes): for key, value in attributes.items(): ATTRIBUTE_VALIDATORS[key](item_id, value) @classmethod def validate_name(cls, bundle, name): if normpath(name) == "/": raise BundleError(_("'/' cannot be a file")) if normpath(name) != name: raise BundleError(_( "'{path}' is an invalid symlink path, should be '{normpath}' (bundle '{bundle}')" ).format( path=name, normpath=normpath(name), bundle=bundle.name, )) bundlewrap-3.8.0/bundlewrap/items/users.py000066400000000000000000000267021360562404000206730ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from logging import ERROR, getLogger from pipes import quote from string import ascii_lowercase, digits from passlib.hash import bcrypt, md5_crypt, sha256_crypt, sha512_crypt from bundlewrap.exceptions import BundleError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.utils.text import force_text, mark_for_translation as _ getLogger('passlib').setLevel(ERROR) _ATTRIBUTE_NAMES = { 'full_name': _("full name"), 'gid': _("GID"), 'groups': _("groups"), 'home': _("home dir"), 'password_hash': _("password hash"), 'shell': _("shell"), 'uid': _("UID"), } _ATTRIBUTE_OPTIONS = { 'full_name': "-c", 'gid': "-g", 'groups': "-G", 'home': "-d", 'password_hash': "-p", 'shell': "-s", 'uid': "-u", } # a random static salt if users don't provide one _DEFAULT_SALT = "uJzJlYdG" # bcrypt needs special salts. 22 characters long, ending in ".", "O", "e", "u" # see https://bitbucket.org/ecollins/passlib/issues/25 _DEFAULT_BCRYPT_SALT = "oo2ahgheen9Tei0IeJohTO" HASH_METHODS = { 'md5': md5_crypt, 'sha256': sha256_crypt, 'sha512': sha512_crypt, 'bcrypt': bcrypt } _USERNAME_VALID_CHARACTERS = ascii_lowercase + digits + "-_" def _group_name_for_gid(node, gid): """ Returns the group name that matches the gid. """ group_output = node.run("grep -e ':{}:[^:]*$' /etc/group".format(gid), may_fail=True) if group_output.return_code != 0: return None else: return group_output.stdout_text.split(":")[0] def _groups_for_user(node, username): """ Returns the list of group names for the given username on the given node. """ groups = node.run("id -Gn {}".format(username)).stdout_text.strip().split(" ") primary_group = node.run("id -gn {}".format(username)).stdout_text.strip() groups.remove(primary_group) return groups def _parse_passwd_line(line, entries): """ Parses a line from /etc/passwd and returns the information as a dictionary. """ result = dict(zip( entries, line.strip().split(":"), )) result['full_name'] = result['gecos'].split(",")[0] return result class User(Item): """ A user account. 
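
    A hypothetical items.py entry (all values invented for
    illustration); note that validate_attributes() below rejects
    combining 'password_hash' with 'password' or 'salt':

        users = {
            "deploy": {
                'full_name': "Deployment User",
                'groups': ["www-data"],
                'home': "/home/deploy",
                'password': "changeme",
                'shell': "/bin/bash",
            },
        }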
""" BUNDLE_ATTRIBUTE_NAME = "users" ITEM_ATTRIBUTES = { 'delete': False, 'full_name': None, 'gid': None, 'groups': None, 'hash_method': 'sha512', 'home': None, 'password': None, 'password_hash': None, 'salt': None, 'shell': None, 'uid': None, 'use_shadow': None, } ITEM_TYPE_NAME = "user" @classmethod def block_concurrent(cls, node_os, node_os_version): # https://github.com/bundlewrap/bundlewrap/issues/367 if node_os == 'openbsd': return [cls.ITEM_TYPE_NAME] else: return [] def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None cdict = self.attributes.copy() del cdict['delete'] del cdict['hash_method'] del cdict['password'] del cdict['salt'] del cdict['use_shadow'] for key in list(cdict.keys()): if cdict[key] is None: del cdict[key] if 'groups' in cdict: cdict['groups'] = set(cdict['groups']) return cdict def fix(self, status): if status.must_be_deleted: self.node.run("userdel {}".format(self.name), may_fail=True) else: command = "useradd " if status.must_be_created else "usermod " for attr, option in sorted(_ATTRIBUTE_OPTIONS.items()): if (attr in status.keys_to_fix or status.must_be_created) and \ self.attributes[attr] is not None: if attr == 'groups': value = ",".join(self.attributes[attr]) else: value = str(self.attributes[attr]) command += "{} {} ".format(option, quote(value)) command += self.name self.node.run(command, may_fail=True) def display_dicts(self, cdict, sdict, keys): for attr_name, attr_display_name in _ATTRIBUTE_NAMES.items(): if attr_name == attr_display_name: # Don't change anything; the `del`s below would # always remove the key entirely! continue try: keys.remove(attr_name) except ValueError: pass else: keys.append(attr_display_name) cdict[attr_display_name] = cdict[attr_name] sdict[attr_display_name] = sdict[attr_name] del cdict[attr_name] del sdict[attr_name] return (cdict, sdict, keys) def get_auto_deps(self, items): deps = [] groups = self.attributes['groups'] or [] for item in items: if item.ITEM_TYPE_NAME == "group": if not (item.name in groups or ( self.attributes['gid'] in [item.attributes['gid'], item.name] and self.attributes['gid'] is not None )): # we don't need to depend on this group continue elif item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) return deps def sdict(self): # verify content of /etc/passwd if self.node.os in self.node.OS_FAMILY_BSD: password_command = "grep -ae '^{}:' /etc/master.passwd" else: password_command = "grep -ae '^{}:' /etc/passwd" passwd_grep_result = self.node.run( password_command.format(self.name), may_fail=True, ) if passwd_grep_result.return_code != 0: return None if self.node.os in self.node.OS_FAMILY_BSD: entries = ( 'username', 'passwd_hash', 'uid', 'gid', 'class', 'change', 'expire', 'gecos', 'home', 'shell', ) else: entries = ('username', 'passwd_hash', 'uid', 'gid', 'gecos', 'home', 'shell') sdict = _parse_passwd_line(passwd_grep_result.stdout_text, entries) if self.attributes['gid'] is not None and not self.attributes['gid'].isdigit(): sdict['gid'] = _group_name_for_gid(self.node, sdict['gid']) if self.attributes['password_hash'] is not None: if self.attributes['use_shadow'] and self.node.os not in self.node.OS_FAMILY_BSD: # verify content of /etc/shadow unless we are on OpenBSD shadow_grep_result = self.node.run( "grep -e '^{}:' 
/etc/shadow".format(self.name), may_fail=True, ) if shadow_grep_result.return_code != 0: sdict['password_hash'] = None else: sdict['password_hash'] = shadow_grep_result.stdout_text.split(":")[1] else: sdict['password_hash'] = sdict['passwd_hash'] del sdict['passwd_hash'] # verify content of /etc/group sdict['groups'] = set(_groups_for_user(self.node, self.name)) return sdict def patch_attributes(self, attributes): if attributes.get('password', None) is not None: # defaults aren't set yet hash_method = HASH_METHODS[attributes.get( 'hash_method', self.ITEM_ATTRIBUTES['hash_method'], )] salt = attributes.get('salt', None) if self.node.os in self.node.OS_FAMILY_BSD: attributes['password_hash'] = bcrypt.encrypt( force_text(attributes['password']), rounds=8, # default rounds for OpenBSD accounts salt=_DEFAULT_BCRYPT_SALT if salt is None else salt, ) elif attributes.get('hash_method') == 'md5': attributes['password_hash'] = hash_method.encrypt( force_text(attributes['password']), salt=_DEFAULT_SALT if salt is None else salt, ) else: attributes['password_hash'] = hash_method.encrypt( force_text(attributes['password']), rounds=5000, # default from glibc salt=_DEFAULT_SALT if salt is None else salt, ) if 'use_shadow' not in attributes: attributes['use_shadow'] = self.node.use_shadow_passwords for attr in ('gid', 'uid'): if isinstance(attributes.get(attr), int): attributes[attr] = str(attributes[attr]) return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) if 'hash_method' in attributes and \ attributes['hash_method'] not in HASH_METHODS: raise BundleError( _("Invalid hash method for {item} in bundle '{bundle}': '{method}'").format( bundle=bundle.name, item=item_id, method=attributes['hash_method'], ) ) if 'password_hash' in attributes and ( 'password' in attributes or 'salt' in attributes ): raise BundleError(_( "{item} in bundle '{bundle}': 'password_hash' " "cannot be used with 'password' or 'salt'" ).format(bundle=bundle.name, item=item_id)) if 'salt' in attributes and 'password' not in attributes: raise BundleError( _("{}: salt given without a password").format(item_id) ) @classmethod def validate_name(cls, bundle, name): for char in name: if char not in _USERNAME_VALID_CHARACTERS: raise BundleError(_( "Invalid character in username '{user}': {char} (bundle '{bundle}')" ).format(bundle=bundle.name, char=char, user=name)) if name.endswith("_") or name.endswith("-"): raise BundleError(_( "Username '{user}' must not end in dash or underscore (bundle '{bundle}')" ).format(bundle=bundle.name, user=name)) if len(name) > 30: raise BundleError(_( "Username '{user}' is longer than 30 characters (bundle '{bundle}')" ).format(bundle=bundle.name, user=name)) bundlewrap-3.8.0/bundlewrap/lock.py000066400000000000000000000176411360562404000173430ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime from getpass import getuser import json from os import environ from pipes import quote from socket import gethostname from time import time from .exceptions import NodeLockedException, RemoteException from .utils import cached_property, tempfile from .utils.text import ( blue, bold, format_duration, format_timestamp, mark_for_translation as 
_, parse_duration, red, wrap_question, ) from .utils.ui import io HARD_LOCK_PATH = "/tmp/bundlewrap.lock" HARD_LOCK_FILE = HARD_LOCK_PATH + "/info" SOFT_LOCK_PATH = "/tmp/bundlewrap.softlock.d" SOFT_LOCK_FILE = "/tmp/bundlewrap.softlock.d/{id}" def get_hard_lock_info(node, local_path): try: node.download(HARD_LOCK_FILE, local_path) with open(local_path, 'r') as fp: return json.load(fp) except (RemoteException, ValueError): io.stderr(_( "{x} {node_bold} corrupted hard lock: " "unable to read or parse lock file contents " "(clear it with `bw run {node} 'rm -Rf {path}'`)" ).format( node_bold=bold(node.name), node=node.name, path=HARD_LOCK_PATH, x=red("!"), )) return {} def identity(): return environ.get('BW_IDENTITY', "{}@{}".format( getuser(), gethostname(), )) class NodeLock(object): def __init__(self, node, interactive=False, ignore=False): self.node = node self.ignore = ignore self.interactive = interactive def __enter__(self): if self.node.os not in self.node.OS_FAMILY_UNIX: # no locking required/possible return self with tempfile() as local_path: if not self.ignore: with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))): result = self.node.run("mkdir " + quote(HARD_LOCK_PATH), may_fail=True) if result.return_code != 0: info = get_hard_lock_info(self.node, local_path) expired = False try: d = info['date'] except KeyError: info['date'] = _("") info['duration'] = _("") else: duration = datetime.now() - datetime.fromtimestamp(d) info['date'] = format_timestamp(d) info['duration'] = format_duration(duration) if duration > parse_duration(environ.get('BW_HARDLOCK_EXPIRY', "8h")): expired = True io.debug("ignoring expired hard lock on {}".format(self.node.name)) if 'user' not in info: info['user'] = _("") if expired or self.ignore or (self.interactive and io.ask( self._warning_message_hard(info), False, epilogue=blue("?") + " " + bold(self.node.name), )): pass else: raise NodeLockedException(info) with io.job(_("{node} uploading lock file").format(node=bold(self.node.name))): if self.ignore: self.node.run("mkdir -p " + quote(HARD_LOCK_PATH)) with open(local_path, 'w') as f: f.write(json.dumps({ 'date': time(), 'user': identity(), })) self.node.upload(local_path, HARD_LOCK_FILE) return self def __exit__(self, type, value, traceback): if self.node.os not in self.node.OS_FAMILY_UNIX: # no locking required/possible return with io.job(_("{node} removing hard lock").format(node=bold(self.node.name))): result = self.node.run("rm -R {}".format(quote(HARD_LOCK_PATH)), may_fail=True) if result.return_code != 0: io.stderr(_("{x} {node} could not release hard lock").format( node=bold(self.node.name), x=red("!"), )) def _warning_message_hard(self, info): return wrap_question( red(_("NODE LOCKED")), _( "Looks like somebody is currently using BundleWrap on this node.\n" "You should let them finish or override the lock if it has gone stale.\n" "\n" "locked by {user}\n" " since {date} ({duration} ago)" ).format( user=bold(info['user']), date=info['date'], duration=info['duration'], ), bold(_("Override lock?")), prefix="{x} {node} ".format(node=bold(self.node.name), x=blue("?")), ) @cached_property def soft_locks(self): return softlock_list(self.node) @cached_property def my_soft_locks(self): for lock in self.soft_locks: if lock['user'] == identity(): yield lock @cached_property def other_peoples_soft_locks(self): for lock in self.soft_locks: if lock['user'] != identity(): yield lock def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None): assert node.os in 
node.OS_FAMILY_UNIX if "\n" in comment: raise ValueError(_("Lock comments must not contain any newlines")) if not item_selectors: item_selectors = ["*"] expiry_timedelta = parse_duration(expiry) now = time() expiry_timestamp = now + expiry_timedelta.days * 86400 + expiry_timedelta.seconds content = json.dumps({ 'comment': comment, 'date': now, 'expiry': expiry_timestamp, 'id': lock_id, 'items': item_selectors, 'user': identity(), }, indent=None, sort_keys=True) with tempfile() as local_path: with open(local_path, 'w') as f: f.write(content + "\n") node.run("mkdir -p " + quote(SOFT_LOCK_PATH)) node.upload(local_path, SOFT_LOCK_FILE.format(id=lock_id), mode='0644') node.repo.hooks.lock_add(node.repo, node, lock_id, item_selectors, expiry_timestamp, comment) return lock_id def softlock_list(node): if node.os not in node.OS_FAMILY_UNIX: return [] with io.job(_("{} checking soft locks").format(bold(node.name))): cat = node.run("cat {}".format(SOFT_LOCK_FILE.format(id="*")), may_fail=True) if cat.return_code != 0: return [] result = [] for line in cat.stdout.decode('utf-8').strip().split("\n"): try: result.append(json.loads(line.strip())) except json.decoder.JSONDecodeError: io.stderr(_( "{x} {node} unable to parse soft lock file contents, ignoring: {line}" ).format( x=red("!"), node=bold(node.name), line=line.strip(), )) for lock in result[:]: if lock['expiry'] < time(): io.debug(_("removing expired soft lock {id} from node {node}").format( id=lock['id'], node=node.name, )) softlock_remove(node, lock['id']) result.remove(lock) return result def softlock_remove(node, lock_id): assert node.os in node.OS_FAMILY_UNIX io.debug(_("removing soft lock {id} from node {node}").format( id=lock_id, node=node.name, )) node.run("rm {}".format(SOFT_LOCK_FILE.format(id=lock_id))) node.repo.hooks.lock_remove(node.repo, node, lock_id) bundlewrap-3.8.0/bundlewrap/metadata.py000066400000000000000000000311551360562404000201670ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from copy import copy from hashlib import sha1 from json import dumps, JSONEncoder from .exceptions import RepositoryError from .utils import Fault from .utils.dicts import ATOMIC_TYPES, map_dict_keys, merge_dict, value_at_key_path from .utils.text import force_text, mark_for_translation as _ try: text_type = unicode byte_type = str except NameError: text_type = str byte_type = bytes METADATA_TYPES = ( bool, byte_type, Fault, int, text_type, type(None), ) # constants returned as options by metadata processors DONE = 1 RUN_ME_AGAIN = 2 DEFAULTS = 3 OVERWRITE = 4 def atomic(obj): """ Wraps a compatible object in a custom class to prevent it from being merged with another object of the same type during metadata compilation. 
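
    A minimal sketch of usage in a metadata dict (the key and value
    are invented for illustration):

        'interfaces': atomic(["eth0"]),

    During a merge, the wrapped list then replaces any inherited
    list for the same key instead of being combined with it.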
""" try: cls = ATOMIC_TYPES[type(obj)] except KeyError: raise ValueError("atomic() can only be applied to dicts, lists, sets, or tuples " "(not: {})".format(repr(obj))) else: return cls(obj) def blame_changed_paths(old_dict, new_dict, blame_dict, blame_name, defaults=False): def is_mergeable(value1, value2): if isinstance(value1, (list, set, tuple)) and isinstance(value2, (list, set, tuple)): return True elif isinstance(value1, dict) and isinstance(value2, dict): return True return False new_paths = map_dict_keys(new_dict) # clean up removed paths from blame_dict for path in list(blame_dict.keys()): if path not in new_paths: del blame_dict[path] for path in new_paths: new_value = value_at_key_path(new_dict, path) try: old_value = value_at_key_path(old_dict, path) except KeyError: blame_dict[path] = (blame_name,) else: if old_value != new_value: if defaults or is_mergeable(old_value, new_value): blame_dict[path] += (blame_name,) else: blame_dict[path] = (blame_name,) return blame_dict def check_metadata_keys(node): try: basestring except NameError: # Python 2 basestring = str for path in map_dict_keys(node.metadata): value = path[-1] if not isinstance(value, basestring): raise TypeError(_("metadata key for {node} at path '{path}' is not a string").format( node=node.name, path="'->'".join(path[:-1]), )) def check_metadata_processor_result(input_metadata, result, node_name, metadata_processor_name): """ Validates the return value of a metadata processor and splits it into metadata and options. """ if not isinstance(result, tuple) or not len(result) >= 2: raise ValueError(_( "metadata processor {metaproc} for node {node} did not return " "a tuple of length 2 or greater" ).format( metaproc=metadata_processor_name, node=node_name, )) result_dict, options = result[0], result[1:] if not isinstance(result_dict, dict): raise ValueError(_( "metadata processor {metaproc} for node {node} did not return " "a dict as the first element" ).format( metaproc=metadata_processor_name, node=node_name, )) if ( (DEFAULTS in options or OVERWRITE in options) and id(input_metadata) == id(result_dict) ): raise ValueError(_( "metadata processor {metaproc} for node {node} returned original " "metadata dict plus DEFAULTS or OVERWRITE" ).format( metaproc=metadata_processor_name, node=node_name, )) for option in options: if option not in (DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE): raise ValueError(_( "metadata processor {metaproc} for node {node} returned an " "invalid option: {opt}" ).format( metaproc=metadata_processor_name, node=node_name, opt=repr(option), )) if DONE in options and RUN_ME_AGAIN in options: raise ValueError(_( "metadata processor {metaproc} for node {node} cannot return both " "DONE and RUN_ME_AGAIN" ).format( metaproc=metadata_processor_name, node=node_name, )) if DONE not in options and RUN_ME_AGAIN not in options: raise ValueError(_( "metadata processor {metaproc} for node {node} must return either " "DONE or RUN_ME_AGAIN" ).format( metaproc=metadata_processor_name, node=node_name, )) if DEFAULTS in options and OVERWRITE in options: raise ValueError(_( "metadata processor {metaproc} for node {node} cannot return both " "DEFAULTS and OVERWRITE" ).format( metaproc=metadata_processor_name, node=node_name, )) return result_dict, options def check_for_unsolvable_metadata_key_conflicts(node): """ Finds metadata keys defined by two groups that are not part of a shared subgroup hierarchy. """ # First, we build a list of subgroup chains. 
# # A chain is simply a list of groups starting with a parent group # that has no parent groups itself and then descends depth-first # into its subgroups until a subgroup is reached that the node is # not a member of. # Every possible path on every subgroup tree is a separate chain. # # group4 # / \ # group2 group3 # \ / # group1 # # This example has two chains, even though both start and end at the # some groups: # # group1 -> group2 -> group4 # group1 -> group3 -> group4 # # find all groups whose subgroups this node is *not* a member of lowest_subgroups = set() for group in node.groups: in_subgroup = False for subgroup in group.subgroups: if subgroup in node.groups: in_subgroup = True break if not in_subgroup: lowest_subgroups.add(group) chains = [] incomplete_chains = [[group] for group in lowest_subgroups] while incomplete_chains: for chain in incomplete_chains[:]: highest_group = chain[-1] if list(highest_group.parent_groups): chain_so_far = chain[:] # continue this chain with the first parent group chain.append(list(highest_group.parent_groups)[0]) # further parent groups form new chains for further_parents in list(highest_group.parent_groups)[1:]: new_chain = chain_so_far[:] new_chain.append(further_parents) incomplete_chains.append(new_chain) else: # chain has ended chains.append(chain) incomplete_chains.remove(chain) # chains now look like this (parents right of children): # [ # [group1], # [group2, group3, group5], # [group2, group4, group5], # [group2, group4, group6, group7], # ] # let's merge metadata for each chain chain_metadata = [] for chain in chains: metadata = {} for group in chain: metadata = merge_dict(metadata, group.metadata) chain_metadata.append(metadata) # create a "key path map" for each chain's metadata chain_metadata_keys = [list(map_dict_keys(metadata)) for metadata in chain_metadata] # compare all metadata keys with other chains and find matches for index1, keymap1 in enumerate(chain_metadata_keys): for keypath in keymap1: for index2, keymap2 in enumerate(chain_metadata_keys): if index1 == index2: # same keymap, don't compare continue else: if keypath in keymap2: if ( type(value_at_key_path(chain_metadata[index1], keypath)) == type(value_at_key_path(chain_metadata[index2], keypath)) and type(value_at_key_path(chain_metadata[index2], keypath)) in (set, dict) ): continue # We now know that there is a conflict between the first # and second chain we're looking at right now. # That is however not a problem if the conflict is caused # by a group that is present in both chains. # So all that's left is to figure out which two single groups # within those chains are at fault so we can report them # to the user if necessary. find_groups_causing_metadata_conflict( node.name, chains[index1], chains[index2], keypath, ) def deepcopy_metadata(obj): """ Our own version of deepcopy.copy that doesn't pickle and ensures a limited range of types is used in metadata. 
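
    Illustrative sketch of the contract (values invented): anything
    outside METADATA_TYPES and plain dicts/lists/tuples/sets is
    rejected instead of being silently pickled:

        deepcopy_metadata({'port': 22, 'tags': {"web"}})  # returns a copy
        deepcopy_metadata({'conn': object()})  # raises ValueError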
""" if isinstance(obj, METADATA_TYPES): return obj elif isinstance(obj, dict): if isinstance(obj, ATOMIC_TYPES[dict]): new_obj = atomic({}) else: new_obj = {} for key, value in obj.items(): if not isinstance(key, METADATA_TYPES): raise ValueError(_("illegal metadata key type: {}").format(repr(key))) new_key = copy(key) new_obj[new_key] = deepcopy_metadata(value) elif isinstance(obj, (list, tuple)): if isinstance(obj, (ATOMIC_TYPES[list], ATOMIC_TYPES[tuple])): new_obj = atomic([]) else: new_obj = [] for member in obj: new_obj.append(deepcopy_metadata(member)) elif isinstance(obj, set): if isinstance(obj, ATOMIC_TYPES[set]): new_obj = atomic(set()) else: new_obj = set() for member in obj: new_obj.add(deepcopy_metadata(member)) else: raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) return new_obj def find_groups_causing_metadata_conflict(node_name, chain1, chain2, keypath): """ Given two chains (lists of groups), find one group in each chain that has conflicting metadata with the other for the given key path. """ chain1_metadata = [list(map_dict_keys(group.metadata)) for group in chain1] chain2_metadata = [list(map_dict_keys(group.metadata)) for group in chain2] bad_keypath = None for index1, keymap1 in enumerate(chain1_metadata): for index2, keymap2 in enumerate(chain2_metadata): if chain1[index1] == chain2[index2]: # same group, ignore continue if ( keypath in keymap1 and keypath in keymap2 and chain1[index1] not in chain2[index2].subgroups and chain2[index2] not in chain1[index1].subgroups ): bad_keypath = keypath bad_group1 = chain1[index1] bad_group2 = chain2[index2] if bad_keypath is not None: raise RepositoryError(_( "Conflicting metadata keys between groups '{group1}' and '{group2}' on node '{node}':\n\n" " metadata['{keypath}']\n\n" "You must either connect both groups through subgroups or have them not define " "conflicting metadata keys. Otherwise there is no way for BundleWrap to determine " "which group's metadata should win when they are merged." ).format( keypath="']['".join(bad_keypath), group1=bad_group1.name, group2=bad_group2.name, node=node_name, )) class MetadataJSONEncoder(JSONEncoder): def default(self, obj): if isinstance(obj, Fault): return obj.value if isinstance(obj, set): return sorted(obj) if isinstance(obj, bytes): return force_text(obj) else: raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) def hash_metadata(sdict): """ Returns a canonical SHA1 hash to describe this dict. """ return sha1(dumps( sdict, cls=MetadataJSONEncoder, indent=None, sort_keys=True, ).encode('utf-8')).hexdigest() bundlewrap-3.8.0/bundlewrap/node.py000066400000000000000000001004261360562404000173320ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime, timedelta from hashlib import md5 from os import environ from threading import Lock from . 
import operations from .bundle import Bundle from .concurrency import WorkerPool from .deps import ( DummyItem, find_item, ) from .exceptions import ( DontCache, GracefulApplyException, ItemDependencyLoop, NodeLockedException, NoSuchBundle, RemoteException, RepositoryError, SkipNode, ) from .group import GROUP_ATTR_DEFAULTS from .itemqueue import ItemQueue from .items import Item from .lock import NodeLock from .metadata import hash_metadata from .utils import cached_property, names from .utils.dicts import hash_statedict from .utils.text import ( blue, bold, cyan, force_text, format_duration, green, mark_for_translation as _, red, validate_name, yellow, ) from .utils.ui import io class ApplyResult(object): """ Holds information about an apply run for a node. """ def __init__(self, node, item_results): self.node_name = node.name self.correct = 0 self.fixed = 0 self.skipped = 0 self.failed = 0 self.total = 0 for item_id, result, duration in item_results: self.total += 1 if result == Item.STATUS_ACTION_SUCCEEDED: self.correct += 1 elif result == Item.STATUS_OK: self.correct += 1 elif result == Item.STATUS_FIXED: self.fixed += 1 elif result == Item.STATUS_SKIPPED: self.skipped += 1 elif result == Item.STATUS_FAILED: self.failed += 1 else: raise RuntimeError(_( "can't make sense of results for {} on {}: {}" ).format(item_id, self.node_name, result)) self.start = None self.end = None @property def duration(self): return self.end - self.start def format_node_result(result): output = [] output.append(("{count} OK").format(count=result.correct)) if result.fixed: output.append(green(_("{count} fixed").format(count=result.fixed))) else: output.append(_("{count} fixed").format(count=result.fixed)) if result.skipped: output.append(yellow(_("{count} skipped").format(count=result.skipped))) else: output.append(_("{count} skipped").format(count=result.skipped)) if result.failed: output.append(red(_("{count} failed").format(count=result.failed))) else: output.append(_("{count} failed").format(count=result.failed)) return ", ".join(output) def handle_apply_result(node, item, status_code, interactive, details=None): if status_code == Item.STATUS_SKIPPED and details in ( Item.SKIP_REASON_NO_TRIGGER, Item.SKIP_REASON_UNLESS, ): # skipped for "unless" or "not triggered", don't output those return formatted_result = format_item_result( status_code, node.name, item.bundle.name if item.bundle else "", # dummy items don't have bundles item.id, interactive=interactive, details=details, ) if formatted_result is not None: if status_code == Item.STATUS_FAILED: io.stderr(formatted_result) else: io.stdout(formatted_result) def apply_items( node, autoskip_selector="", autoonly_selector="", my_soft_locks=(), other_peoples_soft_locks=(), workers=1, interactive=False, ): item_queue = ItemQueue(node.items, node.os, node.os_version) # the item queue might contain new generated items (canned actions, # dummy items); adjust progress total accordingly extra_items = len(item_queue.all_items) - len(node.items) io.progress_increase_total(increment=extra_items) results = [] def tasks_available(): return bool(item_queue.items_without_deps) def next_task(): item = item_queue.pop() return { 'task_id': "{}:{}".format(node.name, item.id), 'target': item.apply, 'kwargs': { 'autoskip_selector': autoskip_selector, 'autoonly_selector': autoonly_selector, 'my_soft_locks': my_soft_locks, 'other_peoples_soft_locks': other_peoples_soft_locks, 'interactive': interactive, }, } def handle_result(task_id, return_value, duration): item_id = 
task_id.split(":", 1)[1] item = find_item(item_id, item_queue.pending_items) status_code, details = return_value if status_code == Item.STATUS_FAILED: for skipped_item in item_queue.item_failed(item): handle_apply_result( node, skipped_item, Item.STATUS_SKIPPED, interactive, details=Item.SKIP_REASON_DEP_FAILED, ) results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) elif status_code in (Item.STATUS_FIXED, Item.STATUS_ACTION_SUCCEEDED): item_queue.item_fixed(item) elif status_code == Item.STATUS_OK: item_queue.item_ok(item) elif status_code == Item.STATUS_SKIPPED: for skipped_item in item_queue.item_skipped(item): skip_reason = Item.SKIP_REASON_DEP_SKIPPED for lock in other_peoples_soft_locks: for selector in lock['items']: if skipped_item.covered_by_autoskip_selector(selector): skip_reason = Item.SKIP_REASON_SOFTLOCK break handle_apply_result( node, skipped_item, Item.STATUS_SKIPPED, interactive, details=skip_reason, ) results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) else: raise AssertionError(_( "unknown item status returned for {item}: {status}".format( item=item.id, status=repr(status_code), ), )) handle_apply_result(node, item, status_code, interactive, details=details) io.progress_advance() if not isinstance(item, DummyItem): results.append((item.id, status_code, duration)) worker_pool = WorkerPool( tasks_available, next_task, handle_result=handle_result, pool_id="apply_{}".format(node.name), workers=workers, ) worker_pool.run() # we have no items without deps left and none are processing # there must be a loop if item_queue.items_with_deps: raise ItemDependencyLoop(item_queue.items_with_deps) return results def _flatten_group_hierarchy(groups): """ Takes a list of groups and returns a list of group names ordered so that parent groups will appear before any of their subgroups. 
""" # dict mapping groups to subgroups child_groups = {} for group in groups: child_groups[group.name] = list(names(group.subgroups)) # dict mapping groups to parent groups parent_groups = {} for child_group in child_groups.keys(): parent_groups[child_group] = [] for parent_group, subgroups in child_groups.items(): if child_group in subgroups: parent_groups[child_group].append(parent_group) order = [] while True: top_level_group = None for group, parents in parent_groups.items(): if parents: continue else: top_level_group = group break if not top_level_group: if parent_groups: raise RuntimeError( _("encountered subgroup loop that should have been detected") ) else: break order.append(top_level_group) del parent_groups[top_level_group] for group in parent_groups.keys(): if top_level_group in parent_groups[group]: parent_groups[group].remove(top_level_group) return order def format_item_result(result, node, bundle, item, interactive=False, details=None): if details is True: details_text = "({})".format(_("create")) elif details is False: details_text = "({})".format(_("remove")) elif details is None: details_text = "" elif result == Item.STATUS_SKIPPED: details_text = "({})".format(Item.SKIP_REASON_DESC[details]) else: details_text = "({})".format(", ".join(sorted(details))) if result == Item.STATUS_FAILED: return "{x} {node} {bundle} {item} {status} {details}".format( bundle=bold(bundle), details=details_text, item=item, node=bold(node), status=red(_("failed")), x=bold(red("✘")), ) elif result == Item.STATUS_ACTION_SUCCEEDED: return "{x} {node} {bundle} {item} {status}".format( bundle=bold(bundle), item=item, node=bold(node), status=green(_("succeeded")), x=bold(green("✓")), ) elif result == Item.STATUS_SKIPPED: return "{x} {node} {bundle} {item} {status} {details}".format( bundle=bold(bundle), details=details_text, item=item, node=bold(node), x=bold(yellow("»")), status=yellow(_("skipped")), ) elif result == Item.STATUS_FIXED: return "{x} {node} {bundle} {item} {status} {details}".format( bundle=bold(bundle), details=details_text, item=item, node=bold(node), x=bold(green("✓")), status=green(_("fixed")), ) class Node(object): OS_FAMILY_BSD = ( 'freebsd', 'macos', 'netbsd', 'openbsd', ) OS_FAMILY_DEBIAN = ( 'debian', 'ubuntu', 'raspbian', ) OS_FAMILY_REDHAT = ( 'rhel', 'centos', 'fedora', 'oraclelinux', ) OS_FAMILY_LINUX = ( 'amazonlinux', 'arch', 'opensuse', 'openwrt', 'gentoo', 'linux', ) + \ OS_FAMILY_DEBIAN + \ OS_FAMILY_REDHAT OS_FAMILY_UNIX = OS_FAMILY_BSD + OS_FAMILY_LINUX OS_KNOWN = OS_FAMILY_UNIX + ('kubernetes',) def __init__(self, name, attributes=None): if attributes is None: attributes = {} if not validate_name(name): raise RepositoryError(_("'{}' is not a valid node name").format(name)) self._add_host_keys = environ.get('BW_ADD_HOST_KEYS', False) == "1" self._bundles = attributes.get('bundles', []) self._compiling_metadata = Lock() self._dynamic_group_lock = Lock() self._dynamic_groups_resolved = False # None means we're currently doing it self._metadata_so_far = {} self._node_metadata = attributes.get('metadata', {}) self._ssh_conn_established = False self._ssh_first_conn_lock = Lock() self._template_node_name = attributes.get('template_node') self.hostname = attributes.get('hostname', name) self.name = name for attr in GROUP_ATTR_DEFAULTS: setattr(self, "_{}".format(attr), attributes.get(attr)) def __lt__(self, other): return self.name < other.name def __repr__(self): return "".format(self.name) @cached_property def bundles(self): if self._dynamic_group_lock.acquire(False): 
self._dynamic_group_lock.release() else: raise RepositoryError(_( "node bundles cannot be queried with members_add/remove" )) with io.job(_("{node} loading bundles").format(node=bold(self.name))): added_bundles = [] found_bundles = [] for group in self.groups: for bundle_name in group.bundle_names: found_bundles.append(bundle_name) for bundle_name in found_bundles + list(self._bundles): if bundle_name not in added_bundles: added_bundles.append(bundle_name) try: yield Bundle(self, bundle_name) except NoSuchBundle: raise NoSuchBundle(_( "Node '{node}' wants bundle '{bundle}', but it doesn't exist." ).format( bundle=bundle_name, node=self.name, )) @cached_property def cdict(self): node_dict = {} for item in self.items: try: node_dict[item.id] = item.hash() except AttributeError: # actions have no cdict pass return node_dict def covered_by_autoskip_selector(self, autoskip_selector): """ True if this node should be skipped based on the given selector string (e.g. "node:foo,group:bar"). """ components = [c.strip() for c in autoskip_selector.split(",")] if "node:{}".format(self.name) in components: return True for group in self.groups: if "group:{}".format(group.name) in components: return True return False def group_membership_hash(self): return hash_statedict(sorted(names(self.groups))) @cached_property @io.job_wrapper(_("{} determining groups").format(bold("{0.name}"))) def groups(self): _groups = set(self.repo._static_groups_for_node(self)) # lock to avoid infinite recursion when .members_add/remove # use stuff like node.in_group() that in turn calls this function if self._dynamic_group_lock.acquire(False): cache_result = True self._dynamic_groups_resolved = None # first we remove ourselves from all static groups whose # .members_remove matches us for group in list(_groups): if group.members_remove is not None and group.members_remove(self): try: _groups.remove(group) except KeyError: pass # now add all groups whose .members_add (but not .members_remove) # matches us _groups = _groups.union(self._groups_dynamic) self._dynamic_groups_resolved = True self._dynamic_group_lock.release() else: cache_result = False # we have to add parent groups at the very end, since we might # have added or removed subgroups thru .members_add/remove while True: # Since we're only looking at *immediate* parent groups, # we have to keep doing this until we stop adding parent # groups. _original_groups = _groups.copy() for group in list(_groups): for parent_group in group.immediate_parent_groups: if cache_result: with self._dynamic_group_lock: self._dynamic_groups_resolved = None if ( not parent_group.members_remove or not parent_group.members_remove(self) ): _groups.add(parent_group) self._dynamic_groups_resolved = True else: _groups.add(parent_group) if _groups == _original_groups: # we didn't add any new parent groups, so we can stop break if cache_result: return sorted(_groups) else: raise DontCache(sorted(_groups)) @property def _groups_dynamic(self): """ Returns all groups whose members_add matches this node. 
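
    members_add is typically a callable defined in groups.py that
    receives a node object; a hypothetical example (group name and
    pattern invented for illustration):

        groups = {
            'webservers': {
                'members_add': lambda node: node.name.startswith("web-"),
            },
        }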
""" _groups = set() for group in self.repo.groups: if group.members_add is not None and group.members_add(self): _groups.add(group) if group.members_remove is not None and group.members_remove(self): try: _groups.remove(group) except KeyError: pass return _groups def has_any_bundle(self, bundle_list): for bundle_name in bundle_list: if self.has_bundle(bundle_name): return True return False def has_bundle(self, bundle_name): for bundle in self.bundles: if bundle.name == bundle_name: return True return False def hash(self): return hash_statedict(self.cdict) def in_any_group(self, group_list): for group_name in group_list: if self.in_group(group_name): return True return False def in_group(self, group_name): for group in self.groups: if group.name == group_name: return True return False @cached_property def items(self): if not self.dummy: for bundle in self.bundles: for item in bundle.items: yield item @cached_property def magic_number(self): return int(md5(self.name.encode('UTF-8')).hexdigest(), 16) def apply( self, autoskip_selector="", autoonly_selector="", interactive=False, force=False, skip_list=tuple(), workers=4, ): if not list(self.items): io.stdout(_("{x} {node} has no items").format( node=bold(self.name), x=yellow("»"), )) return None if self.covered_by_autoskip_selector(autoskip_selector): io.stdout(_("{x} {node} skipped by --skip").format( node=bold(self.name), x=yellow("»"), )) return None if self.name in skip_list: io.stdout(_("{x} {node} skipped by --resume-file").format( node=bold(self.name), x=yellow("»"), )) return None try: self.repo.hooks.node_apply_start( self.repo, self, interactive=interactive, ) except SkipNode as exc: io.stdout(_("{x} {node} skipped by hook ({reason})").format( node=bold(self.name), reason=str(exc) or _("no reason given"), x=yellow("»"), )) return None start = datetime.now() io.stdout(_("{x} {node} {started} at {time}").format( node=bold(self.name), started=bold(_("started")), time=start.strftime("%Y-%m-%d %H:%M:%S"), x=blue("i"), )) error = False try: # Running "true" is meant to catch connection errors early, # but this only works on UNIX-y systems (i.e., not k8s). 
if self.os in self.OS_FAMILY_UNIX: self.run("true") except RemoteException as exc: io.stdout(_("{x} {node} Connection error: {msg}").format( msg=exc, node=bold(self.name), x=red("!"), )) error = _("Connection error (details above)") item_results = [] else: try: with NodeLock(self, interactive=interactive, ignore=force) as lock: item_results = apply_items( self, autoskip_selector=autoskip_selector, autoonly_selector=autoonly_selector, my_soft_locks=lock.my_soft_locks, other_peoples_soft_locks=lock.other_peoples_soft_locks, workers=workers, interactive=interactive, ) except NodeLockedException as e: if not interactive: io.stderr(_( "{x} {node} already locked by {user} at {date} ({duration} ago, " "`bw apply -f` to override)" ).format( date=bold(e.args[0]['date']), duration=e.args[0]['duration'], node=bold(self.name), user=bold(e.args[0]['user']), x=red("!"), )) error = _("Node locked (details above)") item_results = [] result = ApplyResult(self, item_results) result.start = start result.end = datetime.now() io.stdout(_("{x} {node} {completed} after {time} ({stats})").format( completed=bold(_("completed")), node=bold(self.name), stats=format_node_result(result), time=format_duration(result.end - start), x=blue("i"), )) self.repo.hooks.node_apply_end( self.repo, self, duration=result.duration, interactive=interactive, result=result, ) if error: raise GracefulApplyException(error) else: return result def download(self, remote_path, local_path): return operations.download( self.hostname, remote_path, local_path, add_host_keys=self._add_host_keys, wrapper_inner=self.cmd_wrapper_inner, wrapper_outer=self.cmd_wrapper_outer, ) def get_item(self, item_id): return find_item(item_id, self.items) @property def metadata(self): """ Returns full metadata for a node. MUST NOT be used from inside a metadata processor. Use .partial_metadata instead. """ if self._dynamic_groups_resolved is None: # return only metadata set directly at the node level if # we're still in the process of figuring out which groups # we belong to return self._node_metadata else: return self.repo._metadata_for_node(self.name, partial=False) @property def metadata_blame(self): return self.repo._metadata_for_node(self.name, partial=False, blame=True) def metadata_hash(self): return hash_metadata(self.metadata) @property def metadata_processors(self): for bundle in self.bundles: for metadata_processor in bundle.metadata_processors: yield ( "{}.{}".format( bundle.name, metadata_processor.__name__, ), metadata_processor, ) @property def partial_metadata(self): """ Only to be used from inside metadata processors. Can't use the normal .metadata there because it might deadlock when nodes have interdependent metadata. It's OK for metadata processors to work with partial metadata because they will be fed all metadata updates until no more changes are made by any metadata processor. """ return self.repo._metadata_for_node(self.name, partial=True) def run(self, command, data_stdin=None, may_fail=False, log_output=False): assert self.os in self.OS_FAMILY_UNIX if log_output: def log_function(msg): io.stdout("{x} {node} {msg}".format( node=bold(self.name), msg=force_text(msg).rstrip("\n"), x=cyan("›"), )) else: log_function = None if not self._ssh_conn_established: # Sometimes we're opening SSH connections to a node too fast # for OpenSSH to establish the ControlMaster socket for the # second and following connections to use. 
# To prevent this, we just wait until a first dummy command # has completed on the node before trying to reuse the # multiplexed connection. if self._ssh_first_conn_lock.acquire(False): try: with io.job(_("{} establishing connection...").format(bold(self.name))): operations.run(self.hostname, "true", add_host_keys=self._add_host_keys) self._ssh_conn_established = True finally: self._ssh_first_conn_lock.release() else: # we didn't get the lock immediately, now we just wait # until it is released before we proceed with self._ssh_first_conn_lock: pass return operations.run( self.hostname, command, add_host_keys=self._add_host_keys, data_stdin=data_stdin, ignore_failure=may_fail, log_function=log_function, wrapper_inner=self.cmd_wrapper_inner, wrapper_outer=self.cmd_wrapper_outer, ) @property def template_node(self): if not self._template_node_name: return None else: target_node = self.repo.get_node(self._template_node_name) if target_node._template_node_name: raise RepositoryError(_( "{template_node} cannot use template_node because {node} uses {template_node} " "as template_node" ).format(node=self.name, template_node=target_node.name)) else: return target_node def upload(self, local_path, remote_path, mode=None, owner="", group="", may_fail=False): assert self.os in self.OS_FAMILY_UNIX return operations.upload( self.hostname, local_path, remote_path, add_host_keys=self._add_host_keys, group=group, mode=mode, owner=owner, ignore_failure=may_fail, wrapper_inner=self.cmd_wrapper_inner, wrapper_outer=self.cmd_wrapper_outer, ) def verify(self, show_all=False, workers=4): result = [] start = datetime.now() if not self.items: io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!"))) else: result = verify_items(self, show_all=show_all, workers=workers) return { 'good': result.count(True), 'bad': result.count(False), 'unknown': result.count(None), 'duration': datetime.now() - start, } def build_attr_property(attr, default): def method(self): attr_source = None attr_value = None group_order = [ self.repo.get_group(group_name) for group_name in _flatten_group_hierarchy(self.groups) ] for group in group_order: if getattr(group, attr) is not None: attr_source = "group:{}".format(group.name) attr_value = getattr(group, attr) if self.template_node: attr_source = "template_node" attr_value = getattr(self.template_node, attr) if getattr(self, "_{}".format(attr)) is not None: attr_source = "node" attr_value = getattr(self, "_{}".format(attr)) if attr_value is None: attr_source = "default" attr_value = default io.debug(_("node {node} gets its {attr} attribute from: {source}").format( node=self.name, attr=attr, source=attr_source, )) if self._dynamic_groups_resolved: return attr_value else: raise DontCache(attr_value) method.__name__ = str("_group_attr_{}".format(attr)) # required for cached_property # str() for Python 2 compatibility return cached_property(method) for attr, default in GROUP_ATTR_DEFAULTS.items(): setattr(Node, attr, build_attr_property(attr, default)) def verify_items(node, show_all=False, workers=1): items = [] for item in node.items: if not item.triggered: items.append(item) elif not isinstance(item, DummyItem): io.progress_advance() try: # See comment in node.apply(). 
if node.os in node.OS_FAMILY_UNIX: node.run("true") except RemoteException as exc: io.stdout(_("{x} {node} Connection error: {msg}").format( msg=exc, node=bold(node.name), x=red("!"), )) for item in items: io.progress_advance() return [None for item in items] def tasks_available(): return bool(items) def next_task(): while True: try: item = items.pop() except IndexError: return None if item._faults_missing_for_attributes: if item.error_on_missing_fault: item._raise_for_faults() else: io.progress_advance() io.stdout(_("{x} {node} {bundle} {item} ({msg})").format( bundle=bold(item.bundle.name), item=item.id, msg=yellow(_("Fault unavailable")), node=bold(node.name), x=yellow("»"), )) else: return { 'task_id': node.name + ":" + item.bundle.name + ":" + item.id, 'target': item.verify, } def handle_exception(task_id, exception, traceback): node_name, bundle_name, item_id = task_id.split(":", 2) io.progress_advance() if isinstance(exception, NotImplementedError): io.stdout(_("{x} {node} {bundle} {item} (does not support verify)").format( bundle=bold(bundle_name), item=item_id, node=bold(node_name), x=cyan("?"), )) else: # Unlike with `bw apply`, it is OK for `bw verify` to encounter # exceptions when getting an item's status. `bw verify` doesn't # care about dependencies and therefore cannot know that looking # up a database user requires the database to be installed in # the first place. io.debug("exception while verifying {}:".format(task_id)) io.debug(traceback) io.debug(repr(exception)) io.stdout(_("{x} {node} {bundle} {item} (unable to get status, check --debug for details)").format( bundle=bold(bundle_name), item=item_id, node=bold(node_name), x=cyan("?"), )) return None # count this result as "unknown" def handle_result(task_id, return_value, duration): io.progress_advance() unless_result, item_status = return_value node_name, bundle_name, item_id = task_id.split(":", 2) if not unless_result and not item_status.correct: if item_status.must_be_created: details_text = _("create") elif item_status.must_be_deleted: details_text = _("remove") else: details_text = ", ".join(sorted(item_status.display_keys_to_fix)) io.stderr("{x} {node} {bundle} {item} ({details})".format( bundle=bold(bundle_name), details=details_text, item=item_id, node=bold(node_name), x=red("✘"), )) return False else: if show_all: io.stdout("{x} {node} {bundle} {item}".format( bundle=bold(bundle_name), item=item_id, node=bold(node_name), x=green("✓"), )) return True worker_pool = WorkerPool( tasks_available, next_task, handle_result, handle_exception=handle_exception, pool_id="verify_{}".format(node.name), workers=workers, ) return worker_pool.run() bundlewrap-3.8.0/bundlewrap/operations.py000066400000000000000000000234771360562404000206020ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime from pipes import quote from select import select from shlex import split from subprocess import Popen, PIPE from threading import Event, Thread from os import close, environ, pipe, read, setpgrp from .exceptions import RemoteException from .utils import cached_property from .utils.text import force_text, LineBuffer, mark_for_translation as _, randstr from .utils.ui import io def output_thread_body(line_buffer, read_fd, quit_event, read_until_eof): # see run() for details while True: r, w, x = select([read_fd], [], [], 0.1) if r: chunk = read(read_fd, 1024) if chunk: line_buffer.write(chunk) else: # EOF return elif quit_event.is_set() and not read_until_eof: # one last 
chance to read output after the child process # has died while True: r, w, x = select([read_fd], [], [], 0) if r: line_buffer.write(read(read_fd, 1024)) else: break return def download( hostname, remote_path, local_path, add_host_keys=False, wrapper_inner="{}", wrapper_outer="{}", ): """ Download a file. """ io.debug(_("downloading {host}:{path} -> {target}").format( host=hostname, path=remote_path, target=local_path)) result = run( hostname, "cat {}".format(quote(remote_path)), # See issue #39. add_host_keys=add_host_keys, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) if result.return_code == 0: with open(local_path, "wb") as f: f.write(result.stdout) else: raise RemoteException(_( "reading file '{path}' on {host} failed: {error}" ).format( error=force_text(result.stderr) + force_text(result.stdout), host=hostname, path=remote_path, )) class RunResult(object): def __init__(self): self.duration = None self.return_code = None self.stderr = None self.stdout = None @cached_property def stderr_text(self): return force_text(self.stderr) @cached_property def stdout_text(self): return force_text(self.stdout) def run_local( command, data_stdin=None, log_function=None, shell=False, ): """ Runs a command on the local system. """ # LineBuffer objects take care of always printing complete lines # which have been properly terminated by a newline. This is only # relevant when using `bw run`. # Does nothing when log_function is None. stderr_lb = LineBuffer(log_function) stdout_lb = LineBuffer(log_function) # Create pipes which will be used by the SSH child process. We do # not use subprocess.PIPE because we need to be able to continuously # check those pipes for new output, so we can feed it to the # LineBuffers during `bw run`. stdout_fd_r, stdout_fd_w = pipe() stderr_fd_r, stderr_fd_w = pipe() cmd_id = randstr(length=4).upper() io.debug("running command with ID {}: {}".format(cmd_id, " ".join(command))) start = datetime.utcnow() # Launch the child process. It's important that SSH gets a dummy # stdin, i.e. it must *not* read from the terminal. Otherwise, it # can steal user input. child_process = Popen( command, preexec_fn=setpgrp, shell=shell, stdin=PIPE, stderr=stderr_fd_w, stdout=stdout_fd_w, ) io._child_pids.append(child_process.pid) if data_stdin is not None: child_process.stdin.write(data_stdin) quit_event = Event() stdout_thread = Thread( args=(stdout_lb, stdout_fd_r, quit_event, True), target=output_thread_body, ) stderr_thread = Thread( args=(stderr_lb, stderr_fd_r, quit_event, False), target=output_thread_body, ) stdout_thread.start() stderr_thread.start() try: child_process.communicate() finally: # Once we end up here, the child process has terminated. # # Now, the big question is: Why do we need an Event here? # # Problem is, a user could use SSH multiplexing with # auto-forking (e.g., "ControlPersist 10m"). In this case, # OpenSSH forks another process which holds the "master" # connection. This forked process *inherits* our pipes (at least # for stderr). Thus, only when that master process finally # terminates (possibly after many minutes), we will be informed # about EOF on our stderr pipe. That doesn't work. bw will hang. # # So, instead, we use a busy loop in output_thread_body() which # checks for quit_event being set. Unfortunately there is no way # to be absolutely sure that we received all output from stderr # because we never get a proper EOF there. 
All we can do is hope # that all output has arrived on the reading end of the pipe by # the time the quit_event is checked in the thread. # # Luckily stdout is a somewhat simpler affair: we can just close # the writing end of the pipe, causing the reader thread to # shut down as it sees the EOF. io._child_pids.remove(child_process.pid) quit_event.set() close(stdout_fd_w) stdout_thread.join() stderr_thread.join() stdout_lb.close() stderr_lb.close() for fd in (stdout_fd_r, stderr_fd_r, stderr_fd_w): close(fd) io.debug("command with ID {} finished with return code {}".format( cmd_id, child_process.returncode, )) result = RunResult() result.duration = datetime.utcnow() - start result.stdout = stdout_lb.record.getvalue() result.stderr = stderr_lb.record.getvalue() result.return_code = child_process.returncode return result def run( hostname, command, add_host_keys=False, data_stdin=None, ignore_failure=False, raise_for_return_codes=( 126, # command not executable 127, # command not found 255, # SSH error ), log_function=None, wrapper_inner="{}", wrapper_outer="{}", ): """ Runs a command on a remote system. """ ssh_command = [ "ssh", "-o", "KbdInteractiveAuthentication=no", "-o", "PasswordAuthentication=no", "-o", "StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes", ] extra_args = environ.get("BW_SSH_ARGS", "").strip() if extra_args: ssh_command.extend(split(extra_args)) ssh_command.append(hostname) ssh_command.append(wrapper_outer.format(quote(wrapper_inner.format(command)))) result = run_local( ssh_command, data_stdin=data_stdin, log_function=log_function, ) if result.return_code != 0: error_msg = _( "Non-zero return code ({rcode}) running '{command}' " "on '{host}':\n\n{result}\n\n" ).format( command=command, host=hostname, rcode=result.return_code, result=force_text(result.stdout) + force_text(result.stderr), ) io.debug(error_msg) if not ignore_failure or result.return_code in raise_for_return_codes: raise RemoteException(error_msg) return result def upload( hostname, local_path, remote_path, add_host_keys=False, group="", mode=None, owner="", ignore_failure=False, wrapper_inner="{}", wrapper_outer="{}", ): """ Upload a file. 
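An illustrative call (hostname and paths are made up, not taken from any real repository):

    >>> upload(
    ...     "node1.example.com",
    ...     "files/nginx.conf",
    ...     "/etc/nginx/nginx.conf",
    ...     mode="0644",
    ...     owner="root",
    ...     group="root",
    ... )
    True

The file is first copied to a temporary name via scp, then chowned and chmodded, and only then moved onto the target path, so other processes never observe a half-written file. Returns True on success; with ignore_failure=True a failed step yields False instead of raising RemoteException.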
""" io.debug(_("uploading {path} -> {host}:{target}").format( host=hostname, path=local_path, target=remote_path)) temp_filename = ".bundlewrap_tmp_" + randstr() scp_command = [ "scp", "-o", "StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes", ] extra_args = environ.get("BW_SCP_ARGS", environ.get("BW_SSH_ARGS", "")).strip() if extra_args: scp_command.extend(split(extra_args)) scp_command.append(local_path) scp_command.append("{}:{}".format(hostname, temp_filename)) scp_process = run_local(scp_command) if scp_process.return_code != 0: if ignore_failure: return False raise RemoteException(_( "Upload to {host} failed for {failed}:\n\n{result}\n\n" ).format( failed=remote_path, host=hostname, result=force_text(scp_process.stdout) + force_text(scp_process.stderr), )) if owner or group: if group: group = ":" + quote(group) result = run( hostname, "chown {}{} {}".format( quote(owner), group, quote(temp_filename), ), add_host_keys=add_host_keys, ignore_failure=ignore_failure, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) if result.return_code != 0: return False if mode: result = run( hostname, "chmod {} {}".format( mode, quote(temp_filename), ), add_host_keys=add_host_keys, ignore_failure=ignore_failure, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) if result.return_code != 0: return False result = run( hostname, "mv -f {} {}".format( quote(temp_filename), quote(remote_path), ), add_host_keys=add_host_keys, ignore_failure=ignore_failure, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) return result.return_code == 0 bundlewrap-3.8.0/bundlewrap/plugins.py000066400000000000000000000156571360562404000201010ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from json import dumps, loads from os import chmod, remove from os.path import exists, join from stat import S_IREAD, S_IRGRP, S_IROTH from requests import get from .exceptions import NoSuchPlugin, PluginError, PluginLocalConflict from .utils import download, hash_local_file from .utils.text import mark_for_translation as _ from .utils.ui import io BASE_URL = "https://raw.githubusercontent.com/bundlewrap/plugins/master" class PluginManager(object): def __init__(self, path, base_url=BASE_URL): self.base_url = base_url self.path = path if exists(join(self.path, "plugins.json")): with open(join(self.path, "plugins.json")) as f: self.plugin_db = loads(f.read()) else: self.plugin_db = {} @property def index(self): return get( "{}/index.json".format(self.base_url) ).json() def install(self, plugin, force=False): if plugin in self.plugin_db: raise PluginError(_("plugin '{plugin}' is already installed").format(plugin=plugin)) manifest = self.manifest_for_plugin(plugin) for file in manifest['provides']: target_path = join(self.path, file) if exists(target_path) and not force: raise PluginLocalConflict(_( "cannot install '{plugin}' because it provides " "'{path}' which already exists" ).format(path=target_path, plugin=plugin)) url = "{}/{}/{}".format(self.base_url, plugin, file) download(url, target_path) # make file read-only to discourage users from editing them # which will block future updates of the plugin chmod(target_path, S_IREAD | S_IRGRP | S_IROTH) self.record_as_installed(plugin, manifest) return manifest def list(self): for plugin, info in self.plugin_db.items(): yield (plugin, info['version']) def local_modifications(self, plugin): try: plugin_data = self.plugin_db[plugin] except KeyError: raise NoSuchPlugin(_( "The plugin '{plugin}' is not 
installed." ).format(plugin=plugin)) local_changes = [] for filename, checksum in plugin_data['files'].items(): target_path = join(self.path, filename) actual_checksum = hash_local_file(target_path) if actual_checksum != checksum: local_changes.append(( target_path, actual_checksum, checksum, )) return local_changes def manifest_for_plugin(self, plugin): r = get( "{}/{}/manifest.json".format(self.base_url, plugin) ) if r.status_code == 404: raise NoSuchPlugin(plugin) else: return r.json() def record_as_installed(self, plugin, manifest): file_hashes = {} for file in manifest['provides']: target_path = join(self.path, file) file_hashes[file] = hash_local_file(target_path) self.plugin_db[plugin] = { 'files': file_hashes, 'version': manifest['version'], } self.write_db() def remove(self, plugin, force=False): if plugin not in self.plugin_db: raise NoSuchPlugin(_("plugin '{plugin}' is not installed").format(plugin=plugin)) for file, db_checksum in self.plugin_db[plugin]['files'].items(): file_path = join(self.path, file) if not exists(file_path): continue current_checksum = hash_local_file(file_path) if db_checksum != current_checksum and not force: io.stderr(_( "not removing '{path}' because it has been modified since installation" ).format(path=file_path)) continue remove(file_path) del self.plugin_db[plugin] self.write_db() def search(self, term): term = term.lower() for plugin_name, plugin_data in self.index.items(): if term in plugin_name.lower() or term in plugin_data['desc'].lower(): yield (plugin_name, plugin_data['desc']) def update(self, plugin, check_only=False, force=False): if plugin not in self.plugin_db: raise PluginError(_("plugin '{plugin}' is not installed").format(plugin=plugin)) # before updating anything, we need to check for local modifications local_changes = self.local_modifications(plugin) if local_changes and not force: files = [path for path, c1, c2 in local_changes] raise PluginLocalConflict(_( "cannot update '{plugin}' because the following files have been modified locally:" "\n{files}" ).format(files="\n".join(files), plugin=plugin)) manifest = self.manifest_for_plugin(plugin) for file in manifest['provides']: file_path = join(self.path, file) if exists(file_path) and file not in self.plugin_db[plugin]['files'] and not force: # new version added a file that already existed locally raise PluginLocalConflict(_( "cannot update '{plugin}' because it would overwrite '{path}'" ).format(path=file, plugin=plugin)) old_version = self.plugin_db[plugin]['version'] new_version = manifest['version'] if not check_only and old_version != new_version: # actually install files for file in manifest['provides']: target_path = join(self.path, file) url = "{}/{}/{}".format(self.base_url, plugin, file) download(url, target_path) # make file read-only to discourage users from editing them # which will block future updates of the plugin chmod(target_path, S_IREAD | S_IRGRP | S_IROTH) # check for files that have been removed in the new version for file, db_checksum in self.plugin_db[plugin]['files'].items(): if file not in manifest['provides']: file_path = join(self.path, file) current_checksum = hash_local_file(file_path) if db_checksum != current_checksum and not force: io.stderr(_( "not removing '{path}' because it has been modified since installation" ).format(path=file_path)) continue remove(file_path) self.record_as_installed(plugin, manifest) return (old_version, new_version) def write_db(self): with open(join(self.path, "plugins.json"), 'w') as f: f.write(dumps(self.plugin_db, 
indent=4, sort_keys=True)) f.write("\n") bundlewrap-3.8.0/bundlewrap/repo.py000066400000000000000000000625151360562404000173600ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from imp import load_source from inspect import isabstract from os import listdir, mkdir from os.path import isdir, isfile, join from threading import Lock from pkg_resources import DistributionNotFound, require, VersionConflict from . import items, utils, VERSION_STRING from .bundle import FILENAME_BUNDLE from .exceptions import ( NoSuchGroup, NoSuchNode, NoSuchRepository, MissingRepoDependency, RepositoryError, ) from .group import Group from .metadata import ( blame_changed_paths, check_metadata_processor_result, deepcopy_metadata, DEFAULTS, DONE, OVERWRITE, ) from .node import _flatten_group_hierarchy, Node from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy from .utils import cached_property, names from .utils.scm import get_git_branch, get_git_clean, get_rev from .utils.dicts import hash_statedict, merge_dict from .utils.text import bold, mark_for_translation as _, red, validate_name from .utils.ui import io, QUIT_EVENT DIRNAME_BUNDLES = "bundles" DIRNAME_DATA = "data" DIRNAME_HOOKS = "hooks" DIRNAME_ITEM_TYPES = "items" DIRNAME_LIBS = "libs" FILENAME_GROUPS = "groups.py" FILENAME_NODES = "nodes.py" FILENAME_REQUIREMENTS = "requirements.txt" HOOK_EVENTS = ( 'action_run_end', 'action_run_start', 'apply_end', 'apply_start', 'item_apply_end', 'item_apply_start', 'lock_add', 'lock_remove', 'lock_show', 'node_apply_end', 'node_apply_start', 'node_run_end', 'node_run_start', 'run_end', 'run_start', 'test', 'test_node', ) INITIAL_CONTENT = { FILENAME_GROUPS: _(""" groups = { #'group-1': { # 'bundles': ( # 'bundle-1', # ), # 'members': ( # 'node-1', # ), # 'subgroups': ( # 'group-2', # ), #}, 'all': { 'member_patterns': ( r".*", ), }, } """), FILENAME_NODES: _(""" nodes = { 'node-1': { 'hostname': "localhost", }, } """), FILENAME_REQUIREMENTS: "bundlewrap>={}\n".format(VERSION_STRING), FILENAME_SECRETS: generate_initial_secrets_cfg, } def groups_from_file(filepath, libs, repo_path, vault): """ Returns all groups as defined in the given groups.py. """ try: flat_group_dict = utils.getattr_from_file( filepath, 'groups', base_env={ 'libs': libs, 'repo_path': repo_path, 'vault': vault, }, ) except KeyError: raise RepositoryError(_( "{} must define a 'groups' variable" ).format(filepath)) for groupname, infodict in flat_group_dict.items(): yield Group(groupname, infodict) class HooksProxy(object): def __init__(self, path): self.__hook_cache = {} self.__module_cache = {} self.__path = path self.__registered_hooks = None def __getattr__(self, attrname): if attrname not in HOOK_EVENTS: raise AttributeError if self.__registered_hooks is None: self._register_hooks() event = attrname if event not in self.__hook_cache: # build a list of files that define a hook for the event files = [] for filename, events in self.__registered_hooks.items(): if event in events: files.append(filename) # define a function that calls all hook functions def hook(*args, **kwargs): for filename in files: self.__module_cache[filename][event](*args, **kwargs) self.__hook_cache[event] = hook return self.__hook_cache[event] def _register_hooks(self): """ Builds an internal dictionary of defined hooks. Priming __module_cache here is just a performance shortcut and could be left out. 
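For illustration (file name and function body invented): a repo file hooks/logging.py containing

    def node_apply_start(repo, node, interactive=False, **kwargs):
        print("applying to", node.name)

would be recorded here roughly as {'logging.py': ['node_apply_start']}, because 'node_apply_start' appears in HOOK_EVENTS; any other attributes defined in the file are ignored.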
""" self.__registered_hooks = {} if not isdir(self.__path): return for filename in listdir(self.__path): filepath = join(self.__path, filename) if not filename.endswith(".py") or \ not isfile(filepath) or \ filename.startswith("_"): continue self.__module_cache[filename] = {} self.__registered_hooks[filename] = [] for name, obj in utils.get_all_attrs_from_file(filepath).items(): if name not in HOOK_EVENTS: continue self.__module_cache[filename][name] = obj self.__registered_hooks[filename].append(name) def items_from_path(path): """ Looks for Item subclasses in the given path. An alternative method would involve metaclasses (as Django does it), but then it gets very hard to have two separate repos in the same process, because both of them would register config item classes globally. """ if not isdir(path): return for filename in listdir(path): filepath = join(path, filename) if not filename.endswith(".py") or \ not isfile(filepath) or \ filename.startswith("_"): continue for name, obj in \ utils.get_all_attrs_from_file(filepath).items(): if obj == items.Item or name.startswith("_"): continue try: if issubclass(obj, items.Item) and not isabstract(obj): yield obj except TypeError: pass class LibsProxy(object): def __init__(self, path): self.__module_cache = {} self.__path = path def __getattr__(self, attrname): if attrname.startswith("__") and attrname.endswith("__"): raise AttributeError(attrname) if attrname not in self.__module_cache: filename = attrname + ".py" filepath = join(self.__path, filename) try: m = load_source('bundlewrap.repo.libs_{}'.format(attrname), filepath) except: io.stderr(_("Exception while trying to load {}:").format(filepath)) raise self.__module_cache[attrname] = m return self.__module_cache[attrname] def nodes_from_file(filepath, libs, repo_path, vault): """ Returns a list of nodes as defined in the given nodes.py. """ try: flat_node_dict = utils.getattr_from_file( filepath, 'nodes', base_env={ 'libs': libs, 'repo_path': repo_path, 'vault': vault, }, ) except KeyError: raise RepositoryError( _("{} must define a 'nodes' variable").format(filepath) ) for nodename, infodict in flat_node_dict.items(): yield Node(nodename, infodict) class Repository(object): def __init__(self, repo_path=None): self.path = "/dev/null" if repo_path is None else repo_path self._set_path(self.path) self.bundle_names = [] self.group_dict = {} self.node_dict = {} self._node_metadata_blame = {} self._node_metadata_complete = {} self._node_metadata_partial = {} self._node_metadata_static_complete = set() self._node_metadata_lock = Lock() if repo_path is not None: self.populate_from_path(repo_path) else: self.item_classes = list(items_from_path(items.__path__[0])) def __eq__(self, other): if self.path == "/dev/null": # in-memory repos are never equal return False return self.path == other.path def __repr__(self): return "".format(self.path) @staticmethod def is_repo(path): """ Validates whether the given path is a bundlewrap repository. """ try: assert isdir(path) assert isfile(join(path, "nodes.py")) assert isfile(join(path, "groups.py")) except AssertionError: return False return True def add_group(self, group): """ Adds the given group object to this repo. 
""" if group.name in utils.names(self.nodes): raise RepositoryError(_("you cannot have a node and a group " "both named '{}'").format(group.name)) if group.name in utils.names(self.groups): raise RepositoryError(_("you cannot have two groups " "both named '{}'").format(group.name)) group.repo = self self.group_dict[group.name] = group def add_node(self, node): """ Adds the given node object to this repo. """ if node.name in utils.names(self.groups): raise RepositoryError(_("you cannot have a node and a group " "both named '{}'").format(node.name)) if node.name in utils.names(self.nodes): raise RepositoryError(_("you cannot have two nodes " "both named '{}'").format(node.name)) node.repo = self self.node_dict[node.name] = node @cached_property def branch(self): return get_git_branch() @cached_property def cdict(self): repo_dict = {} for node in self.nodes: repo_dict[node.name] = node.hash() return repo_dict @cached_property def clean(self): return get_git_clean() @classmethod def create(cls, path): """ Creates and returns a repository at path, which must exist and be empty. """ for filename, content in INITIAL_CONTENT.items(): if callable(content): content = content() with open(join(path, filename), 'w') as f: f.write(content.strip() + "\n") mkdir(join(path, DIRNAME_BUNDLES)) mkdir(join(path, DIRNAME_ITEM_TYPES)) return cls(path) def create_bundle(self, bundle_name): """ Creates an empty bundle. """ if not validate_name(bundle_name): raise ValueError(_("'{}' is not a valid bundle name").format(bundle_name)) bundle_dir = join(self.bundles_dir, bundle_name) # deliberately not using makedirs() so this will raise an # exception if the directory exists mkdir(bundle_dir) mkdir(join(bundle_dir, "files")) open(join(bundle_dir, FILENAME_BUNDLE), 'a').close() def create_node(self, node_name): """ Creates an adhoc node with the given name. """ node = Node(node_name) self.add_node(node) return node def get_group(self, group_name): try: return self.group_dict[group_name] except KeyError: raise NoSuchGroup(group_name) def get_node(self, node_name): try: return self.node_dict[node_name] except KeyError: raise NoSuchNode(node_name) def group_membership_hash(self): return hash_statedict(sorted(names(self.groups))) @property def groups(self): return sorted(self.group_dict.values()) def _static_groups_for_node(self, node): for group in self.groups: if node in group._static_nodes: yield group def hash(self): return hash_statedict(self.cdict) @property def nodes(self): return sorted(self.node_dict.values()) def nodes_in_all_groups(self, group_names): """ Returns a list of nodes where every node is a member of every group given. """ base_group = set(self.get_group(group_names[0]).nodes) for group_name in group_names[1:]: if not base_group: # quit early if we have already eliminated every node break base_group.intersection_update(set(self.get_group(group_name).nodes)) result = list(base_group) result.sort() return result def nodes_in_any_group(self, group_names): """ Returns all nodes that are a member of at least one of the given groups. """ for node in self.nodes: if node.in_any_group(group_names): yield node def nodes_in_group(self, group_name): """ Returns a list of nodes in the given group. """ return self.nodes_in_all_groups([group_name]) def _metadata_for_node(self, node_name, partial=False, blame=False): """ Returns full or partial metadata for this node. Partial metadata may only be requested from inside a metadata processor. 
If necessary, this method will build complete metadata for this node and all related nodes. Related meaning nodes that this node depends on in one of its metadata processors. """ try: return self._node_metadata_complete[node_name] except KeyError: pass if partial: self._node_metadata_partial.setdefault(node_name, {}) return self._node_metadata_partial[node_name] with self._node_metadata_lock: try: # maybe our metadata got completed while waiting for the lock return self._node_metadata_complete[node_name] except KeyError: pass self._node_metadata_partial[node_name] = {} self._build_node_metadata(blame=blame) # now that we have completed all metadata for this # node and all related nodes, copy that data over # to the complete dict self._node_metadata_complete.update(self._node_metadata_partial) # reset temporary vars self._node_metadata_partial = {} self._node_metadata_static_complete = set() if blame: return self._node_metadata_blame[node_name] else: return self._node_metadata_complete[node_name] def _build_node_metadata(self, blame=False): """ Builds complete metadata for all nodes that appear in self._node_metadata_partial.keys(). """ # these processors have indicated that they do not need to be run again blacklisted_metaprocs = set() while not QUIT_EVENT.is_set(): # First, get the static metadata out of the way for node_name in list(self._node_metadata_partial): if QUIT_EVENT.is_set(): break node = self.get_node(node_name) node_blame = self._node_metadata_blame.setdefault(node_name, {}) # check if static metadata for this node is already done if node_name in self._node_metadata_static_complete: continue else: self._node_metadata_static_complete.add(node_name) with io.job(_("{node} building group metadata").format(node=bold(node.name))): group_order = _flatten_group_hierarchy(node.groups) for group_name in group_order: new_metadata = merge_dict( self._node_metadata_partial[node.name], self.get_group(group_name).metadata, ) if blame: blame_changed_paths( self._node_metadata_partial[node.name], new_metadata, node_blame, "group:{}".format(group_name), ) self._node_metadata_partial[node.name] = new_metadata with io.job(_("{node} merging node metadata").format(node=bold(node.name))): # deepcopy_metadata is important here because up to this point # different nodes from the same group might still share objects # nested deeply in their metadata. This becomes a problem if we # start messing with these objects in metadata processors. Every # time we would edit one of these objects, the changes would be # shared amongst multiple nodes. 
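# (Illustration with made-up values: suppose two nodes of the same
# group initially share the object {'users': ['admin']} by reference.
# If a metadata processor for node1 appended to that list in place,
# node2's metadata would silently change as well. The deepcopy below
# breaks that sharing before processors get to touch the data.)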
for source_node in (node.template_node, node): if not source_node: # template_node might be None continue new_metadata = deepcopy_metadata(merge_dict( self._node_metadata_partial[node.name], source_node._node_metadata, )) if blame: blame_changed_paths( self._node_metadata_partial[node.name], new_metadata, node_blame, "node:{}".format(source_node.name), ) self._node_metadata_partial[node.name] = new_metadata # Now for the interesting part: We run all metadata processors # until none of them return DONE anymore (indicating that they're # just waiting for another metaproc to maybe insert new data, # which isn't happening if none return DONE) metaproc_returned_DONE = False for node_name in list(self._node_metadata_partial): if QUIT_EVENT.is_set(): break node = self.get_node(node_name) node_blame = self._node_metadata_blame[node_name] with io.job(_("{node} running metadata processors").format(node=bold(node.name))): for metadata_processor_name, metadata_processor in node.metadata_processors: if (node_name, metadata_processor_name) in blacklisted_metaprocs: continue io.debug(_( "running metadata processor {metaproc} for node {node}" ).format( metaproc=metadata_processor_name, node=node.name, )) if blame: # We need to deepcopy here because otherwise we have no chance of # figuring out what changed... input_metadata = deepcopy_metadata(self._node_metadata_partial[node.name]) else: # ...but we can't always do it for performance reasons. input_metadata = self._node_metadata_partial[node.name] try: processed = metadata_processor(input_metadata) except Exception as exc: io.stderr(_( "{x} Exception while executing metadata processor " "{metaproc} for node {node}:" ).format( x=red("!!!"), metaproc=metadata_processor_name, node=node.name, )) raise exc processed_dict, options = check_metadata_processor_result( input_metadata, processed, node.name, metadata_processor_name, ) if DONE in options: io.debug(_( "metadata processor {metaproc} for node {node} " "has indicated that it need NOT be run again" ).format( metaproc=metadata_processor_name, node=node.name, )) blacklisted_metaprocs.add((node_name, metadata_processor_name)) metaproc_returned_DONE = True else: io.debug(_( "metadata processor {metaproc} for node {node} " "has indicated that it must be run again" ).format( metaproc=metadata_processor_name, node=node.name, )) blame_defaults = False if DEFAULTS in options: processed_dict = merge_dict( processed_dict, self._node_metadata_partial[node.name], ) blame_defaults = True elif OVERWRITE in options: processed_dict = merge_dict( self._node_metadata_partial[node.name], processed_dict, ) if blame: blame_changed_paths( self._node_metadata_partial[node.name], processed_dict, node_blame, "metadata_processor:{}".format(metadata_processor_name), defaults=blame_defaults, ) self._node_metadata_partial[node.name] = processed_dict if not metaproc_returned_DONE: if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()): # During metadata processor execution, partial metadata may # have been requested for nodes we did not previously # consider. Since partial metadata may defaults to # just an empty dict, we still need to make sure to # generate static metadata for these new nodes, as # that may trigger additional runs of metadata # processors. 
continue else: break def metadata_hash(self): repo_dict = {} for node in self.nodes: repo_dict[node.name] = node.metadata_hash() return hash_statedict(repo_dict) def populate_from_path(self, path): if not self.is_repo(path): raise NoSuchRepository( _("'{}' is not a bundlewrap repository").format(path) ) if path != self.path: self._set_path(path) # check requirements.txt try: with open(join(path, FILENAME_REQUIREMENTS)) as f: lines = f.readlines() except: pass else: try: require(lines) except DistributionNotFound as exc: raise MissingRepoDependency(_( "{x} Python package '{pkg}' is listed in {filename}, but wasn't found. " "You probably have to install it with `pip install {pkg}`." ).format( filename=FILENAME_REQUIREMENTS, pkg=exc.req, x=red("!"), )) except VersionConflict as exc: raise MissingRepoDependency(_( "{x} Python package '{required}' is listed in {filename}, " "but only '{existing}' was found. " "You probably have to upgrade it with `pip install {required}`." ).format( existing=exc.dist, filename=FILENAME_REQUIREMENTS, required=exc.req, x=red("!"), )) self.vault = SecretProxy(self) # populate bundles self.bundle_names = [] for dir_entry in listdir(self.bundles_dir): if validate_name(dir_entry): self.bundle_names.append(dir_entry) # populate groups self.group_dict = {} for group in groups_from_file(self.groups_file, self.libs, self.path, self.vault): self.add_group(group) # populate items self.item_classes = list(items_from_path(items.__path__[0])) for item_class in items_from_path(self.items_dir): self.item_classes.append(item_class) # populate nodes self.node_dict = {} for node in nodes_from_file(self.nodes_file, self.libs, self.path, self.vault): self.add_node(node) @utils.cached_property def revision(self): return get_rev() def _set_path(self, path): self.path = path self.bundles_dir = join(self.path, DIRNAME_BUNDLES) self.data_dir = join(self.path, DIRNAME_DATA) self.hooks_dir = join(self.path, DIRNAME_HOOKS) self.items_dir = join(self.path, DIRNAME_ITEM_TYPES) self.groups_file = join(self.path, FILENAME_GROUPS) self.libs_dir = join(self.path, DIRNAME_LIBS) self.nodes_file = join(self.path, FILENAME_NODES) self.hooks = HooksProxy(self.hooks_dir) self.libs = LibsProxy(self.libs_dir) bundlewrap-3.8.0/bundlewrap/secrets.py000066400000000000000000000271321360562404000200570ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from base64 import b64encode, urlsafe_b64decode try: from configparser import SafeConfigParser except ImportError: # Python 2 from ConfigParser import SafeConfigParser import hashlib import hmac from os import environ from os.path import join from string import ascii_letters, punctuation, digits from cryptography.fernet import Fernet from .exceptions import FaultUnavailable from .utils import Fault, get_file_contents from .utils.text import mark_for_translation as _ from .utils.ui import io HUMAN_CHARS_START = list("bcdfghjklmnprstvwxz") HUMAN_CHARS_VOWELS = list("aeiou") + ["ai", "ao", "au", "ea", "ee", "ei", "eu", "ia", "ie", "oo", "ou"] HUMAN_CHARS_CONS = HUMAN_CHARS_START + ["bb", "bl", "cc", "ch", "ck", "dd", "dr", "ds", "dt", "ff", "gg", "gn", "kl", "ll", "mb", "md", "mm", "mp", "mt", "nc", "nd", "nn", "np", "nt", "pp", "rr", "rt", "sh", "ss", "st", "tl", "ts", "tt"] FILENAME_SECRETS = ".secrets.cfg" def choice_prng(lst, prng): return lst[next(prng) % (len(lst) - 1)] def generate_initial_secrets_cfg(): return ( "# DO NOT COMMIT THIS FILE\n" "# share it with your team through a secure channel\n\n" "[generate]\nkey = 
{}\n\n" "[encrypt]\nkey = {}\n" ).format( SecretProxy.random_key(), SecretProxy.random_key(), ) def random(seed): """ Provides a way to get repeatable random numbers from the given seed. Unlike random.seed(), this approach provides consistent results across platforms. See also http://stackoverflow.com/a/18992474 """ while True: seed = hashlib.sha512(seed).digest() for character in seed: try: yield ord(character) except TypeError: # Python 3 yield character class SecretProxy(object): @staticmethod def random_key(): """ Provided as a helper to generate new keys from `bw debug`. """ return Fernet.generate_key().decode('utf-8') def __init__(self, repo): self.repo = repo self.keys = self._load_keys() self._call_log = {} def _decrypt(self, cryptotext=None, key=None): """ Decrypts a given encrypted password. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return "decrypted text" key, cryptotext = self._determine_key_to_use(cryptotext.encode('utf-8'), key, cryptotext) return Fernet(key).decrypt(cryptotext).decode('utf-8') def _decrypt_file(self, source_path=None, key=None): """ Decrypts the file at source_path (relative to data/) and returns the plaintext as unicode. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return "decrypted file" cryptotext = get_file_contents(join(self.repo.data_dir, source_path)) key, cryptotext = self._determine_key_to_use(cryptotext, key, source_path) f = Fernet(key) return f.decrypt(cryptotext).decode('utf-8') def _decrypt_file_as_base64(self, source_path=None, key=None): """ Decrypts the file at source_path (relative to data/) and returns the plaintext as base64. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return b64encode("decrypted file as base64").decode('utf-8') cryptotext = get_file_contents(join(self.repo.data_dir, source_path)) key, cryptotext = self._determine_key_to_use(cryptotext, key, source_path) f = Fernet(key) return b64encode(f.decrypt(cryptotext)).decode('utf-8') def _determine_key_to_use(self, cryptotext, key, entity_description): key_delim = cryptotext.find(b'$') if key_delim > -1: key_from_text = cryptotext[:key_delim].decode('utf-8') cryptotext = cryptotext[key_delim + 1:] else: key_from_text = None if key is None: if key_from_text is not None: key = key_from_text else: key = 'encrypt' try: key = self.keys[key] except KeyError: raise FaultUnavailable(_( "Key '{key}' not available for decryption of the following entity, " "check your {file}: {entity_description}" ).format( file=FILENAME_SECRETS, key=key, entity_description=entity_description, )) return key, cryptotext def _generate_human_password( self, identifier=None, digits=2, key='generate', per_word=3, words=4, ): """ Like _generate_password(), but creates a password which can be typed more easily by human beings. A "word" consists of an upper case character (usually an actual consonant), followed by an alternating pattern of "vowels" and "consonants". Those lists of characters are defined at the top of this file. Note that something like "tl" is considered "a consonant" as well. Similarly, "au" and friends are "a vowel". Words are separated by dashes. By default, you also get some digits at the end of the password. 
""" if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return "generatedpassword" prng = self._get_prng(identifier, key) pwd = "" is_start = True word_length = 0 words_done = 0 while words_done < words: if is_start: add = choice_prng(HUMAN_CHARS_START, prng).upper() is_start = False is_vowel = True else: if is_vowel: add = choice_prng(HUMAN_CHARS_VOWELS, prng) else: add = choice_prng(HUMAN_CHARS_CONS, prng) is_vowel = not is_vowel pwd += add word_length += 1 if word_length == per_word: pwd += "-" word_length = 0 words_done += 1 is_start = True if digits > 0: for i in range(digits): pwd += str(next(prng) % 10) else: # Strip trailing dash which is always added by the routine # above. pwd = pwd[:-1] return pwd def _generate_password(self, identifier=None, key='generate', length=32, symbols=False): """ Derives a password from the given identifier and the shared key in the repository. This is done by seeding a random generator with an SHA512 HMAC built from the key and the given identifier. One could just use the HMAC digest itself as a password, but the PRNG allows for more control over password length and complexity. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return ("generatedpassword"*length)[:length] prng = self._get_prng(identifier, key) alphabet = ascii_letters + digits if symbols: alphabet += punctuation return "".join([choice_prng(alphabet, prng) for i in range(length)]) def _generate_random_bytes_as_base64(self, identifier=None, key='generate', length=32): if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return b64encode(bytearray([ord("a") for i in range(length)])).decode() prng = self._get_prng(identifier, key) return b64encode(bytearray([next(prng) for i in range(length)])).decode() def _get_prng(self, identifier, key): try: key_encoded = self.keys[key] except KeyError: raise FaultUnavailable(_( "Key '{key}' not available to generate password '{password}', check your {file}" ).format( file=FILENAME_SECRETS, key=key, password=identifier, )) h = hmac.new(urlsafe_b64decode(key_encoded), digestmod=hashlib.sha512) h.update(identifier.encode('utf-8')) return random(h.digest()) def _load_keys(self): config = SafeConfigParser() secrets_file = join(self.repo.path, FILENAME_SECRETS) try: config.read(secrets_file) except IOError: io.debug(_("unable to read {}").format(secrets_file)) return {} result = {} for section in config.sections(): result[section] = config.get(section, 'key').encode('utf-8') return result def decrypt(self, cryptotext, key=None): return Fault( self._decrypt, cryptotext=cryptotext, key=key, ) def decrypt_file(self, source_path, key=None): return Fault( self._decrypt_file, source_path=source_path, key=key, ) def decrypt_file_as_base64(self, source_path, key=None): return Fault( self._decrypt_file_as_base64, source_path=source_path, key=key, ) def encrypt(self, plaintext, key='encrypt'): """ Encrypts a given plaintext password and returns a string that can be fed into decrypt() to get the password back. """ key_name = key try: key = self.keys[key] except KeyError: raise KeyError(_( "Key '{key}' not available for encryption, check your {file}" ).format( file=FILENAME_SECRETS, key=key, )) return key_name + '$' + Fernet(key).encrypt(plaintext.encode('utf-8')).decode('utf-8') def encrypt_file(self, source_path, target_path, key='encrypt'): """ Encrypts the file at source_path and places the result at target_path. The source_path is relative to CWD or absolute, while target_path is relative to data/. 
""" key_name = key try: key = self.keys[key] except KeyError: raise KeyError(_( "Key '{key}' not available for file encryption, check your {file}" ).format( file=FILENAME_SECRETS, key=key, )) plaintext = get_file_contents(source_path) fernet = Fernet(key) target_file = join(self.repo.data_dir, target_path) with open(target_file, 'wb') as f: f.write(key_name.encode('utf-8') + b'$') f.write(fernet.encrypt(plaintext)) return target_file def human_password_for( self, identifier, digits=2, key='generate', per_word=3, words=4, ): self._call_log.setdefault(identifier, 0) self._call_log[identifier] += 1 return Fault( self._generate_human_password, identifier=identifier, digits=digits, key=key, per_word=per_word, words=words, ) def password_for(self, identifier, key='generate', length=32, symbols=False): self._call_log.setdefault(identifier, 0) self._call_log[identifier] += 1 return Fault( self._generate_password, identifier=identifier, key=key, length=length, symbols=symbols, ) def random_bytes_as_base64_for(self, identifier, key='generate', length=32): return Fault( self._generate_random_bytes_as_base64, identifier=identifier, key=key, length=length, ) bundlewrap-3.8.0/bundlewrap/utils/000077500000000000000000000000001360562404000171705ustar00rootroot00000000000000bundlewrap-3.8.0/bundlewrap/utils/__init__.py000066400000000000000000000154301360562404000213040ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from base64 import b64encode from codecs import getwriter from contextlib import contextmanager import hashlib from inspect import isgenerator from os import chmod, close, makedirs, remove from os.path import dirname, exists import stat from sys import stderr, stdout from tempfile import mkstemp from requests import get from ..exceptions import DontCache, FaultUnavailable __GETATTR_CODE_CACHE = {} __GETATTR_RESULT_CACHE = {} __GETATTR_NODEFAULT = "very_unlikely_default_value" MODE644 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH try: STDERR_WRITER = getwriter('utf-8')(stderr.buffer) STDOUT_WRITER = getwriter('utf-8')(stdout.buffer) except AttributeError: # Python 2 STDERR_WRITER = getwriter('utf-8')(stderr) STDOUT_WRITER = getwriter('utf-8')(stdout) def cached_property(prop): """ A replacement for the property decorator that will only compute the attribute's value on the first call and serve a cached copy from then on. """ def cache_wrapper(self): if not hasattr(self, "_cache"): self._cache = {} if prop.__name__ not in self._cache: try: return_value = prop(self) if isgenerator(return_value): return_value = tuple(return_value) except DontCache as exc: return exc.obj else: self._cache[prop.__name__] = return_value return self._cache[prop.__name__] return property(cache_wrapper) def download(url, path): if not exists(dirname(path)): makedirs(dirname(path)) if exists(path): chmod(path, MODE644) with open(path, 'wb') as f: r = get(url, stream=True) r.raise_for_status() for block in r.iter_content(1024): if not block: break else: f.write(block) class Fault(object): """ A proxy object for lazy access to things that may not really be available at the time of use. This let's us gracefully skip items that require information that's currently not available. 
""" def __init__(self, callback, **kwargs): self._available = None self._exc = None self._value = None self.callback = callback self.kwargs = kwargs def _resolve(self): if self._available is None: try: self._value = self.callback(**self.kwargs) if isinstance(self._value, Fault): self._value = self._value.value self._available = True except FaultUnavailable as exc: self._available = False self._exc = exc def __add__(self, other): if isinstance(other, Fault): def callback(): return self.value + other.value return Fault(callback) else: def callback(): return self.value + other return Fault(callback) def __len__(self): return len(self.value) def __lt__(self, other): return self.value < other.value def __str__(self): return str(self.value) def b64encode(self): def callback(): return b64encode(self.value.encode('UTF-8')).decode('UTF-8') return Fault(callback) def format_into(self, format_string): def callback(): return format_string.format(self.value) return Fault(callback) @property def is_available(self): self._resolve() return self._available @property def value(self): self._resolve() if not self._available: raise self._exc return self._value def _make_method_callback(method_name): def method(self, *args, **kwargs): def callback(): return getattr(self.value, method_name)(*args, **kwargs) return Fault(callback) return method for method_name in ( 'format', 'lower', 'lstrip', 'replace', 'rstrip', 'strip', 'upper', 'zfill', ): setattr(Fault, method_name, _make_method_callback(method_name)) def get_file_contents(path): with open(path, 'rb') as f: content = f.read() return content def get_all_attrs_from_file(path, base_env=None): """ Reads all 'attributes' (if it were a module) from a source file. """ if base_env is None: base_env = {} if not base_env and path in __GETATTR_RESULT_CACHE: # do not allow caching when passing in a base env because that # breaks repeated calls with different base envs for the same # file return __GETATTR_RESULT_CACHE[path] if path not in __GETATTR_CODE_CACHE: source = get_file_contents(path) __GETATTR_CODE_CACHE[path] = compile(source, path, mode='exec') code = __GETATTR_CODE_CACHE[path] env = base_env.copy() try: exec(code, env) except: from .ui import io io.stderr("Exception while executing {}".format(path)) raise if not base_env: __GETATTR_RESULT_CACHE[path] = env return env def getattr_from_file(path, attrname, base_env=None, default=__GETATTR_NODEFAULT): """ Reads a specific 'attribute' (if it were a module) from a source file. """ env = get_all_attrs_from_file(path, base_env=base_env) if default == __GETATTR_NODEFAULT: return env[attrname] else: return env.get(attrname, default) def hash_local_file(path): """ Retuns the sha1 hash of a file on the local machine. """ return sha1(get_file_contents(path)) def names(obj_list): """ Iterator over the name properties of a given list of objects. repo.nodes will give you node objects names(repo.nodes) will give you node names """ for obj in obj_list: yield obj.name def sha1(data): """ Returns hex SHA1 hash for input. """ hasher = hashlib.sha1() hasher.update(data) return hasher.hexdigest() class SkipList(object): """ Used to maintain a list of nodes that have already been visited. 
""" def __init__(self, path): self.path = path if path and exists(path): with open(path) as f: self._list_items = set(f.read().strip().split("\n")) else: self._list_items = set() def __contains__(self, item): return item in self._list_items def add(self, item): if self.path: self._list_items.add(item) def dump(self): if self.path: with open(self.path, 'w') as f: f.write("\n".join(sorted(self._list_items)) + "\n") @contextmanager def tempfile(): handle, path = mkstemp() close(handle) yield path remove(path) bundlewrap-3.8.0/bundlewrap/utils/cmdline.py000066400000000000000000000056251360562404000211650ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from sys import exit from ..exceptions import NoSuchGroup, NoSuchItem, NoSuchNode from . import names from .text import mark_for_translation as _, red from .ui import io, QUIT_EVENT def count_items(nodes): count = 0 for node in nodes: if QUIT_EVENT.is_set(): return 0 count += len(node.items) return count def get_group(repo, group_name): try: return repo.get_group(group_name) except NoSuchGroup: io.stderr(_("{x} No such group: {group}").format( group=group_name, x=red("!!!"), )) exit(1) def get_item(node, item_id): try: return node.get_item(item_id) except NoSuchItem: io.stderr(_("{x} No such item on node '{node}': {item}").format( item=item_id, node=node.name, x=red("!!!"), )) exit(1) def get_node(repo, node_name, adhoc_nodes=False): try: return repo.get_node(node_name) except NoSuchNode: if adhoc_nodes: return repo.create_node(node_name) else: io.stderr(_("{x} No such node: {node}").format( node=node_name, x=red("!!!"), )) exit(1) def get_target_nodes(repo, target_string, adhoc_nodes=False): """ Returns a list of nodes. The input is a string like this: "node1,node2,group3,bundle:foo" Meaning: Targets are 'node1', 'node2', all nodes in 'group3', and all nodes with the bundle 'foo'. """ targets = [] for name in target_string.split(","): name = name.strip() if name.startswith("bundle:"): bundle_name = name.split(":", 1)[1] for node in repo.nodes: if bundle_name in names(node.bundles): targets.append(node) elif name.startswith("!bundle:"): bundle_name = name.split(":", 1)[1] for node in repo.nodes: if bundle_name not in names(node.bundles): targets.append(node) elif name.startswith("!group:"): group_name = name.split(":", 1)[1] for node in repo.nodes: if group_name not in names(node.groups): targets.append(node) else: try: targets.append(repo.get_node(name)) except NoSuchNode: try: targets += list(repo.get_group(name).nodes) except NoSuchGroup: if adhoc_nodes: targets.append(repo.create_node(name)) else: io.stderr(_("{x} No such node or group: {name}").format( x=red("!!!"), name=name, )) exit(1) return sorted(set(targets)) bundlewrap-3.8.0/bundlewrap/utils/dicts.py000066400000000000000000000230551360562404000206550ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from difflib import unified_diff from hashlib import sha1 from json import dumps, JSONEncoder from . import Fault from .text import bold, green, red from .text import force_text, mark_for_translation as _ try: text_type = unicode byte_type = str except NameError: text_type = str byte_type = bytes DIFF_MAX_INLINE_LENGTH = 36 DIFF_MAX_LINE_LENGTH = 1024 class _Atomic(object): """ This and the following related classes are used to mark objects as non-mergeable for the purposes of merge_dict(). 
""" pass class _AtomicDict(dict, _Atomic): pass class _AtomicList(list, _Atomic): pass class _AtomicSet(set, _Atomic): pass class _AtomicTuple(tuple, _Atomic): pass ATOMIC_TYPES = { dict: _AtomicDict, list: _AtomicList, set: _AtomicSet, tuple: _AtomicTuple, } def diff_keys(sdict1, sdict2): """ Compares the keys of two statedicts and returns the keys with differing values. Note that only keys in the first statedict are considered. If a key only exists in the second one, it is disregarded. """ if sdict1 is None: return [] if sdict2 is None: return sdict1.keys() differing_keys = [] for key, value in sdict1.items(): if value != sdict2[key]: differing_keys.append(key) return differing_keys def diff_value_bool(title, value1, value2): return diff_value_text( title, "yes" if value1 else "no", "yes" if value2 else "no", ) def diff_value_int(title, value1, value2): return diff_value_text( title, "{}".format(value1), "{}".format(value2), ) def diff_value_list(title, value1, value2): if isinstance(value1, set): value1 = sorted(value1) value2 = sorted(value2) else: # convert tuples and create copies of lists before possibly # appending stuff later on (see below) value1 = list(value1) value2 = list(value2) # make sure that *if* we have lines, the last one will also end with # a newline if value1: value1.append("") if value2: value2.append("") return diff_value_text( title, "\n".join([str(i) for i in value1]), "\n".join([str(i) for i in value2]), ) def diff_value_text(title, value1, value2): max_length = max(len(value1), len(value2)) value1, value2 = force_text(value1), force_text(value2) if ( "\n" not in value1 and "\n" not in value2 ): if max_length < DIFF_MAX_INLINE_LENGTH: return "{} {} → {}".format( bold(title), red(value1), green(value2), ) elif max_length < DIFF_MAX_LINE_LENGTH: return "{} {}\n{}→ {}".format( bold(title), red(value1), " " * (len(title) - 1), green(value2), ) output = bold(title) + "\n" for line in unified_diff( value1.splitlines(True), value2.splitlines(True), fromfile=_(""), tofile=_(""), ): suffix = "" if len(line) > DIFF_MAX_LINE_LENGTH: suffix += _(" (line truncated after {} characters)").format(DIFF_MAX_LINE_LENGTH) if not line.endswith("\n"): suffix += _(" (no newline at end of file)") line = line[:DIFF_MAX_LINE_LENGTH].rstrip("\n") if line.startswith("+"): line = green(line) elif line.startswith("-"): line = red(line) output += line + suffix + "\n" return output TYPE_DIFFS = { bool: diff_value_bool, byte_type: diff_value_text, float: diff_value_int, int: diff_value_int, list: diff_value_list, set: diff_value_list, text_type: diff_value_text, tuple: diff_value_list, } def diff_value(title, value1, value2): value_type = type(value1) assert value_type == type(value2), "cannot compare {} with {}".format( repr(value1), repr(value2), ) diff_func = TYPE_DIFFS[value_type] return diff_func(title, value1, value2) class FaultResolvingJSONEncoder(JSONEncoder): def default(self, obj): if isinstance(obj, Fault): return self.default(obj.value) elif isinstance(obj, set): return sorted(obj) else: return JSONEncoder.default(self, obj) def hash_statedict(sdict): """ Returns a canonical SHA1 hash to describe this dict. """ return sha1(statedict_to_json(sdict).encode('utf-8')).hexdigest() def map_dict_keys(dict_obj, _base=None): """ Return a set of key paths for the given dict. 
E.g.: >>> map_dict_keys({'foo': {'bar': 1}, 'baz': 2}) set([('foo', 'bar'), ('baz',)]) """ if _base is None: _base = tuple() keys = set([_base + (key,) for key in dict_obj.keys()]) for key, value in dict_obj.items(): if isinstance(value, dict): keys.update(map_dict_keys(value, _base=_base + (key,))) return keys def merge_dict(base, update): """ Recursively merges the base dict into the update dict. """ if not isinstance(update, dict): return update merged = base.copy() for key, value in update.items(): merge = ( key in base and not isinstance(value, _Atomic) and not isinstance(base[key], _Atomic) ) if merge and isinstance(base[key], dict): merged[key] = merge_dict(base[key], value) elif ( merge and isinstance(base[key], list) and ( isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple) ) ): extended = base[key][:] extended.extend(value) merged[key] = extended elif ( merge and isinstance(base[key], tuple) and ( isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple) ) ): merged[key] = base[key] + tuple(value) elif ( merge and isinstance(base[key], set) and ( isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple) ) ): merged[key] = base[key].union(set(value)) else: merged[key] = value return merged def reduce_dict(full_dict, template_dict): """ Take a large dict and recursively remove all keys that are not present in the template dict. Also descends into lists. >>> full_dict = { 'a': [{ 'b': 1, 'c': 2, # this will be removed from final result }], 'd': 3, } >>> template_dict = { 'a': [{ 'b': None, }], 'd': None, 'e': None, } >>> reduce_dict(full_dict, template_dict) { 'a': [{ 'b': 1, }], 'd': 3, } """ if isinstance(full_dict, list): if not isinstance(template_dict, list): return full_dict result = [] for index in range(len(full_dict)): full_dict_element = full_dict[index] try: template_dict_element = template_dict[index] except IndexError: template_dict_element = full_dict_element result.append(reduce_dict(full_dict_element, template_dict_element)) return result elif isinstance(full_dict, dict): if not isinstance(template_dict, dict): return full_dict result = {} for key, value in full_dict.items(): if key in template_dict: result[key] = reduce_dict(value, template_dict[key]) return result else: return full_dict def statedict_to_json(sdict, pretty=False): """ Returns a canonical JSON representation of the given statedict. """ if sdict is None: return "" else: return dumps( sdict, cls=FaultResolvingJSONEncoder, indent=4 if pretty else None, sort_keys=True, ) def validate_statedict(sdict): """ Raises ValueError if the given statedict is invalid. """ if sdict is None: return for key, value in sdict.items(): if not isinstance(force_text(key), text_type): raise ValueError(_("non-text statedict key: {}").format(key)) if type(value) not in TYPE_DIFFS and value is not None: raise ValueError( _("invalid statedict value for key '{k}': {v}").format(k=key, v=value) ) if type(value) in (list, tuple): for index, element in enumerate(value): if type(element) not in TYPE_DIFFS and element is not None: raise ValueError(_( "invalid element #{i} in statedict key '{k}': {e}" ).format( e=element, i=index, k=key, )) def value_at_key_path(dict_obj, path): """ Given the list of keys in `path`, recursively traverse `dict_obj` and return whatever is found at the end of that path. 
E.g.: >>> value_at_key_path({'foo': {'bar': 5}}, ['foo', 'bar']) 5 """ if not path: return dict_obj else: return value_at_key_path(dict_obj[path[0]], path[1:]) bundlewrap-3.8.0/bundlewrap/utils/plot.py000066400000000000000000000174301360562404000205250ustar00rootroot00000000000000import re from . import names from .text import mark_for_translation as _, red def explain_item_dependency_loop(exc, node_name): """ Given an ItemDependencyLoop exception and a node name, generates output lines to help users debug the issue. """ items = remove_items_not_contributing_to_loop(exc.items) yield _( "{x} There was a dependency problem on node '{node}'. Look at the debug.svg generated " "by the following command and try to find a loop:\n\n\n" "printf '{cmd}' | dot -Tsvg -odebug.svg\n\n\n" ).format( x=red("!"), node=node_name, cmd="\\n".join(graph_for_items(node_name, items)), ) yield _( "{x} Additionally, here is a list of all items involved " "and their remaining dependencies:\n" ).format(x=red("!")) for item in items: yield "{}\t{}".format(item.id, ",".join(item._deps)) yield "\n\n\n" def graph_for_items( title, items, cluster=True, concurrency=True, static=True, regular=True, reverse=True, auto=True, ): items = sorted(items) yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("graph [color=\"#303030\"; " "fontname=Helvetica; " "penwidth=2; " "shape=box; " "style=\"rounded,dashed\"]") yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontcolor=white; " "fontname=Helvetica; " "shape=box; " "style=\"rounded,filled\"]") yield "edge [arrowhead=vee]" item_ids = [] for item in items: item_ids.append(item.id) if cluster: # Define which items belong to which bundle bundle_number = 0 bundles_seen = set() for item in items: if item.bundle is None or item.bundle.name in bundles_seen: continue yield "subgraph cluster_{}".format(bundle_number) bundle_number += 1 yield "{" yield "label = \"{}\"".format(item.bundle.name) if "bundle:{}".format(item.bundle.name) in item_ids: yield "\"bundle:{}\"".format(item.bundle.name) for bitem in item.bundle.items: if bitem.id in item_ids: yield "\"{}\"".format(bitem.id) yield "}" bundles_seen.add(item.bundle.name) # Define dependencies between items for item in items: if regular: for dep in item.needs: if dep in item_ids: yield "\"{}\" -> \"{}\" [color=\"#C24948\",penwidth=2]".format(item.id, dep) if auto: for dep in sorted(item._deps): if dep not in item_ids: continue if dep in getattr(item, '_concurrency_deps', []): if concurrency: yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(item.id, dep) elif dep in item._reverse_deps: if reverse: yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(item.id, dep) elif dep not in item.needs: if dep in item_ids: yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(item.id, dep) # Global graph title yield "fontsize = 28" yield "label = \"{}\"".format(title) yield "labelloc = \"t\"" yield "}" def plot_group(groups, nodes, show_nodes): yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontname=Helvetica]") yield "edge [arrowhead=vee]" for group in groups: yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) for node in nodes: yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) for group in groups: for subgroup in group.immediate_subgroup_names: yield "\"{}\" -> 
\"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) for subgroup in group._subgroup_names_from_patterns: yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) if show_nodes: for group in groups: for node in group._nodes_from_members: yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( group.name, node.name) for node in group._nodes_from_patterns: yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( group.name, node.name) for node in nodes: if group in node._groups_dynamic: yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format( group.name, node.name) yield "}" def plot_node_groups(node): yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontname=Helvetica]") yield "edge [arrowhead=vee]" for group in node.groups: yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) for group in node.groups: for subgroup in group.immediate_subgroup_names: if subgroup in names(node.groups): yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) for pattern in group.immediate_subgroup_patterns: compiled_pattern = re.compile(pattern) for group2 in node.groups: if compiled_pattern.search(group2.name) is not None and group2 != group: yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, group2.name) for group in node.groups: if node in group._nodes_from_members: yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( group.name, node.name) elif node in group._nodes_from_patterns: yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( group.name, node.name) elif group in node._groups_dynamic: yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format( group.name, node.name) yield "}" def remove_items_not_contributing_to_loop(items): """ We have found a loop. By definition, each item in a loop must have at least one incoming and one outgoing dependency. We can therefore remove all items without either incoming or outgoing dependencies to make the loop more apparent. """ items_with_no_incoming_or_outgoing_deps = set() for item in items: if not item._deps: items_with_no_incoming_or_outgoing_deps.add(item) else: for other_item in items: if item == other_item: continue if item.id in other_item._deps: break else: items_with_no_incoming_or_outgoing_deps.add(item) filtered_items = list(filter( lambda item: item not in items_with_no_incoming_or_outgoing_deps, items, )) if len(filtered_items) == len(items): # nothing happened, end recursion return filtered_items else: # we removed something, this might free up other items we can # catch in a second pass return remove_items_not_contributing_to_loop(filtered_items) bundlewrap-3.8.0/bundlewrap/utils/remote.py000066400000000000000000000075231360562404000210440ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from pipes import quote from . 
import cached_property from .text import force_text, mark_for_translation as _ from .ui import io def stat(node, path): if node.os in node.OS_FAMILY_BSD: result = node.run( "stat -f '%Su:%Sg:%p:%z:%HT' -- {}".format(quote(path)), may_fail=True, ) else: result = node.run( "stat -c '%U:%G:%a:%s:%F' -- {}".format(quote(path)), may_fail=True, ) if result.return_code != 0: return {} owner, group, mode, size, ftype = \ force_text(result.stdout).strip().split(":", 5) mode = mode[-4:].zfill(4) # cut off BSD file type file_stat = { 'owner': owner, 'group': group, 'mode': mode, 'size': int(size), 'type': ftype.lower(), } io.debug(_("stat for '{path}' on {node}: {result}".format( node=node.name, path=path, result=repr(file_stat), ))) return file_stat class PathInfo(object): """ Serves as a proxy to get_path_type. """ def __init__(self, node, path): self.node = node self.path = path self.stat = stat(node, path) def __repr__(self): return "".format(self.node.name, quote(self.path)) @property def exists(self): return bool(self.stat) @property def group(self): return self.stat['group'] @property def is_binary_file(self): return self.is_file and not self.is_text_file @property def is_directory(self): return self.stat['type'] == "directory" @property def is_file(self): return self.stat['type'] in ("regular file", "regular empty file") @property def is_symlink(self): return self.stat['type'] == "symbolic link" @property def is_text_file(self): return self.is_file and ( "text" in self.desc or self.desc in ( "empty", "OpenSSH ED25519 public key", "OpenSSH RSA public key", "OpenSSH DSA public key", ) ) @property def mode(self): return self.stat['mode'] @property def owner(self): return self.stat['owner'] @cached_property def desc(self): return force_text(self.node.run( "file -bh -- {}".format(quote(self.path)) ).stdout).strip() @cached_property def sha1(self): if self.node.os == 'macos': result = self.node.run("shasum -a 1 -- {}".format(quote(self.path))) elif self.node.os in self.node.OS_FAMILY_BSD: result = self.node.run("sha1 -q -- {}".format(quote(self.path))) else: result = self.node.run("sha1sum -- {}".format(quote(self.path))) # sha1sum adds a leading backslash to hashes of files whose name # contains backslash-escaped characters – we must lstrip() that return force_text(result.stdout).strip().lstrip("\\").split()[0] @cached_property def sha256(self): if self.node.os == 'macos': result = self.node.run("shasum -a 256 -- {}".format(quote(self.path))) elif self.node.os in self.node.OS_FAMILY_BSD: result = self.node.run("sha256 -q -- {}".format(quote(self.path))) else: result = self.node.run("sha256sum -- {}".format(quote(self.path))) return force_text(result.stdout).strip().split()[0] @property def size(self): return self.stat['size'] @property def symlink_target(self): if not self.is_symlink: raise ValueError("{} is not a symlink".format(quote(self.path))) return force_text(self.node.run( "readlink -- {}".format(quote(self.path)), may_fail=True, ).stdout.strip()) bundlewrap-3.8.0/bundlewrap/utils/scm.py000066400000000000000000000025611360562404000203300ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from subprocess import CalledProcessError, check_output, STDOUT def get_git_branch(): try: return check_output( "git rev-parse --abbrev-ref HEAD", shell=True, stderr=STDOUT, ).strip() except CalledProcessError: return None def get_git_clean(): try: return not bool(check_output( "git status --porcelain", shell=True, stderr=STDOUT, ).strip()) except 
CalledProcessError: return None def get_bzr_rev(): try: return check_output( "bzr revno", shell=True, stderr=STDOUT, ).strip() except CalledProcessError: return None def get_git_rev(): try: return check_output( "git rev-parse HEAD", shell=True, stderr=STDOUT, ).strip() except CalledProcessError: return None def get_hg_rev(): try: return check_output( "hg --debug id -i", shell=True, stderr=STDOUT, ).strip().rstrip("+") except CalledProcessError: return None def get_rev(): for scm_rev in (get_git_rev, get_hg_rev, get_bzr_rev): rev = scm_rev() if rev is not None: return rev return None bundlewrap-3.8.0/bundlewrap/utils/table.py000066400000000000000000000146151360562404000206400ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os import environ from .text import ansi_clean ROW_SEPARATOR = 1 if environ.get("BW_TABLE_STYLE") == 'ascii': FRAME_TOP_LEFT = "+-" FRAME_TOP_COLUMN_SEPARATOR = "-+-" FRAME_TOP_RIGHT = "-+" FRAME_BOTTOM_LEFT = "+-" FRAME_BOTTOM_COLUMN_SEPARATOR = "-+-" FRAME_BOTTOM_RIGHT = "-+" FRAME_CENTER_LEFT = "+-" FRAME_CENTER_COLUMN_SEPARATOR = "-+-" FRAME_CENTER_RIGHT = "-+" FRAME_COLUMN_FILLER = "-" FRAME_COLUMN_WHITESPACE = " " FRAME_ROW_COLUMN_SEPARATOR_LEFT = "-| " FRAME_ROW_COLUMN_SEPARATOR_NONE = " | " FRAME_ROW_COLUMN_SEPARATOR_BOTH = "-+-" FRAME_ROW_COLUMN_SEPARATOR_RIGHT = " |-" elif environ.get("BW_TABLE_STYLE") == 'grep': FRAME_TOP_LEFT = "" FRAME_TOP_COLUMN_SEPARATOR = "" FRAME_TOP_RIGHT = "" FRAME_BOTTOM_LEFT = "" FRAME_BOTTOM_COLUMN_SEPARATOR = "" FRAME_BOTTOM_RIGHT = "" FRAME_CENTER_LEFT = "" FRAME_CENTER_COLUMN_SEPARATOR = "" FRAME_CENTER_RIGHT = "" FRAME_COLUMN_FILLER = "" FRAME_COLUMN_WHITESPACE = "" FRAME_ROW_COLUMN_SEPARATOR_LEFT = "\t" FRAME_ROW_COLUMN_SEPARATOR_NONE = "\t" FRAME_ROW_COLUMN_SEPARATOR_BOTH = "\t" FRAME_ROW_COLUMN_SEPARATOR_RIGHT = "\t" else: FRAME_TOP_LEFT = "╭─" FRAME_TOP_COLUMN_SEPARATOR = "─┬─" FRAME_TOP_RIGHT = "─╮" FRAME_BOTTOM_LEFT = "╰─" FRAME_BOTTOM_COLUMN_SEPARATOR = "─┴─" FRAME_BOTTOM_RIGHT = "─╯" FRAME_CENTER_LEFT = "├─" FRAME_CENTER_COLUMN_SEPARATOR = "─┼─" FRAME_CENTER_RIGHT = "─┤" FRAME_COLUMN_FILLER = "─" FRAME_COLUMN_WHITESPACE = " " FRAME_ROW_COLUMN_SEPARATOR_LEFT = "─┤ " FRAME_ROW_COLUMN_SEPARATOR_NONE = " │ " FRAME_ROW_COLUMN_SEPARATOR_BOTH = "─┼─" FRAME_ROW_COLUMN_SEPARATOR_RIGHT = " ├─" def _column_widths_for_rows(rows): column_widths = [0 for column in rows[0]] for row in rows: if not isinstance(row, list) and not isinstance(row, tuple): continue for i, column in enumerate(row): if column == ROW_SEPARATOR: continue column_widths[i] = max(column_widths[i], len(ansi_clean(column))) return column_widths def _border_top(column_widths): result = FRAME_TOP_LEFT result += FRAME_TOP_COLUMN_SEPARATOR.join( [FRAME_COLUMN_FILLER * width for width in column_widths] ) result += FRAME_TOP_RIGHT return result def _border_center(column_widths): result = FRAME_CENTER_LEFT result += FRAME_CENTER_COLUMN_SEPARATOR.join( [FRAME_COLUMN_FILLER * width for width in column_widths] ) result += FRAME_CENTER_RIGHT return result def _border_bottom(column_widths): result = FRAME_BOTTOM_LEFT result += FRAME_BOTTOM_COLUMN_SEPARATOR.join( [FRAME_COLUMN_FILLER * width for width in column_widths] ) result += FRAME_BOTTOM_RIGHT return result def _empty_row(row): for column_value in row: if column_value != ROW_SEPARATOR and column_value.strip(): return False return True def _row(row, column_widths, alignments): result = "" columns = [] for i, column_value in enumerate(row): alignment = alignments.get(i, 'left') 
if column_value == ROW_SEPARATOR: columns.append(ROW_SEPARATOR) elif alignment == 'right': columns.append( FRAME_COLUMN_WHITESPACE * (column_widths[i] - len(ansi_clean(column_value))) + column_value ) elif alignment == 'left': columns.append( column_value + FRAME_COLUMN_WHITESPACE * (column_widths[i] - len(ansi_clean(column_value))) ) elif alignment == 'center': prefix = int((column_widths[i] - len(ansi_clean(column_value))) / 2) suffix = (column_widths[i] - len(ansi_clean(column_value)) - prefix) columns.append( FRAME_COLUMN_WHITESPACE * prefix + column_value + FRAME_COLUMN_WHITESPACE * suffix ) else: raise NotImplementedError("no such alignment: {}".format(alignment)) for i, column_value in enumerate(columns): if i == 0: fill_previous_column = False else: fill_previous_column = columns[i - 1] == ROW_SEPARATOR fill_this_column = column_value == ROW_SEPARATOR if fill_previous_column and fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_BOTH elif fill_previous_column and not fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_LEFT elif not fill_previous_column and fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_RIGHT else: result += FRAME_ROW_COLUMN_SEPARATOR_NONE if fill_this_column: result += FRAME_COLUMN_FILLER * column_widths[i] else: result += column_value if fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_LEFT else: result += FRAME_ROW_COLUMN_SEPARATOR_NONE return result[1:-1] # strip exactly one whitespace character at each end def render_table(rows, alignments=None): """ Yields lines for a table. rows must be a list of lists of values, with the first row being considered the heading row. Alternatively, an entire row or individual cells can be set to ROW_SEPARATOR to turn it into a separator: rows = [ ["heading1", "heading2"], ROW_SEPARATOR, ["value1", "value2"], ["value3", ROW_SEPARATOR], ] alignments is a dict mapping column indexes to 'left' or 'right'. 
""" if alignments is None: alignments = {} column_widths = _column_widths_for_rows(rows) if environ.get("BW_TABLE_STYLE") != 'grep': yield _border_top(column_widths) for row_index, row in enumerate(rows): if row == ROW_SEPARATOR: if environ.get("BW_TABLE_STYLE") != 'grep': yield _row([ROW_SEPARATOR] * len(column_widths), column_widths, {}) elif row_index == 0: # heading row ignores alignments yield _row(row, column_widths, {}) elif environ.get("BW_TABLE_STYLE") != 'grep' or not _empty_row(row): yield _row(row, column_widths, alignments) if environ.get("BW_TABLE_STYLE") != 'grep': yield _border_bottom(column_widths) bundlewrap-3.8.0/bundlewrap/utils/testing.py000066400000000000000000000033231360562404000212200ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals import platform from subprocess import Popen, PIPE from ..bundle import FILENAME_BUNDLE from ..secrets import FILENAME_SECRETS HOST_OS = { "Darwin": 'macos', "Linux": 'linux', } def host_os(): return HOST_OS[platform.system()] def make_repo(tmpdir, bundles=None, groups=None, nodes=None): bundles = {} if bundles is None else bundles groups = {} if groups is None else groups nodes = {} if nodes is None else nodes bundles_dir = tmpdir.mkdir("bundles") for bundle, items in bundles.items(): bundle_dir = bundles_dir.mkdir(bundle) bundle_dir.mkdir("files") bundlepy = bundle_dir.join(FILENAME_BUNDLE) bundle_content = "# -*- coding: utf-8 -*-\n" for itemtype, itemconfig in items.items(): bundle_content += "{} = {}\n".format(itemtype, repr(itemconfig)) bundlepy.write(bundle_content) tmpdir.mkdir("data") tmpdir.mkdir("hooks") groupspy = tmpdir.join("groups.py") groupspy.write("# -*- coding: utf-8 -*-\ngroups = {}\n".format(repr(groups))) nodespy = tmpdir.join("nodes.py") nodespy.write("# -*- coding: utf-8 -*-\nnodes = {}\n".format(repr(nodes))) secrets = tmpdir.join(FILENAME_SECRETS) secrets.write("[generate]\nkey = {}\n\n[encrypt]\nkey = {}\n".format( "Fl53iG1czBcaAPOKhSiJE7RjFU9nIAGkiKDy0k_LoTc=", "DbYiUu5VMfrdeSiKYiAH4rDOAUISipvLSBJI-T0SpeY=", )) def run(command, path=None): process = Popen(command, cwd=path, shell=True, stderr=PIPE, stdout=PIPE) stdout, stderr = process.communicate() print(stdout.decode('utf-8')) print(stderr.decode('utf-8')) return (stdout, stderr, process.returncode) bundlewrap-3.8.0/bundlewrap/utils/text.py000066400000000000000000000141141360562404000205270ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime, timedelta from io import BytesIO from os import environ from os.path import normpath from random import choice import re from string import digits, ascii_letters from . 
import Fault, STDERR_WRITER ANSI_ESCAPE = re.compile(r'\x1b[^m]*m') HIDE_CURSOR = "\033[?25l" SHOW_CURSOR = "\033[?25h" VALID_NAME_CHARS = digits + ascii_letters + "-_.+" def ansi_clean(input_string): return ANSI_ESCAPE.sub("", force_text(input_string)) def ansi_wrapper(colorizer): if environ.get("BW_COLORS", "1") != "0": return colorizer else: return lambda s, **kwargs: s @ansi_wrapper def blue(text): return "\033[34m{}\033[0m".format(text) @ansi_wrapper def bold(text): return "\033[1m{}\033[0m".format(text) @ansi_wrapper def cyan(text): return "\033[36m{}\033[0m".format(text) @ansi_wrapper def inverse(text): return "\033[0m\033[7m{}\033[0m".format(text) @ansi_wrapper def italic(text): return "\033[3m{}\033[0m".format(text) @ansi_wrapper def green(text): return "\033[32m{}\033[0m".format(text) @ansi_wrapper def red(text): return "\033[31m{}\033[0m".format(text) @ansi_wrapper def yellow(text): return "\033[33m{}\033[0m".format(text) def cyan_unless_zero(number): if number == 0: return "0" else: return cyan(str(number)) def green_unless_zero(number): if number == 0: return "0" else: return green(str(number)) def red_unless_zero(number): if number == 0: return "0" else: return red(str(number)) def yellow_unless_zero(number): if number == 0: return "0" else: return yellow(str(number)) def error_summary(errors): if not errors: return if len(errors) == 1: STDERR_WRITER.write(_("\n{x} There was an error, repeated below.\n\n").format( x=red("!!!"), )) STDERR_WRITER.flush() else: STDERR_WRITER.write(_("\n{x} There were {count} errors, repeated below.\n\n").format( count=len(errors), x=red("!!!"), )) STDERR_WRITER.flush() for e in errors: STDERR_WRITER.write(e) STDERR_WRITER.write("\n") STDERR_WRITER.flush() def force_text(data): """ Try to return a text aka unicode object from the given data. Also has Python 2/3 compatibility baked in. Oh the humanity. """ if isinstance(data, bytes): return data.decode('utf-8', 'replace') elif isinstance(data, Fault): return data.value return data def is_subdirectory(parent, child): """ Returns True if the given child is a subdirectory of the parent. """ parent = normpath(parent) child = normpath(child) if not parent.startswith("/") or not child.startswith("/"): raise ValueError(_("directory paths must be absolute")) if parent == child: return False if parent == "/": return True return child.startswith(parent + "/") def mark_for_translation(s): return s _ = mark_for_translation def randstr(length=24): """ Returns a random alphanumeric string of the given length. """ return ''.join(choice(ascii_letters + digits) for c in range(length)) def validate_name(name): """ Checks whether the given string is a valid name for a node, group, or bundle. 
""" try: for char in name: assert char in VALID_NAME_CHARS assert not name.startswith(".") except AssertionError: return False return True def wrap_question(title, body, question, prefix=""): output = ("{0}\n" "{0} ╭─ {1}\n" "{0} │\n".format(prefix, title)) for line in body.splitlines(): output += "{0} │ {1}\n".format(prefix, line) output += ("{0} │\n" "{0} ╰─ ".format(prefix) + question) return output class LineBuffer(object): def __init__(self, target): self.buffer = b"" self.record = BytesIO() self.target = target if target else lambda s: None def close(self): self.flush() if self.buffer: self.record.write(self.buffer) self.target(self.buffer) def flush(self): while b"\n" in self.buffer: chunk, self.buffer = self.buffer.split(b"\n", 1) self.record.write(chunk + b"\n") self.target(chunk + b"\n") def write(self, msg): self.buffer += msg self.flush() def format_duration(duration, msec=False): """ Takes a timedelta and returns something like "1d 5h 4m 3s". """ components = [] if duration.days > 0: components.append(_("{}d").format(duration.days)) seconds = duration.seconds if seconds >= 3600: hours = int(seconds / 3600) seconds -= hours * 3600 components.append(_("{}h").format(hours)) if seconds >= 60: minutes = int(seconds / 60) seconds -= minutes * 60 components.append(_("{}m").format(minutes)) if seconds > 0 or not components: if msec: seconds += duration.microseconds / 1000000.0 components.append(_("{:.3f}s").format(seconds)) else: components.append(_("{}s").format(seconds)) return " ".join(components) def format_timestamp(timestamp): return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") def parse_duration(duration): """ Parses a string like "1d 5h 4m 3s" into a timedelta. """ days = 0 seconds = 0 for component in duration.strip().split(" "): component = component.strip() if component[-1] == "d": days += int(component[:-1]) elif component[-1] == "h": seconds += int(component[:-1]) * 3600 elif component[-1] == "m": seconds += int(component[:-1]) * 60 elif component[-1] == "s": seconds += int(component[:-1]) else: raise ValueError(_("{} is not a valid duration string").format(repr(duration))) return timedelta(days=days, seconds=seconds) bundlewrap-3.8.0/bundlewrap/utils/ui.py000066400000000000000000000334611360562404000201660ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from contextlib import contextmanager from datetime import datetime from errno import EPIPE import fcntl from functools import wraps from os import _exit, environ, getpid, kill from os.path import join from select import select from signal import signal, SIG_DFL, SIGINT, SIGQUIT, SIGTERM import struct from subprocess import PIPE, Popen import sys import termios from time import time from threading import Event, Lock, Thread from . 
import STDERR_WRITER, STDOUT_WRITER from .table import render_table, ROW_SEPARATOR from .text import ( HIDE_CURSOR, SHOW_CURSOR, ansi_clean, blue, bold, format_duration, mark_for_translation as _, ) INFO_EVENT = Event() QUIT_EVENT = Event() SHUTDOWN_EVENT_HARD = Event() SHUTDOWN_EVENT_SOFT = Event() TTY = STDOUT_WRITER.isatty() if sys.version_info >= (3, 0): broken_pipe_exception = BrokenPipeError else: broken_pipe_exception = IOError def add_debug_indicator(f): @wraps(f) def wrapped(self, msg, **kwargs): return f(self, "[DEBUG] " + msg, **kwargs) return wrapped def add_debug_timestamp(f): @wraps(f) def wrapped(self, msg, **kwargs): if self.debug_mode: msg = datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + msg return f(self, msg, **kwargs) return wrapped def capture_for_debug_logfile(f): @wraps(f) def wrapped(self, msg, **kwargs): if self.debug_log_file and self._active: self.debug_log_file.write( datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + ansi_clean(msg).rstrip("\n") + "\n" ) return f(self, msg, **kwargs) return wrapped def clear_formatting(f): """ Makes sure formatting from cut-off lines can't bleed into next one """ @wraps(f) def wrapped(self, msg, **kwargs): if TTY and environ.get("BW_COLORS", "1") != "0": msg = "\033[0m" + msg return f(self, msg, **kwargs) return wrapped def sigint_handler(*args, **kwargs): """ This handler is kept short since it interrupts execution of the main thread. It's safer to handle these events in their own thread because the main thread might be holding the IO lock while it is interrupted. """ if not SHUTDOWN_EVENT_SOFT.is_set(): SHUTDOWN_EVENT_SOFT.set() else: SHUTDOWN_EVENT_HARD.set() def sigquit_handler(*args, **kwargs): """ This handler is kept short since it interrupts execution of the main thread. It's safer to handle these events in their own thread because the main thread might be holding the IO lock while it is interrupted. """ INFO_EVENT.set() def spinner(): while True: for c in "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏": yield c def term_width(): if not TTY: return 0 fd = sys.stdout.fileno() _, width = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'aaaa')) return width def page_lines(lines): """ View the given list of Unicode lines in a pager (e.g. `less`). """ lines = list(lines) line_width = max([len(ansi_clean(line)) for line in lines]) if TTY and line_width > term_width(): pager = Popen([environ.get("PAGER", "/usr/bin/less")], stdin=PIPE) pager.stdin.write("\n".join(lines).encode('utf-8')) pager.stdin.close() pager.communicate() else: for line in lines: io.stdout(line) def write_to_stream(stream, msg): try: if TTY: stream.write(msg) else: stream.write(ansi_clean(msg)) stream.flush() except broken_pipe_exception as e: if broken_pipe_exception == IOError: if e.errno != EPIPE: raise class DrainableStdin(object): def get_input(self): while True: if QUIT_EVENT.is_set(): return None if select([sys.stdin], [], [], 0.1)[0]: return sys.stdin.readline().strip() def drain(self): if sys.stdin.isatty(): termios.tcflush(sys.stdin, termios.TCIFLUSH) class IOManager(object): """ Threadsafe singleton class that handles all IO. 
""" def __init__(self): self._active = False self.debug_log_file = None self.debug_mode = False self.jobs = [] self.lock = Lock() self.progress = 0 self.progress_start = None self.progress_total = 0 self._spinner = spinner() self._last_spinner_character = next(self._spinner) self._last_spinner_update = 0 self._signal_handler_thread = None self._child_pids = [] self._status_line_present = False self._waiting_for_input = False def activate(self): self._active = True if 'BW_DEBUG_LOG_DIR' in environ: self.debug_log_file = open(join( environ['BW_DEBUG_LOG_DIR'], "{}_{}.log".format( datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), getpid(), ), ), 'a') self._signal_handler_thread = Thread( target=self._signal_handler_thread_body, ) # daemon mode is required because we need to keep the thread # around until the end of a soft shutdown to wait for a hard # shutdown signal, but don't have a feasible way of stopping # the thread once the soft shutdown has completed self._signal_handler_thread.daemon = True self._signal_handler_thread.start() signal(SIGINT, sigint_handler) signal(SIGQUIT, sigquit_handler) if TTY: write_to_stream(STDOUT_WRITER, HIDE_CURSOR) def ask(self, question, default, epilogue=None, input_handler=DrainableStdin()): assert self._active answers = _("[Y/n]") if default else _("[y/N]") question = question + " " + answers + " " self._waiting_for_input = True with self.lock: if QUIT_EVENT.is_set(): sys.exit(0) self._clear_last_job() while True: write_to_stream(STDOUT_WRITER, "\a" + question + SHOW_CURSOR) input_handler.drain() answer = input_handler.get_input() if answer is None: if epilogue: write_to_stream(STDOUT_WRITER, "\n" + epilogue + "\n") QUIT_EVENT.set() sys.exit(0) elif answer.lower() in (_("y"), _("yes")) or ( not answer and default ): answer = True break elif answer.lower() in (_("n"), _("no")) or ( not answer and not default ): answer = False break write_to_stream( STDOUT_WRITER, _("Please answer with 'y(es)' or 'n(o)'.\n"), ) if epilogue: write_to_stream(STDOUT_WRITER, epilogue + "\n") write_to_stream(STDOUT_WRITER, HIDE_CURSOR) self._waiting_for_input = False return answer def deactivate(self): self._active = False if TTY: write_to_stream(STDOUT_WRITER, SHOW_CURSOR) signal(SIGINT, SIG_DFL) signal(SIGQUIT, SIG_DFL) self._signal_handler_thread.join() if self.debug_log_file: self.debug_log_file.close() @clear_formatting @add_debug_indicator @capture_for_debug_logfile @add_debug_timestamp def debug(self, msg, append_newline=True): if self.debug_mode: with self.lock: self._write(msg, append_newline=append_newline) def job_add(self, msg): if not self._active: return with self.lock: self._clear_last_job() self.jobs.append(msg) self._write_current_job() def job_del(self, msg): if not self._active: return with self.lock: self._clear_last_job() self.jobs.remove(msg) self._write_current_job() def progress_advance(self, increment=1): with self.lock: self.progress += increment def progress_increase_total(self, increment=1): with self.lock: self.progress_total += increment def progress_set_total(self, total): self.progress = 0 self.progress_start = datetime.utcnow() self.progress_total = total def progress_show(self): if INFO_EVENT.is_set(): INFO_EVENT.clear() table = [] if self.jobs: table.append([bold(_("Running jobs")), self.jobs[0].strip()]) for job in self.jobs[1:]: table.append(["", job.strip()]) try: progress = (self.progress / float(self.progress_total)) elapsed = datetime.utcnow() - self.progress_start remaining = elapsed / progress - elapsed except ZeroDivisionError: pass else: 
if table: table.append(ROW_SEPARATOR) table.extend([ [bold(_("Progress")), "{:.1f}%".format(progress * 100)], ROW_SEPARATOR, [bold(_("Elapsed")), format_duration(elapsed)], ROW_SEPARATOR, [ bold(_("Remaining")), _("{} (estimate based on progress)").format(format_duration(remaining)) ], ]) output = blue("i") + "\n" if table: for line in render_table(table): output += ("{x} {line}\n".format(x=blue("i"), line=line)) else: output += _("{x} No progress info available at this time.\n").format(x=blue("i")) io.stderr(output + blue("i")) @clear_formatting @capture_for_debug_logfile @add_debug_timestamp def stderr(self, msg, append_newline=True): with self.lock: self._write(msg, append_newline=append_newline, err=True) @clear_formatting @capture_for_debug_logfile @add_debug_timestamp def stdout(self, msg, append_newline=True): with self.lock: self._write(msg, append_newline=append_newline) @contextmanager def job(self, job_text): self.job_add(job_text) try: yield finally: self.job_del(job_text) def job_wrapper(self, job_text): def outer_wrapper(wrapped_function): def inner_wrapper(*args, **kwargs): with self.job(job_text.format(*args, **kwargs)): return wrapped_function(*args, **kwargs) return inner_wrapper return outer_wrapper def _clear_last_job(self): if self._status_line_present and TTY: write_to_stream(STDOUT_WRITER, "\r\033[K") self._status_line_present = False def _signal_handler_thread_body(self): while self._active: self.progress_show() if not self._waiting_for_input: # do not block and ignore SIGINT while .ask()ing with self.lock: self._clear_last_job() self._write_current_job() if QUIT_EVENT.is_set(): if SHUTDOWN_EVENT_HARD.wait(0.1): self.stderr(_("{x} {signal} cleanup interrupted, exiting...").format( signal=bold(_("SIGINT")), x=blue("i"), )) for ssh_pid in self._child_pids: self.debug(_("killing SSH session with PID {pid}").format(pid=ssh_pid)) try: kill(ssh_pid, SIGTERM) except ProcessLookupError: pass self._clear_last_job() if TTY: write_to_stream(STDOUT_WRITER, SHOW_CURSOR) _exit(1) else: if SHUTDOWN_EVENT_SOFT.wait(0.1): QUIT_EVENT.set() self.stderr(_( "{x} {signal} canceling pending tasks... 
" "(hit CTRL+C again for immediate dirty exit)" ).format( signal=bold(_("SIGINT")), x=blue("i"), )) def _spinner_character(self): if time() - self._last_spinner_update > 0.2: self._last_spinner_update = time() self._last_spinner_character = next(self._spinner) return self._last_spinner_character def _write(self, msg, append_newline=True, err=False): if not self._active: return self._clear_last_job() if msg is not None: if append_newline: msg += "\n" write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg) self._write_current_job() def _write_current_job(self): if self.jobs and TTY: line = "{} ".format(blue(self._spinner_character())) # must track line length manually as len() will count ANSI escape codes visible_length = 2 try: progress = (self.progress / float(self.progress_total)) except ZeroDivisionError: pass else: progress_text = "{:.1f}% ".format(progress * 100) line += bold(progress_text) visible_length += len(progress_text) line += self.jobs[-1][:term_width() - 1 - visible_length] write_to_stream(STDOUT_WRITER, line) self._status_line_present = True io = IOManager() bundlewrap-3.8.0/docs/000077500000000000000000000000001360562404000146155ustar00rootroot00000000000000bundlewrap-3.8.0/docs/content/000077500000000000000000000000001360562404000162675ustar00rootroot00000000000000bundlewrap-3.8.0/docs/content/CNAME000066400000000000000000000000241360562404000170310ustar00rootroot00000000000000docs.bundlewrap.org bundlewrap-3.8.0/docs/content/bundlewrap.css000066400000000000000000000022151360562404000211440ustar00rootroot00000000000000@import url('https://fonts.googleapis.com/css?family=Maven+Pro'); @import url('https://fonts.googleapis.com/css?family=Open+Sans:400,400i,700'); @import url('https://fonts.googleapis.com/css?family=Source+Code+Pro:400,700'); body, h1, h2, h3, h4, h5, h6 { background: white; font-family: "Open Sans", Helvetica, sans-serif; } h1, h2, h3, h4, h5, h6 { margin-top: 0; margin-bottom: 20px; } hr { border-top: 1px solid #f0f0f0; } .navbar { background: black; } .navbar, .navbar-fixed-top { border: 0; } .navbar-brand { font-family: "Maven Pro", Helvetica, sans-serif; } .bs-sidebar .nav > li > a { color: black; } .terminal { background-color: black; border-radius: 5px; color: #d7d7d7; font-family: source-code-pro, monospace; font-size: 12px; line-height: 140%; margin-bottom: 32px; margin-top: 32px; overflow: scroll; padding: 10px; padding-left: 15px; white-space: pre; } .btn-blue { background-color: #1a8acc; border: 1px solid white; color: #f0f0f0; } .btn-blue:hover { color: white; } .btn-blueoutline { background-color: white; border: 1px solid #1a8acc; color: #1a8acc; } .btn-blueoutline:hover { color: black; } bundlewrap-3.8.0/docs/content/guide/000077500000000000000000000000001360562404000173645ustar00rootroot00000000000000bundlewrap-3.8.0/docs/content/guide/api.md000066400000000000000000000142151360562404000204620ustar00rootroot00000000000000# API While most users will interact with BundleWrap through the `bw` command line utility, you can also use it from your own code to extract data or further automate config management tasks. Even within BundleWrap itself (e.g. templates, libs, and hooks) you are often given repo and/or node objects to work with. Their methods and attributes are documented below. Some general notes on using BundleWrap's API: * There can be an arbitrary amount of `bundlewrap.repo.Repository` objects per process. * Repositories are read as needed and not re-read when something changes. 
Modifying files in a repo during the lifetime of the matching Repository object may result in undefined behavior.
## Example

Here's a short example of how to use BundleWrap to get the uptime for a node.

    from bundlewrap.repo import Repository

    repo = Repository("/path/to/my/repo")
    node = repo.get_node("mynode")
    uptime = node.run("uptime")
    print(uptime.stdout)
## Reference

### bundlewrap.repo.Repository(path)

The starting point of any interaction with BundleWrap. An object of this class represents the repository at the given path.
**`.branch`** The current git branch of this repo. `None` if not in a git repo.
**`.clean`** Boolean indicating if there are uncommitted changes in git. `None` if not in a git repo.
**`.groups`** A list of all groups in the repo (instances of `bundlewrap.group.Group`)
**`.nodes`** A list of all nodes in the repo (instances of `bundlewrap.node.Node`)
**`.revision`** The current git, hg or bzr revision of this repo. `None` if no SCM was detected.
**`.get_group(group_name)`** Returns the Group object for the given name.
**`.get_node(node_name)`** Returns the Node object with the given name.
**`.nodes_in_all_groups(group_names)`** Returns a list of Node objects where every node is a member of every group name given.
**`.nodes_in_any_group(group_names)`** Returns all Node objects that are a member of at least one of the given group names.
**`.nodes_in_group(group_name)`** Returns a list of Node objects in the named group.
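As a quick, hedged illustration of these selection helpers (the group name is hypothetical, not part of the reference above):

    from bundlewrap.repo import Repository

    repo = Repository("/path/to/my/repo")

    # print the name of every node in a hypothetical group
    for node in repo.nodes_in_group("webservers"):
        print(node.name)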
### bundlewrap.node.Node()

A system managed by BundleWrap.
**`.bundles`** A list of all bundles associated with this node (instances of `bundlewrap.bundle.Bundle`)
**`.groups`** A list of `bundlewrap.group.Group` objects this node belongs to
**`.hostname`** The DNS name BundleWrap uses to connect to this node
**`.items`** A list of items on this node (instances of subclasses of `bundlewrap.items.Item`)
**`.magic_number`** A large number derived from the node's name. This number is very likely to be unique for your entire repository. You can, for example, use this number to easily "jitter" cronjobs:

    '{} {} * * * root /my/script'.format(
        node.magic_number % 60,
        node.magic_number % 2 + 4,
    )
**`.metadata`** A dictionary of custom metadata, merged from information in [nodes.py](../repo/nodes.py.md) and [groups.py](../repo/groups.py.md)
**`.name`** The internal identifier for this node
**`.download(remote_path, local_path)`** Downloads a file from the node. `remote_path` Which file to get from the node `local_path` Where to put the file
**`.get_item(item_id)`** Get the Item object with the given ID (e.g. "file:/etc/motd").
**`.has_bundle(bundle_name)`** `True` if the node has a bundle with the given name.
**`.has_any_bundle(bundle_names)`** `True` if the node has a bundle with any of the given names.
**`.in_group(group_name)`** `True` if the node is in a group with the given name.
**`.in_any_group(group_names)`** `True` if the node is in a group with any of the given names.
**`.run(command, may_fail=False)`** Runs a command on the node. Returns an instance of `bundlewrap.operations.RunResult`. `command` What should be executed on the node `may_fail` If `False`, `bundlewrap.exceptions.RemoteException` will be raised if the command does not return 0.
**`.upload(local_path, remote_path, mode=None, owner="", group="")`** Uploads a file to the node. `local_path` Which file to upload `remote_path` Where to put the file on the target node `mode` File mode, e.g. "0644" `owner` Username of the file owner `group` Group name of the file group
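Here is a hedged sketch of how these methods combine (all paths and names are hypothetical, not part of the reference above):

    from bundlewrap.repo import Repository

    repo = Repository("/path/to/my/repo")
    node = repo.get_node("mynode")

    # push a local file to the node, then read it back to verify
    node.upload("/tmp/motd", "/etc/motd", mode="0644", owner="root", group="root")
    print(node.run("cat /etc/motd").stdout)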
### bundlewrap.group.Group

A user-defined group of nodes.
**`.name`** The name of this group
**`.nodes`** A list of all nodes in this group (instances of `bundlewrap.node.Node`, includes subgroup members)
### bundlewrap.utils.Fault

A Fault acts as a lazy stand-in object for the result of a given callback function. These objects are returned from the "vault" attached to `Repository` objects:

    >>> repo.vault.password_for("demo")
    <bundlewrap.utils.Fault object at 0x...>

Only when the `value` property of a Fault is accessed or when the Fault is converted to a string, the callback function is executed. In the example above, this means that the password is only generated when it is really required (e.g. when used in a template). This is particularly useful when used in metadata in connection with [secrets](secrets.md). Users will be able to generate metadata with Faults in it, even if they lack the required keys for the decryption operation represented by the Fault. The key will only be required for files etc. that actually use it. If a Fault cannot be resolved (e.g. for lack of the required key), BundleWrap can just skip the item using the Fault, while still allowing other items on the same node to be applied.

Faults also support some rudimentary string operations such as appending a string or another Fault, as well as some string methods:

    >>> f = repo.vault.password_for("1") + ":" + repo.vault.password_for("2")
    >>> f
    <bundlewrap.utils.Fault object at 0x...>
    >>> f.value
    'VOd5PC:JUgYUb'
    >>> f += " "
    >>> f.value
    'VOd5PC:JUgYUb '
    >>> f.strip().value
    'VOd5PC:JUgYUb'
    >>> repo.vault.password_for("1").format_into("Password: {}").value
    'Password: VOd5PC'
    >>> repo.vault.password_for("1").b64encode().value
    'Vk9kNVA='

These string methods are supported on Faults: `format`, `lower`, `lstrip`, `replace`, `rstrip`, `strip`, `upper`, `zfill`

bundlewrap-3.8.0/docs/content/guide/cli.md000066400000000000000000000065541360562404000204660ustar00rootroot00000000000000# Command Line Interface

The `bw` utility is BundleWrap's command line interface.
This page is not meant as a complete reference. It provides a starting point to explore the various subcommands. If you're looking for details, `--help` is your friend.
## bw apply
bw apply -i mynode
The most important and most used part of BundleWrap, `bw apply` will apply your configuration to a set of [nodes](../repo/nodes.py.md). By default, it operates in a non-interactive mode. When you're trying something new or are otherwise unsure of some changes, use the `-i` switch to have BundleWrap interactively ask before each change is made.
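Node selection works the same way as for `bw run` described below; for example, a non-interactive apply to one node plus an entire group (both names hypothetical):

    $ bw apply mynode,mygroup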
## bw run
$ bw run mygroup "uname -a"
Unsurprisingly, the `run` subcommand is used to run commands on nodes. As with most commands that accept node names, you can also give a `group` name or any combination of node and group names, separated by commas (without spaces, e.g. `node1,group2,node3`). A third option is to use a bundle selector like `bundle:my_bundle`. It will select all nodes with the named `bundle`. You can freely mix and match node names, group names, and bundle selectors. Negation is also possible for bundles and groups. `!bundle:foo` will add all nodes without the foo bundle, while `!group:foo` will add all nodes that aren't in the foo group.
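A hedged example combining these selectors (all names are hypothetical; the argument is quoted so the shell does not interpret the `!`):

    $ bw run 'node1,group2,bundle:nginx,!group:legacy' "uptime"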
## bw debug

    $ bw debug
    bundlewrap X.Y.Z
    interactive repository inspector
    > You can access the current repository as 'repo'.
    >>> len(repo.nodes)
    121

This command will drop you into a Python shell with direct access to BundleWrap's [API](api.md). Once you're familiar with it, it can be a very powerful tool.
## bw plot
You'll need Graphviz installed on your machine for this to be useful.
$ bw plot node mynode | dot -Tsvg -omynode.svg
You won't be using this every day, but it's pretty cool. The above command will create an SVG file (you can open these in your browser) that shows the item dependency graph for the given node. You will see bundles as dashed rectangles, static dependencies (defined in BundleWrap itself) in green, auto-generated dependencies (calculated dynamically each time you run `bw apply`) in blue and dependencies you defined yourself in red. It offers an interesting view into the internal complexities BundleWrap has to deal with when figuring out the order in which your items can be applied to your node.
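The same pipeline works with any other output format Graphviz supports, e.g. PNG:

    $ bw plot node mynode | dot -Tpng -omynode.png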
## bw test
$ bw test
✓ node1  samba  pkg_apt:samba
✘ node1  samba  file:/etc/samba/smb.conf

[...]

+----- traceback from worker ------
|
|  Traceback (most recent call last):
|    File "bundlewrap/concurrency.py", line 78, in _worker_process
|      return_value = target(*msg['args'], **msg['kwargs'])
|    File "<string>", line 378, in test
|  BundleError: file:/etc/samba/smb.conf from bundle 'samba' refers to missing file '/path/to/bundlewrap/repo/bundles/samba/files/smb.conf'
|
+----------------------------------
This command is meant to be run automatically like a test suite after every commit. It will try to catch any errors in your bundles and file templates by initializing every item for every node (but without touching the network). bundlewrap-3.8.0/docs/content/guide/dev_item.md000066400000000000000000000127431360562404000215110ustar00rootroot00000000000000# Custom item types ## Step 0: Understand statedicts To represent supposed vs. actual state, BundleWrap uses statedicts. These are normal Python dictionaries with some restrictions: * keys must be Unicode text * every value must be of one of these simple data types: * bool * float * int * Unicode text * None * ...or a list/tuple containing only instances of one of the types above Additional information can be stored in statedicts by using keys that start with an underscore. You may only use this for caching purposes (e.g. storing rendered file template content while the "real" sdict information only contains a hash of this content). BundleWrap will ignore these keys and hide them from the user. The type restrictions noted above do not apply. ## Step 1: Create an item module Create a new file called `/your/bundlewrap/repo/items/foo.py`. You can use this as a template: from bundlewrap.items import Item class Foo(Item): """ A foo. """ BUNDLE_ATTRIBUTE_NAME = "foo" ITEM_ATTRIBUTES = { 'attribute': "default value", } ITEM_TYPE_NAME = "foo" REQUIRED_ATTRIBUTES = ['attribute'] @classmethod def block_concurrent(cls, node_os, node_os_version): """ Return a list of item types that cannot be applied in parallel with this item type. """ return [] def __repr__(self): return "".format(self.attributes['attribute']) def cdict(self): """ Return a statedict that describes the target state of this item as configured in the repo. An empty dict means that the item should not exist. Implementing this method is optional. The default implementation uses the attributes as defined in the bundle. """ raise NotImplementedError def sdict(self): """ Return a statedict that describes the actual state of this item on the node. An empty dict means that the item does not exist on the node. For the item to validate as correct, the values for all keys in self.cdict() have to match this statedict. """ raise NotImplementedError def display_dicts(self, cdict, sdict, keys): """ Given cdict and sdict as implemented above, modify them to better suit interactive presentation. The keys parameter is a list of keys whose values differ between cdict and sdict. Implementing this method is optional. """ return (cdict, sdict, keys) def fix(self, status): """ Do whatever is necessary to correct this item. The given ItemStatus object has the following useful information: status.keys list of cdict keys that need fixing status.cdict cached copy of self.cdict() status.sdict cached copy of self.sdict() """ raise NotImplementedError
## Step 2: Define attributes `BUNDLE_ATTRIBUTE_NAME` is the name of the variable defined in a bundle module that holds the items of this type. If your bundle looks like this: foo = { [...] } ...then you should put `BUNDLE_ATTRIBUTE_NAME = "foo"` here. `ITEM_ATTRIBUTES` is a dictionary of the attributes users will be able to configure for your item. For files, that would be stuff like owner, group, and permissions. Every attribute (even if it's mandatory) needs a default value, `None` is totally acceptable: ITEM_ATTRIBUTES = {'attr1': "default1"} `ITEM_TYPE_NAME` sets the first part of an items ID. For the file items, this is "file". Therefore, file ID look this this: `file:/path`. The second part is the name a user assigns to your item in a bundle. Example: ITEM_TYPE_NAME = "foo" `REQUIRED_ATTRIBUTES` is a list of attribute names that must be set on each item of this type. If BundleWrap encounters an item without all these attributes during bundle inspection, an exception will be raised. Example: REQUIRED_ATTRIBUTES = ['attr1', 'attr2']
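To make the wiring concrete, here is a hedged sketch of what a bundle using the hypothetical item type above could look like, assuming `BUNDLE_ATTRIBUTE_NAME = "foo"`, `ITEM_TYPE_NAME = "foo"`, and the required attributes from the examples (the item name "myfoo" is made up for illustration):

    # in the bundle module of a hypothetical bundle
    foo = {
        "myfoo": {  # results in the item ID "foo:myfoo"
            'attr1': "some value",
            'attr2': "another value",
        },
    }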
Step 3: Implement methods ------------------------- You should probably start with `sdict()`. Use `self.node.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object. The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.node.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values. `block_concurrent()` must return a list of item types (e.g. `['pkg_apt']`) that cannot be applied in parallel with this type of item. May include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time). If you're having trouble, try looking at the [source code for the items that come with BundleWrap](https://github.com/bundlewrap/bundlewrap/tree/master/bundlewrap/items). The `pkg_*` items are pretty simple and easy to understand while `files` is the most complex to date. Or just drop by on [IRC](irc://chat.freenode.net/bundlewrap), we're glad to help. bundlewrap-3.8.0/docs/content/guide/dev_plugin.md000066400000000000000000000052301360562404000220420ustar00rootroot00000000000000# Writing your own plugins [Plugins](../repo/plugins.md) can provide almost any file in a BundleWrap repository: bundles, custom items, hooks, libs, etc. Notable exceptions are `nodes.py` and `groups.py`. If your plugin wants to extend those, use a [lib](../repo/libs.md) instead and ask users to add the result of a function call in your lib to their nodes or groups dicts.
If your plugin depends on other libraries, make sure that it catches ImportErrors in a way that makes it obvious for the user what's missing. Keep in mind that people will often just git pull their repo and not install your plugin themselves.
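A minimal sketch of such a guarded import (the plugin and library names are hypothetical):

    # libs/myplugin.py
    try:
        import requests
    except ImportError:
        raise ImportError(
            "the 'myplugin' plugin requires the 'requests' library "
            "(try: pip install requests)"
        )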

## Starting a new plugin ### Step 1: Clone the plugins repo Create a clone of the [official plugins repo](https://github.com/bundlewrap/plugins) on GitHub. ### Step 2: Create a branch You should work on a branch specific to your plugin. ### Step 3: Copy your plugin files Now take the files that make up your plugin and move them into a subfolder of the plugins repo. The subfolder must be named like your plugin. ### Step 4: Create required files In your plugin subfolder, create a file called `manifest.json` from this template: { "desc": "Concise description (keep it somewhere around 80 characters)", "help": "Optional verbose help text to be displayed after installing. May\ninclude\nnewlines.", "provides": [ "bundles/example/items.py", "hooks/example.py" ], "version": 1 } The `provides` section must contain a list of all files provided by your plugin. You also have to create an `AUTHORS` file containing your name and email address. Last but not least we require a `LICENSE` file with an OSI-approved Free Software license. ### Step 5: Update the plugin index Run the `update_index.py` script at the root of the plugins repo. ### Step 6: Run tests Run the `test.py` script at the root of the plugins repo. It will tell you if there is anything wrong with your plugin. ### Step 7: Commit Commit all changes to your branch ### Step 8: Create pull request Create a pull request on GitHub to request inclusion of your new plugin in the official repo. Once your branch is merged, your plugin will become available to be installed by `bw repo plugin install yourplugin` and appear on [plugins.bundlewrap.org](http://plugins.bundlewrap.org).
## Updating an existing plugin

To release a new version of your plugin:

* Increase the version number in `manifest.json`
* Update the list of provided files in `manifest.json`
* If you're updating someone else's plugin, you should get their consent and add your name to `AUTHORS`

Then just follow the instructions above from step 5 onward.

bundlewrap-3.8.0/docs/content/guide/env.md000066400000000000000000000076261360562404000205070ustar00rootroot00000000000000# Environment Variables

## `BW_ADD_HOST_KEYS`

As BundleWrap uses OpenSSH to connect to hosts, host key checking is involved. By default, strict host key checking is activated. This might not be suitable for your setup. You can set this variable to `1` to cause BundleWrap to set the OpenSSH option `StrictHostKeyChecking=no`. You can also use `bw -a ...` to achieve the same effect.
## `BW_COLORS`

Colors are enabled by default. Setting this variable to `0` tells BundleWrap to never use any ANSI color escape sequences.
## `BW_DEBUG_LOG_DIR`

Set this to an existing directory path to have BundleWrap write debug logs there (even when you're running `bw` without `--debug`).
Debug logs are verbose and BundleWrap does not rotate them for you. Putting them on a tmpfs or ramdisk will save your SSD and get rid of old logs every time you reboot your machine.

## `BW_HARDLOCK_EXPIRY`

[Hard locks](locks.md) are automatically ignored after some time. By default, it's `"8h"`. You can use this variable to override that default.
## `BW_IDENTITY`

When BundleWrap [locks](locks.md) a node, it stores a short description about "you". By default, this is the string `$USER@$HOSTNAME`, e.g. `john@mymachine`. You can use `BW_IDENTITY` to specify a custom string. (No variables will be evaluated in user supplied strings.)
## `BW_ITEM_WORKERS` and `BW_NODE_WORKERS`

BundleWrap attempts to parallelize work. These two options specify the number of items and nodes, respectively, which will be handled concurrently. To be more precise, when setting `BW_NODE_WORKERS=8` and `BW_ITEM_WORKERS=2`, BundleWrap will work on eight nodes in parallel, each handling two items in parallel. You can also use the command line options `-p` and `-P`, e.g. `bw apply -p ... -P ... ...`, to achieve the same effect. Command line arguments override environment variables.

There is no single default for these values. For example, when running `bw apply`, four nodes are handled by default. However, when running `bw test`, only one node will be tested by default. `BW_NODE_WORKERS` and `BW_ITEM_WORKERS` apply to *all* these operations.

Note that you should not set these variables to very high values. First, it can cause high memory consumption on your machine. Second, not all SSH servers can handle massive parallelism. Please refer to your OpenSSH documentation on how to tune your servers for these situations.
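For example, to reproduce the scenario described above against a hypothetical group:

    $ BW_NODE_WORKERS=8 BW_ITEM_WORKERS=2 bw apply mygroup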
## `BW_REPO_PATH`

Set this to a path pointing to your BundleWrap repository. If unset, the current working directory is used. Can be overridden with `bw --repository PATH`. Keep in mind that `bw` will also look for a repository in all parent directories until it finds one.
## `BW_SOFTLOCK_EXPIRY`

[Soft locks](locks.md) are automatically removed from nodes after some time. By default, it's `"8h"`. You can use this variable to override that default.
## `BW_SSH_ARGS`

Extra arguments to include in every call to `ssh` BundleWrap makes. Set this to `-F ~/.ssh/otherconf` to use a different SSH config with BundleWrap. Defaults to `""`.
## `BW_SCP_ARGS`

Extra arguments to include in every call to `scp` BundleWrap makes. Defaults to the value of `BW_SSH_ARGS`.
## `BW_TABLE_STYLE`

By default, BundleWrap uses Unicode box-drawing characters at various points in its output. Setting this env var to one of the following values changes that behavior:
| value | effect |
| --- | --- |
| `ascii` | use only simple ASCII characters to render tables (useful if your font doesn't properly align box-drawing characters) |
| `grep` | make output more grep- and cut-friendly |

## `BW_VAULT_DUMMY_MODE`

Setting this to `1` will make `repo.vault` return dummy values for every [secret](secrets.md). This is useful for running `bw test` on a CI server that you don't want to trust with your `.secrets.cfg`.

bundlewrap-3.8.0/docs/content/guide/installation.md000066400000000000000000000055151360562404000224150ustar00rootroot00000000000000# Installation
You may need to install pip first. This can be accomplished through your distribution's package manager, e.g.:
aptitude install python-pip
or by following pip's own manual installation instructions.
## Using pip

It's as simple as:
pip install bundlewrap
Note that you need at least Python 2.7 to run BundleWrap. Python 3 is supported as long as it's >= 3.3.
## From git
This type of install will give you the very latest (and thus possibly broken) bleeding edge version of BundleWrap. You should only use this if you know what you're doing.
The instructions below are for installing on Ubuntu Server 12.10 (Quantal), but should also work for other versions of Ubuntu/Debian. If you're on some other distro, you will obviously have to adjust the package install commands.
The instructions assume you have root privileges.
Install basic requirements:
aptitude install build-essential git python-dev python-pip
Clone the GitHub repository:
cd /opt
git clone https://github.com/bundlewrap/bundlewrap.git
Use `pip install -e` to install in "development mode":
pip install -e /opt/bundlewrap
You can now try running the `bw` command line utility:
bw --help
That's it. To update your install, just pull the git repository and have `setup.py` check for new dependencies:
cd /opt/bundlewrap
git pull
python setup.py develop

# Requirements for managed systems While the following list might appear long, even very minimal systems should provide everything that's needed. * `apt-get` (only used with [pkg_apt](../items/pkg_apt.md) items) * `cat` * `chmod` * `chown` * `dpkg` (only used with [pkg_apt](../items/pkg_apt.md) items) * `echo` * `file` * `find` (only used with [directory purging](../items/directory.md#purge)) * `grep` * `groupadd` * `groupmod` * `id` * `initctl` (only used with [svc_upstart](../items/svc_upstart.md) items) * `mkdir` * `mv` * `pacman` (only used with [pkg_pacman](../items/pkg_pacman.md) items) * `rm` * sftp-enabled SSH server (your home directory must be writable) * `sudo` * `sha1sum` * `stat` * `systemctl` (only used with [svc_systemd](../items/svc_systemd.md) items) * `useradd` * `usermod` Additionally, you need to pre-configure your SSH client so that it can connect to your nodes without having to type a password (including `sudo` on the node, which also must *not* have the `requiretty` option set). bundlewrap-3.8.0/docs/content/guide/item_file_templates.md000066400000000000000000000046201360562404000237230ustar00rootroot00000000000000# Writing file templates BundleWrap can use [Mako](http://www.makotemplates.org) or [Jinja2](http://jinja.pocoo.org) for file templating. This enables you to dynamically contruct your config files. Templates reside in the `files` subdirectory of a bundle and are bound to a file item using the `source` [attribute](../items/file.md#source). This page explains how to get started with Mako. The most basic example would be:
Hello, this is ${node.name}!
After template rendering, it would look like this:
Hello, this is myexamplenodename!
As you can see, `${...}` can be used to insert the value of a context variable into the rendered file. By default, you have access to two variables in every template: `node` and `repo`. They are `bundlewrap.node.Node` and `bundlewrap.repo.Repository` objects, respectively. You can learn more about the attributes and methods of these objects in the [API docs](api.md), but here are a few examples:
## Examples

inserts the DNS hostname of the current node

    ${node.hostname}

a list of all nodes in your repo

    % for node in repo.nodes:
    ${node.name}
    % endfor

make exceptions for certain nodes

    % if node.name == "node1":
    option = foo
    % elif node.name in ("node2", "node3"):
    option = bar
    % else:
    option = baz
    % endif

check for group membership

    % if node.in_group("sparkle"):
    enable_sparkles = 1
    % endif

check for membership in any of several groups

    % if node.in_any_group(("sparkle", "shiny")):
    enable_fancy = 1
    % endif

check for bundle

    % if node.has_bundle("sparkle"):
    enable_sparkles = 1
    % endif

check for any of several bundles

    % if node.has_any_bundle(("sparkle", "shiny")):
    enable_fancy = 1
    % endif

list all nodes in a group

    % for gnode in repo.get_group("mygroup").nodes:
    ${gnode.name}
    % endfor
## Working with node metadata

Quite often you will attach custom metadata to your nodes in `nodes.py`, e.g.:

    nodes = {
        "node1": {
            "metadata": {
                "interfaces": {
                    "eth0": "10.1.1.47",
                    "eth1": "10.1.2.47",
                },
            },
        },
    }

You can easily access this information in templates:

    % for interface, ip in sorted(node.metadata["interfaces"].items()):
    interface ${interface}
    ip = ${ip}
    % endfor

This template will render to:

    interface eth0
    ip = 10.1.1.47
    interface eth1
    ip = 10.1.2.47

# Kubernetes
Support for Kubernetes is experimental at this time. Backwards-incompatible changes may happen at any time.
To manage a Kubernetes cluster with BundleWrap, you first need to set up a kubectl context that works with the cluster. If you're running on Google Kubernetes Engine for example, this can be accomplished with:

    gcloud auth login
    gcloud container clusters get-credentials your-cluster --zone your-zone --project your-project

You also need to make sure context names are the same on your teammates' machines.
## Setting up a node

Each Kubernetes cluster you manage becomes a node. Here is an example `nodes.py`:

    nodes = {
        "my-cluster": {
            'os': 'kubernetes',
            'bundles': ["my-app"],
            'kubectl_context': "my-context",
        },
    }
## Kubernetes bundles

You can then proceed to write bundles as with regular nodes, but using the [k8s_ items](../items/k8s.md):

    k8s_namespaces = {
        "my-app": {},
    }

    k8s_deployments = {
        "my-app/my-deployment": {
            'manifest': {
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "app": "nginx",
                        },
                    },
                    "replicas": 2,
                    "template": {
                        "metadata": {
                            "labels": {
                                "app": "nginx",
                            },
                        },
                        "spec": {
                            "containers": [
                                {
                                    "name": "nginx",
                                    "image": "nginx:latest",
                                    "ports": [
                                        {"containerPort": 80},
                                    ]
                                },
                            ],
                        },
                    },
                },
            },
        },
    }

All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `apiVersion`, `kind`, and `metadata/name` keys, but you can override them if you must.

Alternatively, you can keep your resource definitions in manifest files:

    k8s_namespaces = {
        "my-app": {},
    }

    k8s_deployments = {
        "my-app/my-deployment": {
            'manifest_file': "my_deployment.yaml",
        },
    }

BundleWrap will then look for `my_deployment.yaml` in `bundles/<bundle>/manifests/`. You can also use [templating](../items/k8s.md#manifest_processor) in these files.

# Locking

BundleWrap's decentralized nature makes it necessary to coordinate actions between users of a shared repository. Locking is an important part of collaborating using BundleWrap.

## Hard locks

Since very early in the history of BundleWrap, what we call "hard locks" were used to prevent multiple users from using `bw apply` on the same node at the same time. When BundleWrap finds a hard lock on a node in interactive mode, it will display information about who acquired the lock (and when) and will ask whether to ignore the lock or abort the process. In non-interactive mode, the operation is always cancelled for the node in question unless `--force` is used.

## Soft locks

Many teams these days are using a workflow based on pull requests. A common problem here is that changes from a feature branch might already have been applied to a set of nodes, while the master branch is still lacking these changes. While the pull request is open and waiting for review, other users might rightly use the master branch to apply to all nodes, reverting changes made by the feature branch. This can be a major nuisance.

As of version 2.6.0, BundleWrap provides "soft locks" to prevent this. The author of a feature branch can now lock the node so only he or she can use `bw apply` on it:
    $ bw lock add node1
    ✓ node1  locked with ID B9JS (expires in 8h)
This will prevent all other users from changing any items on the node for the next 8 hours. BundleWrap will tell users apart by their [BW_IDENTITY](env.md#BW_IDENTITY). Now say someone else is reviewing the pull request and wants to use `bw apply`, while still keeping others out and the original author in. This can be done by simply locking the node *again* as the reviewer. Nodes can have many soft locks. Soft locks act as an exemption from a general ban on changing items that goes into effect as soon as one or more soft locks are present on the node. Of course, if no soft locks are present, anyone can change any item. You can list all soft locks on a node with:
    $ bw lock show node1
    i node1  ID    Created              Expires              User   Items  Comment
    › node1  Y1KD  2016-05-25 21:30:25  2016-05-26 05:30:25  alice  *      locks are awesome
    › node1  B9JS  2016-05-24 13:10:11  2016-05-27 08:10:11  bob    *      me too
Note that each lock is identified by a case-insensitive 4-character ID that can be used to remove the lock:
    $ bw lock remove node1 y1kd
    ✓ node1  lock Y1KD removed
Expired locks are automatically and silently purged whenever BundleWrap has the opportunity. Be sure to check out `bw lock add --help` for how to customize expiration time, add a short comment explaining the reason for the lock, or lock only certain items. Using `bw apply` on a soft locked node is not an error and affected items will simply be skipped.

# Migrating from BundleWrap 1.x to 2.x

As per [semver](http://semver.org), BundleWrap 2.0 breaks compatibility with repositories created for BundleWrap 1.x. This document provides a guide on how to upgrade your repositories to BundleWrap 2.x. Please read the entire document before proceeding.

To aid with the transition, BundleWrap 1.6.0 has been released along with 2.0.0. It contains no new features over 1.5.x, but has builtin helpers to aid your migration to 2.0.
## items.py

In every bundle, rename `bundle.py` to `items.py`. BundleWrap 1.6.0 can do this for you by running `bw migrate`.
## Default file content type

The default `content_type` for [file items](../items/file.md) has changed from "mako" to "text". This means that you need to check all file items that do not define an explicit content type of "mako". Some of them might be fine because you didn't really need templating, while others may need to have their `content_type` set to "mako" explicitly. BundleWrap 1.6.0 will print warnings for every file item affected when running `bw test`.
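For instance, a file item that relied on the old implicit default might now need its content type spelled out. A minimal sketch (the bundle and file names here are made up):

    files = {
        "/etc/motd": {
            'source': "motd",
            'content_type': 'mako',  # 1.x treated this as the default; 2.x requires it explicitly
        },
    }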
## Metadata merging

The merging behavior for node and group metadata has changed. Instead of a simple `dict.update()`, metadata dicts are now merged recursively. See [the docs](../repo/groups.py.md#metadata) for details.
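To illustrate the new behavior with a hypothetical example (not from the original docs):

    # group metadata
    {'ntp': {'servers': ["pool.ntp.org"]}, 'timezone': "UTC"}

    # node metadata
    {'ntp': {'enabled': True}}

    # resulting node.metadata in 2.x (recursive merge)
    {'ntp': {'servers': ["pool.ntp.org"], 'enabled': True}, 'timezone': "UTC"}

    # whereas 1.x (dict.update()) would have replaced 'ntp' wholesale:
    {'ntp': {'enabled': True}, 'timezone': "UTC"}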
## Metadata processors and item generators

These two advanced features have been replaced by a single new mechanism: [metadata.py](../repo/metadata.py.md). You will need to rethink and rewrite them. BundleWrap 1.6.0 will print warnings for every group that uses metadata processors and any item generators when running `bw test`.

## Custom item types

The API for defining your own items has changed. Generally, you should be able to upgrade your items with relatively little effort. Refer to [the docs](dev_item.md) for details.
## Deterministic templates

While not a strict requirement, it is highly recommended to ensure your entire configuration can be created deterministically (i.e. remains exactly the same no matter how often you generate it). Otherwise, you won't be able to take advantage of the new functionality provided by `bw hash`.

A common pitfall here is iteration over dictionaries in templates:

    % for key, value in my_dict.items():
    ${value}
    % endfor

Standard dictionaries in Python have no defined order. This may result in lines occasionally changing their position. To solve this, you can simply use `sorted()`:

    % for key, value in sorted(my_dict.items()):
    ${value}
    % endfor
## Hook arguments

Some [hooks](../repo/hooks.md) had their arguments adjusted slightly.

# Migrating from BundleWrap 2.x to 3.x

As per [semver](http://semver.org), BundleWrap 3.0 breaks compatibility with repositories created for BundleWrap 2.x. This document provides a guide on how to upgrade your repositories to BundleWrap 3.x. Please read the entire document before proceeding.
## metadata.py

BundleWrap 2.x simply used all functions in `metadata.py` whose names don't start with an underscore as metadata processors. This led to awkward imports like `from foo import bar as _bar`. BundleWrap 3.x requires a decorator for explicitly designating functions as metadata processors:

    @metadata_processor
    def myproc(metadata):
        return metadata, DONE

You will have to add `@metadata_processor` to each metadata processor function. There is no need to import it; it is provided automatically, just like `node` and `repo`.

The accepted return values of metadata processors have changed as well. Metadata processors now always have to return a tuple with the first element being a dictionary of metadata and the remaining elements made up of various options to tell BundleWrap what to do with the dictionary. In most cases, you will want to return the `DONE` option as in the example above. There is no need to import options; they're always available.

When you previously returned `metadata, False` from a metadata processor, you will now have to return `metadata, RUN_ME_AGAIN`.

For a more detailed description of the available options, see [the documentation](../repo/metadata.py.md).
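As a sketch of the new return convention (this particular processor is hypothetical), a metadata processor that depends on data provided by another processor might look like this:

    @metadata_processor
    def add_listen_address(metadata):
        if "interfaces" not in metadata:
            # provided by another metadata processor; ask to be called again later
            return metadata, RUN_ME_AGAIN
        metadata["listen_address"] = sorted(metadata["interfaces"].values())[0]
        return metadata, DONE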
## File and directory ownership defaults

[Files](../items/file.md), [directories](../items/directory.md), and [symlinks](../items/symlink.md) now have default values for the ownership and mode attributes. Previously, the default was to ignore them. It's very likely that you won't have to do anything here, just be aware.

## systemd services enabled by default

Again, just be [aware](../items/svc_systemd.md), it's probably what you intended anyway.

## Environment variables

The following [env vars](env.md) have been renamed (though the new names have already been available for a while, so chances are you're already using them):
| Old | New |
|-----|-----|
| `BWADDHOSTKEYS` | `BW_ADD_HOST_KEYS` |
| `BWCOLORS` | `BW_COLORS` |
| `BWITEMWORKERS` | `BW_ITEM_WORKERS` |
| `BWNODEWORKERS` | `BW_NODE_WORKERS` |
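For example, to override the worker counts for a single run (the values shown are arbitrary):

    BW_ITEM_WORKERS=8 BW_NODE_WORKERS=4 bw apply mygroup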

## Item.display_keys and Item.display_dicts

If you've written your own items and used the `display_keys()` or `display_dicts()` methods or the `BLOCK_CONCURRENT` attribute, you will have to update them to the [new API](dev_item.md).

# OS compatibility

BundleWrap by necessity takes a pragmatic approach to supporting different operating systems and distributions. Our main target is Linux, but support for other UNIXes is also evolving. We cannot guarantee to be compatible with every distribution and BSD flavor under the sun, but we try to cover the common ones.
## node.os and node.os_version

You should set these attributes for every node. Giving BundleWrap this information allows us to adapt some built-in behavior.
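In `nodes.py`, that might look like this (hostname and version are placeholders; check the nodes.py docs for the values valid for your platform):

    nodes = {
        "node-1": {
            'hostname': "node-1.example.com",
            'os': 'debian',
            'os_version': (9,),
        },
    }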
## other node attributes

In some cases (e.g. when not using sudo) you will need to manually adjust some things. Check the docs [on node-level OS overrides](../repo/nodes.py.md#os-compatibility-overrides).

Quickstart
==========

This is the 10 minute intro into BundleWrap. Fasten your seatbelt.

Installation
------------

First, open a terminal and install BundleWrap:
    pip install bundlewrap
Create a repository
-------------------

Now you'll need to create your [repository](../repo/layout.md):
    mkdir my_bundlewrap_repo
    cd my_bundlewrap_repo
    bw repo create
You will note that some files have been created. Let's check them out:
    cat nodes.py
    cat groups.py
The contents should be fairly self-explanatory, but you can always check the [docs](../repo/layout.md) on these files if you want to go deeper.
It is highly recommended to use git or a similar tool to keep track of your repository. You may want to start doing that right away.
At this point you will want to edit `nodes.py` and maybe change "localhost" to the hostname of a system you have passwordless (including sudo) SSH access to.
BundleWrap will honor your `~/.ssh/config`, so if `ssh mynode.example.com sudo id` works without any password prompts in your terminal, you're good to go.
Run a command
-------------

The first thing you can do is run a command on your army of one node:
    bw -a run node-1 "uptime"
The `-a` switch tells `bw` to automatically trust unknown SSH host keys (when you're connecting to a new node). By default, only known host keys will be accepted.
You should see something like this:
    › node-1   20:16:26 up 34 days,  4:10,  0 users,  load average: 0.00, 0.01, 0.05
    ✓ node-1  completed after 0.366s
Instead of a node name ("node-1" in this case) you can also use a group name (such as "all") from your `groups.py`.

Create a bundle
---------------

BundleWrap stores node configuration in bundles. A bundle is a collection of *items* such as files, system packages or users. To create your first bundle, type:
    bw repo bundle create mybundle
Now that you have created your bundle, it's important to tell BundleWrap which nodes will have this bundle. You can assign bundles to nodes using either `groups.py` or `nodes.py`; here we'll use the latter:

    nodes = {
        'node-1': {
            'bundles': (
                "mybundle",
            ),
            'hostname': "mynode-1.local",
        },
    }

Create a file template
----------------------

To manage a file, you need two things:

1. a file item in your bundle
2. a template for the file contents

Add this to your `bundles/mybundle/items.py`:

    files = {
        '/etc/motd': {
            'content_type': 'mako',  # use the Mako template engine for this file
            'source': "mymotd",      # filename of the template
        },
    }
    vim bundles/mybundle/files/mymotd
You can use this for example content:
    Welcome to ${node.name}!
Note that the `source` attribute in `items.py` contains a path relative to the `files` directory of your bundle.

Apply configuration
-------------------

Now all that's left is to run `bw apply`:
    bw apply -i node-1
BundleWrap will ask to replace your previous MOTD:
    i node-1  started at 2016-02-13 21:25:45
    ? node-1
    ? node-1  ╭─ file:/etc/motd
    ? node-1  │
    ? node-1  │  content
    ? node-1  │  --- <node>
    ? node-1  │  +++ <bundlewrap>
    ? node-1  │  @@ -1 +1 @@
    ? node-1  │  -your old motd
    ? node-1  │  +Welcome to node-1!
    ? node-1  │
    ? node-1  ╰─ Fix file:/etc/motd? [Y/n]
That completes the quickstart tutorial!

Further reading
---------------

Here are some suggestions on what to do next:

* set up [SSH multiplexing](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing) for significantly better performance
* take a moment to think about what groups and bundles you will create
* read up on how a [BundleWrap repository](../repo/layout.md) is laid out
* ...especially what [types of items](../repo/items.py.md#item-types) you can add to your bundles
* familiarize yourself with [the Mako template language](http://www.makotemplates.org/)
* explore the [command line interface](cli.md)
* follow [@bundlewrap](https://twitter.com/bundlewrap) on Twitter

Have fun! If you have any questions, feel free to drop by [on IRC](irc://chat.freenode.net/bundlewrap).

# Handling secrets

We strongly recommend **not** putting any sensitive information such as passwords or private keys into your repository. This page describes the helpers available in BundleWrap to manage those secrets without checking them into version control.
Most of the functions described here return lazy Fault objects.

## .secrets.cfg

When you initially ran `bw repo create`, a file called `.secrets.cfg` was put into the root level of your repo. It's an INI-style file that by default contains two random keys BundleWrap uses to protect your secrets.
You should never commit .secrets.cfg. Immediately add it to your .gitignore or equivalent.
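For orientation, the file looks roughly like this (keys shortened; treat this as an illustration, not something to copy):

    [generate]
    key = aBcDeF...

    [encrypt]
    key = gHiJkL...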

## Derived passwords

In some cases, you can control (i.e. manage with BundleWrap) both ends of the authentication process. A common example is a config file for a web application that holds credentials for a database also managed by BundleWrap. In this case, you don't really care what the password is, you just want it to be the same on both sides. To accomplish that, just write this in your template (Mako syntax shown here):
database_user = "foo"
database_password = "${repo.vault.password_for("my database")}"
In your bundle, you can then configure your database user like this:

    postgres_roles = {
        "foo": {
            'password': repo.vault.password_for("my database"),
        },
    }

It doesn't really matter what string you call `password_for()` with, it just has to be the same on both ends. BundleWrap will then use that string, combine it with the default key called `generate` in your `.secrets.cfg`, and derive a random password from that. This makes it easy to change all your passwords at once (e.g. when an employee leaves or when required for compliance reasons) by rotating keys.
However, it also means you have to guard your .secrets.cfg very closely. If it is compromised, so are all your passwords. Use your own judgement.
### "Human" passwords As an alternative to `password_for()`, which generates random strings, you can use `human_password_for()`.It generates strings like `Wiac-Kaobl-Teuh-Kumd-40`. They are easier to handle for human beings. You might want to use them if you have to type those passwords on a regular basis. ### Random bytes `password_for()` and `human_password_for()` are meant for passwords. If you need plain random bytes, you can use `random_bytes_as_base64_for()`. As the name implies, it will return the data base64 encoded. Some examples:
    $ bw debug -c 'print(repo.vault.random_bytes_as_base64_for("foo"))'
    qczM0GUKW7YlXEuW8HGPYkjCGaX4Vu9Fja5SIZWga7w=
    $ bw debug -c 'print(repo.vault.random_bytes_as_base64_for("foo", length=1))'
    qQ==

## Static passwords

When you need to store a specific password, you can encrypt it symmetrically:
$ bw debug -c "print(repo.vault.encrypt('my password'))"
gAAAA[...]mrVMA==
You can then use this encrypted password in a template like this:
database_user = "foo"
database_password = "${repo.vault.decrypt("gAAAA[...]mrVMA==")}"

## Files

You can also encrypt entire files:
$ bw debug -c "repo.vault.encrypt_file('/my/secret.file', 'encrypted.file')"
Encrypted files are always read and written relative to the data/ subdirectory of your repo.
If the source file was encoded using UTF-8, you can then simply pass the decrypted content into a file item:

    files = {
        "/secret": {
            'content': repo.vault.decrypt_file("encrypted.file"),
        },
    }

If the source file is binary however (or any encoding other than UTF-8), you must use base64:

    files = {
        "/secret": {
            'content': repo.vault.decrypt_file_as_base64("encrypted.file"),
            'content_type': 'base64',
        },
    }
## Key management

### Multiple keys

You can always add more keys to your `.secrets.cfg`, but you should keep the defaults around. Adding more keys makes it possible to give different keys to different teams. **By default, BundleWrap will skip items it can't find the required keys for.**

When using `.password_for()`, `.encrypt()` etc., you can provide a `key` argument to select the key:

    repo.vault.password_for("some database", key="devops")

The encrypted data will be prefixed by `yourkeyname$...` to indicate that the key `yourkeyname` was used for encryption. Thus, during decryption, you can omit the `key=` parameter.
### Rotating keys
This is applicable mostly to .password_for(). The other methods use symmetric encryption and require manually updating the encrypted text after the key has changed.
You can generate a new key by running `bw debug -c "print(repo.vault.random_key())"`. Place the result in your `.secrets.cfg`. Then you need to distribute the new key to your team and run `bw apply` for all your nodes.

# BundleWrap documentation

If you run into a problem that is not answered in these docs, please find us on [IRC](irc://chat.freenode.net/bundlewrap) or [Twitter](https://twitter.com/bundlewrap). We’re happy to help! Should you already know your way around, just click on the part of your repo that you need help with.

# Actions

Actions will be run on every `bw apply`. They differ from regular items in that they cannot be "correct" in the first place. They can only succeed or fail.

    actions = {
        'check_if_its_still_linux': {
            'command': "uname",
            'expected_return_code': 0,
            'expected_stdout': "Linux\n",
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## command

The only required attribute. This is the command that will be run on the node with root privileges.
## data_stdin

You can pipe data directly to the command running on the node. To do so, use this attribute. If it's a string or unicode object, it will always be encoded as UTF-8. Alternatively, you can use raw bytes.
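A hypothetical example that feeds SQL to a command via stdin:

    actions = {
        'insert_data': {
            'command': "psql -U postgres mydb",
            'data_stdin': "INSERT INTO foo (bar) VALUES (42);\n",
        },
    }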
## expected_return_code

Defaults to `0`. If the return code of your command is anything else, the action is considered failed. You can also set this to `None` and any return code will be accepted.

## expected_stdout

If this is given, the stdout output of the command must match the given string or the action is considered failed.

## expected_stderr

Same as `expected_stdout`, but with stderr.

## interactive

If set to `True`, this action will be skipped in non-interactive mode. If set to `False`, this action will always be executed without asking (even in interactive mode). Defaults to `None`.
Think hard before setting this to False. People might assume that interactive mode won't do anything without their consent.
# Directory items

    directories = {
        "/path/to/directory": {
            "mode": "0755",
            "owner": "root",
            "group": "root",
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## group

Name of the group this directory belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

## mode

Directory mode as returned by `stat -c %a <directory>`. Defaults to `755`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

## owner

Username of the directory's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
## purge

Set this to `True` to remove everything from this directory that is not managed by BundleWrap. Defaults to `False`.

# File items

Manage regular files.

    files = {
        "/path/to/file": {
            "mode": "0644",
            "owner": "root",
            "group": "root",
            "content_type": "mako",
            "encoding": "utf-8",
            "source": "my_template",
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## content

May be used instead of `source` to provide file content without a template file.

## content_type

How the file pointed to by `source` or the string given to `content` should be interpreted.
| Value | Effect |
|-------|--------|
| `any` | only cares about file owner, group, and mode |
| `base64` | content is decoded from base64 |
| `binary` | file is uploaded verbatim, no content processing occurs |
| `jinja2` | content is interpreted by the Jinja2 template engine |
| `mako` | content is interpreted by the Mako template engine |
| `text` (default) | like `binary`, but will be diffed in interactive mode |

## context

Only used with Mako and Jinja2 templates. The values of this dictionary will be available from within the template as variables named after the respective keys.

## delete

When set to `True`, the path of this file will be removed. It doesn't matter if there is not a file but a directory or something else at this path. When using `delete`, no other attributes are allowed.

## encoding

Encoding of the target file. Note that this applies to the remote file only; your template is still conveniently written in UTF-8 and will be converted by BundleWrap. Defaults to "utf-8". Other possible values (e.g. "latin-1") can be found [here](http://docs.python.org/2/library/codecs.html#standard-encodings).

## group

Name of the group this file belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

## mode

File mode as returned by `stat -c %a <file>`. Defaults to `644`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

## owner

Username of the file's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

## source

File name of the file template. If this says `my_template`, BundleWrap will look in `data/my_bundle/files/my_template` and then `bundles/my_bundle/files/my_template`. Most of the time, you will want to put config templates into the latter directory. The `data/` subdirectory is meant for files that are very specific to your infrastructure (e.g. DNS zone files). This separation allows you to write your bundles in a generic way so that they could be open-sourced and shared with other people. Defaults to the filename of this item (e.g. `foo.conf` when this item is `/etc/foo.conf`).

See also: [Writing file templates](../guide/item_file_templates.md)
## verify_with

This can be used to run external validation commands on a file before it is applied to a node. The file is verified locally on the machine running BundleWrap. Verification is considered successful when the exit code of the verification command is 0. Use `{}` as a placeholder for the shell-quoted path to the temporary file.

Here is an example for verifying sudoers files:
    visudo -cf {}
Keep in mind that all team members will have to have the verification command installed on their machines.

# Group items

Manages system groups. Group members are managed through the [user item](user.md).

    groups = {
        "acme": {
            "gid": 2342,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## delete

When set to `True`, this group will be removed from the system. When using `delete`, no other attributes are allowed.

## gid

Numerical ID of the group.

# Kubernetes items
Support for Kubernetes is experimental at this time. Backwards-incompatible changes may happen at any time.
See also: [Guide to Kubernetes](../guide/kubernetes.md)
Manage resources in Kubernetes clusters.

    k8s_namespaces = {
        "my-app": {},
        "my-previous-app": {'delete': True},
    }

    k8s_deployments = {
        "my-app/my-deployment": {
            'manifest': {
                ...
            },
        },
    }

Note that the names of all items in a namespace must be prefixed with the name of their namespace and a forward slash `/`. Resource items will automatically depend on their namespace if you defined it.
## Resource types
| Resource type | Bundle attribute | apiVersion |
|---------------|------------------|------------|
| Cluster Role | `k8s_clusterroles` | rbac.authorization.k8s.io/v1 |
| Cluster Role Binding | `k8s_clusterrolebindings` | rbac.authorization.k8s.io/v1 |
| Config Map | `k8s_configmaps` | v1 |
| Cron Job | `k8s_cronjobs` | batch/v1beta1 |
| Custom Resource Definition | `k8s_crd` | apiextensions.k8s.io/v1beta1 |
| Daemon Set | `k8s_daemonsets` | v1 |
| Deployment | `k8s_deployments` | extensions/v1beta1 |
| Ingress | `k8s_ingresses` | extensions/v1beta1 |
| Namespace | `k8s_namespaces` | v1 |
| Network Policy | `k8s_networkpolicies` | networking.k8s.io/v1 |
| Persistent Volume Claim | `k8s_pvc` | v1 |
| Role | `k8s_roles` | rbac.authorization.k8s.io/v1 |
| Role Binding | `k8s_rolebindings` | rbac.authorization.k8s.io/v1 |
| Service | `k8s_services` | v1 |
| Service Account | `k8s_serviceaccounts` | v1 |
| Secret | `k8s_secrets` | v1 |
| StatefulSet | `k8s_statefulsets` | apps/v1 |
| (any) | `k8s_raw` | (any) |
You can define [Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) like this:

    k8s_crd = {
        "custom-thing": {
            'manifest': {
                'spec': {
                    'names': {
                        'kind': "CustomThing",
                    },
                },
            },
        },
    }

    k8s_raw = {
        "foo/CustomThing/baz": {
            'manifest': {
                'apiVersion': "example.com/v1",
            },
        },
    }

The special `k8s_raw` items can also be used to create resources that BundleWrap does not support natively:

    k8s_raw = {
        "foo/HorizontalPodAutoscaler/baz": {
            'manifest': {
                'apiVersion': "autoscaling/v2beta1",
            },
        },
    }

Resources outside any namespace can be created with `k8s_raw` by omitting the namespace in the item name (so that the name starts with `/`).
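For example, a cluster-wide resource might be declared like this (the specific resource shown is just an illustration):

    k8s_raw = {
        "/APIService/v1beta1.metrics.k8s.io": {
            'manifest': {
                'apiVersion': "apiregistration.k8s.io/v1",
            },
        },
    }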
# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## context

Only used with Mako and Jinja2 manifests (see `manifest_processor` below). The values of this dictionary will be available from within the template as variables named after the respective keys.

## delete

Set this to `True` to have the resource removed.

## manifest

The resource definition (as defined in the [Kubernetes API](https://kubernetes.io/docs/reference/)) formatted as a Python dictionary (will be converted to JSON and passed to `kubectl apply`). Mutually exclusive with `manifest_file`.

## manifest_file

Filename of the resource definition relative to the `manifests` subdirectory of your bundle. Filenames must end in `.yaml`, `.yml`, or `.json` to indicate file format. Mutually exclusive with `manifest`.
## manifest_processor

Set this to `jinja2` or `mako` if you want to use a template engine to process your `manifest_file`. Defaults to `None`.

# APT package items

Handles packages installed by `apt-get` on Debian-based systems.

    pkg_apt = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar_i386": {  # i386 multiarch variant of the "bar" package
            "installed": False,
        },
        "awesome-daemon": {
            "when_creating": {
                "start_service": False,
            },
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be purged.

## when\_creating

These attributes are only enforced during the creation of the item on the node (in this case this means when a package is installed). They are ignored in subsequent runs of `bw apply`.
### start\_service

By default, daemons will be auto-started on systems like Debian or Ubuntu. This happens right after the package has been installed. You might want to set `start_service` to `False` to avoid this. This might be necessary if BundleWrap must place some more config files on the node before a daemon can actually be started.

# dnf package items

Handles packages installed by `dnf` on RPM-based systems.

    pkg_dnf = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar": {
            "installed": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be removed.

# OpenBSD package items

Handles packages installed by `pkg_add` on OpenBSD systems.

    pkg_openbsd = {
        "foo": {
            "installed": True,  # default
        },
        "bar": {
            "installed": True,
            "version": "1.0",
        },
        "baz": {
            "installed": False,
        },
        "qux": {
            "flavor": "no_x11",
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be purged.

## flavor

Optional, defaults to the "normal" flavor. Can be used together with `version`. Ignored when `installed` is `False`.

## version

Optional version string. Can be used to select one specific version of a package. Can be used together with `flavor`. Ignored when `installed` is `False`.

# opkg package items

Handles packages installed by `opkg` on OpenWRT/LEDE.

    pkg_opkg = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar": {
            "installed": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be removed.

# Pacman package items

Handles packages installed by `pacman` (e.g. Arch Linux).

    pkg_pacman = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar": {
            "installed": False,
        },
        "somethingelse": {
            "tarball": "something-1.0.pkg.tar.gz",
        }
    }

System updates on Arch Linux should always be performed manually and with great care. Thus, this item type installs packages with a simple `pacman -S $pkgname` instead of the commonly recommended `pacman -Syu $pkgname`. You should manually do a full system update before installing new packages via BundleWrap!


# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if this package and all dependencies that are no longer needed should be removed.

## tarball

Upload a local file to the node and install it using `pacman -U`. The value of `tarball` must point to a file relative to the `pkg_pacman` subdirectory of the current bundle.

# pip package items

Handles Python packages installed by `pip`.

    pkg_pip = {
        "foo": {
            "installed": True,  # default
            "version": "1.0",   # optional
        },
        "bar": {
            "installed": False,
        },
        "/path/to/virtualenv/foo": {
            # will install foo in the virtualenv at /path/to/virtualenv
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be removed.

## version

Force the given exact version to be installed. You can only specify a single version here; selectors like `>=1.0` are NOT supported. If it's not given, the latest version will be installed initially, but (like the other package items) upgrades will NOT be installed.

# snap package items

Handles packages installed by the `snap` command.

    pkg_snap = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar": {
            "installed": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be removed.

# yum package items

Handles packages installed by `yum` on RPM-based systems.

    pkg_yum = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar": {
            "installed": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be removed.

# zypper package items

Handles packages installed by `zypper` on SUSE-based systems.

    pkg_zypper = {
        "foopkg": {
            "installed": True,  # default
        },
        "bar": {
            "installed": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## installed

`True` when the package is expected to be present on the system; `False` if it should be removed.

# Postgres database items

Manages Postgres databases.

    postgres_dbs = {
        "mydatabase": {
            "owner": "me",
            "when_creating": {
                "encoding": "LATIN1",
                "collation": "de_DE.ISO-8859-1",
                "ctype": "de_DE.ISO-8859-1",
            },
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## owner

Name of the role which owns this database (defaults to `"postgres"`).

## when\_creating

### encoding, collation, and ctype

By default, BundleWrap will only create a database using your default PostgreSQL template, which most likely is `template1`. This means it will use the same encoding and collation that `template1` uses. By specifying any of the attributes `encoding`, `collation`, or `ctype`, BundleWrap will instead create a new database from `template0`, thus allowing you to override said database attributes. These options are creation-time only.

# Postgres role items

Manages Postgres roles.

    postgres_roles = {
        "me": {
            "superuser": True,
            "password": "itsamemario",
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## superuser

`True` if the role should be given superuser privileges (defaults to `False`).

## password

Plaintext password to set for this role (will be hashed using MD5).
Please do not write any passwords into your bundles. This attribute is intended to be used with an external source of passwords and filled dynamically. If you don't have or want such an elaborate setup, specify passwords using the password_hash attribute instead.
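One way to do that is with the [secrets helpers](../guide/secrets.md), for example:

    postgres_roles = {
        "me": {
            'password': repo.vault.password_for("postgres role me"),
        },
    }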

## password_hash

As an alternative to `password`, this allows setting the raw hash as it will be stored in Postgres' internal database. Should start with "md5".

# OpenBSD service items

Handles services on OpenBSD.

    svc_openbsd = {
        "bgpd": {
            "enabled": True,  # default
            "running": True,  # default
        },
        "supervisord": {
            "running": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## enabled

`True` if the service shall be automatically started during system bootup; `False` otherwise. `True`, the default value, is needed on OpenBSD, as starting disabled services fails.

## running

`True` if the service is expected to be running on the system; `False` if it should be stopped.

## Canned actions

See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions)

## restart

Restarts the service.
## stopstart

Stops and starts the service.

# systemd service items

Handles services managed by systemd.

    svc_systemd = {
        "fcron.service": {
            "enabled": True,  # default
            "running": True,  # default
        },
        "sgopherd.socket": {
            "running": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## enabled

`True` if the service shall be automatically started during system bootup; `False` otherwise. `None` makes BundleWrap ignore this setting.

## running

`True` if the service is expected to be running on the system; `False` if it should be stopped. `None` makes BundleWrap ignore this setting.

## Canned actions

See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions)

## reload

Reloads the service.
## restart

Restarts the service.

# System V service items

Handles services managed by traditional System V init scripts.

    svc_systemv = {
        "apache2": {
            "running": True,  # default
        },
        "mysql": {
            "running": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## running

`True` if the service is expected to be running on the system; `False` if it should be stopped.

## Canned actions

See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions)

## reload

Reloads the service.
## restart

Restarts the service.

# Upstart service items

Handles services managed by Upstart.

    svc_upstart = {
        "gunicorn": {
            "running": True,  # default
        },
        "celery": {
            "running": False,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## running

`True` if the service is expected to be running on the system; `False` if it should be stopped.

## Canned actions

See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions)

## reload

Reloads the service.

## restart

Restarts the service.

## stopstart

Stops and then starts the service. This is different from `restart` in that Upstart will pick up changes to the `/etc/init/SERVICENAME.conf` file, while `restart` will continue to use the version of that file that the service was originally started with. See [http://askubuntu.com/a/238069](http://askubuntu.com/a/238069).

# Symlink items

    symlinks = {
        "/some/symlink": {
            "group": "root",
            "owner": "root",
            "target": "/target/file",
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

## target

File or directory this symlink points to. **This attribute is required.**

## group

Name of the group this symlink belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

## owner

Username of the symlink's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.

# User items

Manages system user accounts.

    users = {
        "jdoe": {
            "full_name": "Jane Doe",
            "gid": 2342,
            "groups": ["admins", "users", "wheel"],
            "home": "/home/jdoe",
            "password_hash": "$6$abcdef$ghijklmnopqrstuvwxyz",
            "shell": "/bin/zsh",
            "uid": 4747,
        },
    }

# Attribute reference

See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)

All attributes are optional.

## delete

When set to `True`, this user will be removed from the system. Note that because of how `userdel` works, the primary group of the user will be removed if it contains no other users. When using `delete`, no other attributes are allowed.

## full_name

Full name of the user.

## gid

Primary group of the user as numerical ID or group name.
Due to how `useradd` works, this attribute is required whenever you don't want the default behavior of `useradd` (usually that means automatically creating a group with the same name as the user). If you want to use an unmanaged group already on the node, you need this attribute. If you want to use a group managed by BundleWrap, you need this attribute. This is true even if the groups mentioned are in fact named like the user.

## groups

List of groups (names, not GIDs) the user should belong to. Must NOT include the group referenced by `gid`.

## hash_method

One of:

* `md5`
* `sha256`
* `sha512`

Defaults to `sha512`.

## home

Path to home directory. Defaults to `/home/USERNAME`.

## password

The user's password in plaintext.
Please do not write any passwords into your bundles. This attribute is intended to be used with an external source of passwords and filled dynamically. If you don't have or want such an elaborate setup, specify passwords using the password_hash attribute instead.
If you don't specify a salt along with the password, BundleWrap will use a static salt. Be aware that this is basically the same as using no salt at all.
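As with the Postgres items, one way to avoid plaintext passwords in bundles is the [vault](../guide/secrets.md); a sketch (the identifier string is arbitrary):

    users = {
        "jdoe": {
            'password': repo.vault.password_for("jdoe on node-1"),
        },
    }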

## password_hash

Hashed password as it would be returned by `crypt()` and written to `/etc/shadow`.

## salt

Recommended for use with the `password` attribute. BundleWrap will use 5000 rounds of SHA-512 on this salt and the provided password.

## shell

Path to login shell executable.
## uid

Numerical user ID. It's your job to make sure it's unique.

# About

Development on BundleWrap started in July 2012, borrowing some ideas from [Bcfg2](http://bcfg2.org/). Some key features that are meant to set BundleWrap apart from other config management systems are:

* decentralized architecture
* pythonic and easily extendable
* easy to get started with
* true item-level parallelism (in addition to working on multiple nodes simultaneously, BundleWrap will continue to fix config files while installing a package on the same node)
* very customizable item dependencies
* collaboration features like [node locking](../guide/locks.md) (to prevent simultaneous applies to the same node) and hooks for chat notifications
* built-in testing facility (`bw test`)
* can be used as a library

BundleWrap is a "pure" free software project licensed under the terms of the [GPLv3](http://www.gnu.org/licenses/gpl.html), with no *Enterprise Edition* or commercial support.

# Contributing

We welcome all input and contributions to BundleWrap. If you've never done this sort of thing before, maybe check out [contribution-guide.org](http://www.contribution-guide.org). But don't be afraid to make mistakes, nobody expects your first contribution to be perfect. We'll gladly help you out.
## Submitting bug reports

Please use the [GitHub issue tracker](https://github.com/bundlewrap/bundlewrap/issues) and take a few minutes to look for existing reports of the same problem (open or closed!).
If you've found a security issue or are not at all sure, just contact trehn@bundlewrap.org.

## Contributing code
Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat. If your idea is not a good fit for all or most BundleWrap users, it can still be included as a plugin.
Here are the steps:

1. Write your code. Awesome!
2. If you haven't already done so, please consider writing tests. Otherwise, someone else will have to do it for you.
3. Same goes for documentation.
4. Set up a [virtualenv](http://virtualenv.readthedocs.org/en/latest/) and run `pip install -r requirements.txt`.
5. Make sure you can connect to your localhost via `ssh` without using a password and that you are able to run `sudo`.
6. Run `tox`.
7. Review and sign the Copyright Assignment Agreement (CAA) by adding your name and email to the `AUTHORS` file. (This step can be skipped if your contribution is too small to be considered intellectual property, e.g. spelling fixes.)
8. Open a pull request on [GitHub](https://github.com/bundlewrap/bundlewrap).
9. Feel great. Thank you.
## Contributing documentation

The process is essentially the same as detailed above for code contributions. You will find the docs in `docs/content/` and can preview them using `cd docs && mkdocs serve`.
## Help

If at any point you need help or are not sure what to do, just drop by in [#bundlewrap on Freenode](irc://chat.freenode.net/bundlewrap) or poke [@bundlewrap on Twitter](https://twitter.com/bundlewrap).

## Is BundleWrap the right tool for you?

We think you will enjoy BundleWrap a lot if you:

- know some Python
- like to write your configuration from scratch and control every bit of it
- have lots of unique nodes
- are trying to get a lot of existing systems under management
- are NOT trying to handle a massive amount of nodes (let's say more than 1000)
- like to start small
- don't want yet more stuff to run on your nodes (or mess with appliances as little as possible)
- prefer a simple tool to a fancy one
- want as much as possible in git/hg/bzr
- have strongly segmented internal networks

You might be better served with a different config management system if you:

- are already using a config management system and don't have any major issues
- hate Python and/or JSON
- like to use community-maintained configuration templates
- need unattended bootstrapping of nodes
- don't trust your coworkers

# FAQ

## Technical

### BundleWrap says an item failed to apply, what do I do now?

Try running `bw apply -i nodename` to see which attribute of the item could not be fixed. If that doesn't tell you enough, try `bw --debug apply -i nodename` and look for the command BundleWrap is using to fix the item in question. Then try running that command yourself and check for any errors.
### What happens when two people start applying configuration to the same node?

BundleWrap uses a [locking mechanism](../guide/locks.md) to prevent collisions like this.

### How can I have BundleWrap reload my services after config changes?

See [canned actions](../repo/items.py.md#canned_actions) and [triggers](../repo/items.py.md#triggers).
### Will BundleWrap keep track of package updates?

No. BundleWrap will only care about whether a package is installed or not. Updates will have to be installed through a separate mechanism (I like to create an [action](../items/action.md) with the `interactive` attribute set to `True`). Selecting specific versions should be done through your package manager.
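Such an action might look like this on a Debian-based node (a sketch; adjust the command to your package manager):

    actions = {
        'upgrade_packages': {
            'command': "apt-get update && apt-get -y dist-upgrade",
            'interactive': True,  # skipped unless you confirm it interactively
        },
    }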
### Is there a probing mechanism like Ohai?

No. BundleWrap is meant to be very push-focused. The node should not have any say in what configuration it will receive.

### Is BundleWrap secure?

BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and plugins. We also recommend following commit logs to your repos.
## The BundleWrap Project

### Why doesn't BundleWrap provide pre-built community bundles?

In our experience, bundles for even the most common pieces of software always contain some opinionated bits specific to local infrastructure. Making bundles truly universal (e.g. in terms of supported Linux distributions) would mean a lot of bloat. And since local modifications are hard to reconcile with an upstream community repository, bundles would have to be very feature-complete to be useful to the majority of users, increasing bloat even more. Maintaining bundles and thus configuration for different pieces of software is therefore out of scope for the BundleWrap project. While it might seem tedious when you're getting started, with some practice, writing your own bundles will become both easy and precise in terms of infrastructure fit.
### Why do contributors have to sign a Copyright Assignment Agreement?

While it sounds scary, copyright assignment is used to improve the enforceability of the GPL. Even the FSF does it, [read their explanation why](http://www.gnu.org/licenses/why-assign.html). The agreement used by BundleWrap is from [harmonyagreements.org](http://harmonyagreements.org). If you're still concerned, please do not hesitate to contact [@trehn](https://twitter.com/trehn).

# Glossary

## action

Actions are a special kind of item used for running shell commands during each `bw apply`. They allow you to do things that aren't persistent in nature.
## apply An "apply" is what we call the process of what's otherwise known as "converging" the state described by your repository and the actual status quo on the node.
## bundle A collection of items. Most of the time, you will create one bundle per application. For example, an Apache bundle will include the httpd service, the virtual host definitions and the apache2 package.
## group Used for organizing your nodes.
## hook [Hooks](../repo/hooks.md) can be used to run your own code automatically during various stages of BundleWrap operations.
## item A single piece of configuration on a node, e.g. a file or an installed package. You might be interested in [this overview of item types](../repo/items.py.md#item_types).
## lib [Libs](../repo/libs.md) are a way to store Python modules in your repository and make them accessible to your bundles and templates.
## node A managed system, no matter if physical or virtual.
## repo

A repository is a directory with [some stuff](../repo/layout.md) in it that tells BundleWrap everything it needs to know about your infrastructure.

# groups.py

This file lets you specify or dynamically build groups of [nodes](nodes.py.md) in your environment.

As with `nodes.py`, you define your groups as a dictionary:

    groups = {
        'all': {
            'member_patterns': (
                r".*",
            ),
        },
        'group1': {
            'members': (
                'node1',
            ),
        },
    }

All group attributes are optional.
# Group attribute reference

This section is a reference for all possible attributes you can define for a group:

    groups = {
        'group1': {
            # THIS PART IS EXPLAINED HERE
            'bundles': ["bundle1", "bundle2"],
            'members': ["node1"],
            'members_add': lambda node: node.os == 'debian',
            'members_remove': lambda node: node.os == 'ubuntu',
            'member_patterns': [r"^cluster1\."],
            'metadata': {'foo': "bar"},
            'os': 'linux',
            'subgroups': ["group2", "group3"],
            'subgroup_patterns': [r"^group.*pattern$"],
        },
    }

Note that many attributes from [nodes.py](nodes.py.md) (e.g. `bundles`) may also be set at group level, but aren't explicitly documented here again.
## member_patterns

A list of regular expressions. Node names matching these expressions will be added to the group members. Matches are determined using [the search() method](http://docs.python.org/2/library/re.html#re.RegexObject.search).

## members

A tuple or list of node names that belong to this group.

## members_add and members_remove

For these attributes you can provide a function that takes a node object as its only argument. The function must return a boolean. The function will be called once for every node in the repo. If `True`, this node will be added (`members_add`) to or removed (`members_remove`) from this group.
Inside your function you may query node attributes and groups, but you will not see groups or attributes added as a result of a different members_add / members_remove function. Only attributes and groups that have been set statically will be available. You can, however, remove a node with members_remove that you added with members_add (but not vice-versa).
You should also avoid using node.metadata here. Since metadata ultimately depends on group memberships, only metadata set in nodes.py will be returned here.
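To make the mechanics above concrete, here is a minimal sketch (the group names, node name and OS values are illustrative, not taken from this repo):

```
groups = {
    'debian-family': {
        # called once for every node; must return a boolean
        'members_add': lambda node: node.os in ("debian", "ubuntu"),
    },
    'quirky-nodes': {
        'members': ["node1"],  # static members still work alongside
        # only statically set attributes are reliable in here
        'members_remove': lambda node: node.os == "ubuntu",
    },
}
```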

## metadata A dictionary that will be accessible from each node's `node.metadata`. For each node, BundleWrap will merge the metadata of all of the node's groups first, then merge in the metadata from the node itself. Metadata is merged recursively by default, meaning nested dicts will overlay each other. Lists will be appended to each other, but not recursed into. In some cases, you want to overwrite instead of merge a piece of metadata. This is accomplished through the use of `bundlewrap.metadata.atomic()` and best illustrated as an example: from bundlewrap.metadata import atomic groups = { 'all': { 'metadata': { 'interfaces': { 'eth0': {}, }, 'nameservers': ["8.8.8.8", "8.8.4.4"], 'ntp_servers': ["pool.ntp.org"], }, }, 'internal': { 'metadata': { 'interfaces': { 'eth1': {}, }, 'nameservers': atomic(["10.0.0.1", "10.0.0.2"]), 'ntp_servers': ["10.0.0.1", "10.0.0.2"], }, }, } A node in both groups will end up with `eth0` *and* `eth1`. The nameservers however are overwritten, so that nodes that are in both the "all" *and* the "internal" group will only have the `10.0.0.x` ones while nodes just in the "all" group will have the `8.8.x.x` nameservers. The NTP servers are appended: a node in both groups will have all three NTP servers.
BundleWrap will consider group hierarchy when merging metadata. For example, it is possible to define a default nameserver for the "eu" group and then override it for the "eu.frankfurt" subgroup. The catch is that this only works for groups that are connected through a subgroup hierarchy. Independent groups will have their metadata merged in an undefined order. `bw test` will report conflicting metadata in independent groups as a metadata collision.
Also see the documentation for `node.metadata` for more information.
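A short sketch of the hierarchy rule above (group names and server values are illustrative; `atomic` is imported from `bundlewrap.metadata` as shown earlier):

```
groups = {
    'eu': {
        'subgroups': ["eu.frankfurt"],
        'metadata': {
            'nameservers': atomic(["192.0.2.53"]),
        },
    },
    'eu.frankfurt': {
        'metadata': {
            # wins over the value from "eu" because both groups are
            # connected through the subgroup hierarchy
            'nameservers': atomic(["198.51.100.53"]),
        },
    },
}
```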

## subgroups A tuple or list of group names whose members should be recursively included in this group.
## subgroup_patterns A list of regular expressions. Nodes in groups with names matching these expressions will be added to the group members. Matches are determined using [the search() method](http://docs.python.org/2/library/re.html#re.RegexObject.search).
## use_shadow_passwords See [node attribute documentation](nodes.py.md#use_shadow_passwords). May be overridden by subgroups or individual nodes.
bundlewrap-3.8.0/docs/content/repo/hooks.md000066400000000000000000000167431360562404000207140ustar00rootroot00000000000000# Hooks Hooks enable you to execute custom code at certain points during a BundleWrap run. This is useful for integrating with other systems e.g. for team notifications, logging or statistics. To use hooks, you need to create a subdirectory in your repo called `hooks`. In that directory you can place an arbitrary number of Python source files. If those source files define certain functions, these functions will be called at the appropriate time. ## Example `hooks/my_awesome_notification.py`: from my_awesome_notification_system import post_message def node_apply_start(repo, node, interactive=False, **kwargs): post_message("Starting apply on {}, everything is gonna be OK!".format(node.name))
Always define your hooks with `**kwargs` so we can pass in more information in future updates without breaking your hook.
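For example, a hook written against an older BundleWrap release keeps working when newer versions pass additional arguments, because `**kwargs` absorbs them (`my_awesome_notification_system` is the same stand-in as above):

```
from my_awesome_notification_system import post_message

def node_apply_end(repo, node, duration=None, **kwargs):
    # duration is a timedelta; anything we don't know about ends up in kwargs
    post_message("Finished apply on {} in {}".format(node.name, duration))
```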

## Functions This is a list of all functions a hook file may implement. --- **`action_run_start(repo, node, item, **kwargs)`** Called each time a `bw apply` command reaches a new action. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current action. --- **`action_run_end(repo, node, item, duration=None, status=None, **kwargs)`** Called each time a `bw apply` command completes processing an action. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current action. `duration` How long the action was running (timedelta). `status`: One of `bundlewrap.items.Item.STATUS_FAILED`, `bundlewrap.items.Item.STATUS_SKIPPED`, or `bundlewrap.items.Item.STATUS_ACTION_SUCCEEDED`. --- **`apply_start(repo, target, nodes, interactive=False, **kwargs)`** Called when you start a `bw apply` command. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `interactive` Indicates whether the apply is interactive or not. --- **`apply_end(repo, target, nodes, duration=None, **kwargs)`** Called when a `bw apply` command completes. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `duration` How long the apply took (timedelta). --- **`item_apply_start(repo, node, item, **kwargs)`** Called each time a `bw apply` command reaches a new item. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current item. --- **`item_apply_end(repo, node, item, duration=None, status_code=None, status_before=None, status_after=None, **kwargs)`** Called each time a `bw apply` command completes processing an item. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current item. `duration` How long the apply took (timedelta). `status_code` One of `bundlewrap.items.Item.STATUS_FAILED`, `bundlewrap.items.Item.STATUS_SKIPPED`, `bundlewrap.items.Item.STATUS_OK`, or `bundlewrap.items.Item.STATUS_FIXED`. `status_before` An instance of `bundlewrap.items.ItemStatus`. `status_after` See `status_before`. --- **`lock_add(repo, node, lock_id, items, expiry, comment, **kwargs)`** Called each time a soft lock is added to a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `lock_id` The random ID of the lock. `items` List of item selector strings. `expiry` UNIX timestamp of lock expiry time (int). `comment` As entered by user. --- **`lock_remove(repo, node, lock_id, **kwargs)`** Called each time a soft lock is removed from a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `lock_id` The random ID of the lock. --- **`lock_show(repo, node, lock_info, **kwargs)`** Called each time `bw lock show` finds a lock on a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). 
`lock_info` A dict containing the lock details. --- **`node_apply_start(repo, node, interactive=False, **kwargs)`** Called each time a `bw apply` command reaches a new node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `interactive` `True` if this is an interactive apply run. To skip a node: ``` from bundlewrap.exceptions import SkipNode raise SkipNode("reason goes here") ``` --- **`node_apply_end(repo, node, duration=None, interactive=False, result=None, **kwargs)`** Called each time a `bw apply` command finishes processing a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `duration` How long the apply took (timedelta). `interactive` `True` if this was an interactive apply run. `result` An instance of `bundlewrap.node.ApplyResult`. --- **`node_run_start(repo, node, command, **kwargs)`** Called each time a `bw run` command reaches a new node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `command` The command that will be run on the node. To skip a node: ``` from bundlewrap.exceptions import SkipNode raise SkipNode("reason goes here") ``` --- **`node_run_end(repo, node, command, duration=None, return_code=None, stdout="", stderr="", **kwargs)`** Called each time a `bw run` command finishes on a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `command` The command that was run on the node. `duration` How long it took to run the command (timedelta). `return_code` Return code of the remote command. `stdout` The captured stdout stream of the remote command. `stderr` The captured stderr stream of the remote command. --- **`run_start(repo, target, nodes, command, **kwargs)`** Called each time a `bw run` command starts. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `command` The command that will be run on the node. --- **`run_end(repo, target, nodes, command, duration=None, **kwargs)`** Called each time a `bw run` command finishes. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `command` The command that was run. `duration` How long it took to run the command on all nodes (timedelta). --- **`test(repo, **kwargs)`** Called at the end of a full `bw test`. `repo` The current repository (instance of `bundlewrap.repo.Repository`). --- **`test_node(repo, node, **kwargs)`** Called during `bw test` for each node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). bundlewrap-3.8.0/docs/content/repo/items.py.md000066400000000000000000000312011360562404000213230ustar00rootroot00000000000000

items.py

Within each bundle, there may be a file called `items.py`. It defines any number of magic attributes that are automatically processed by BundleWrap. Each attribute is a dictionary mapping an item name (such as a file name) to a dictionary of attributes (e.g. file ownership information). A typical `items.py` might look like this: files = { '/etc/hosts': { 'owner': "root", 'group': "root", 'mode': "0664", [...] }, } users = { 'janedoe': { 'home': "/home/janedoe", 'shell': "/bin/zsh", [...] }, 'johndoe': { 'home': "/home/johndoe", 'shell': "/bin/bash", [...] }, } This bundle defines the attributes `files` and `users`. Within the `users` attribute, there are two `user` items. Each item maps its name to a dictionary that is understood by the specific kind of item. Below you will find a reference of all builtin item types and the attributes they understand. You can also [define your own item types](../guide/dev_item.md).
# Item types This table lists all item types included in BundleWrap along with the bundle attributes they understand.
| Type | Bundle attribute | Description |
|---|---|---|
| action | actions | Actions allow you to run commands on every `bw apply` |
| directory | directories | Manages permissions and ownership for directories |
| file | files | Manages contents, permissions, and ownership for files |
| group | groups | Manages groups by wrapping `groupadd`, `groupmod` and `groupdel` |
| k8s_* | k8s_* | Manages resources in Kubernetes clusters by wrapping `kubectl` |
| pkg_apt | pkg_apt | Installs and removes packages with APT |
| pkg_dnf | pkg_dnf | Installs and removes packages with dnf |
| pkg_opkg | pkg_opkg | Installs and removes packages with opkg |
| pkg_pacman | pkg_pacman | Installs and removes packages with pacman |
| pkg_pip | pkg_pip | Installs and removes Python packages with pip |
| pkg_snap | pkg_snap | Installs and removes packages with snap |
| pkg_yum | pkg_yum | Installs and removes packages with yum |
| pkg_zypper | pkg_zypper | Installs and removes packages with zypper |
| postgres_db | postgres_dbs | Manages Postgres databases |
| postgres_role | postgres_roles | Manages Postgres roles |
| pkg_openbsd | pkg_openbsd | Installs and removes OpenBSD packages with `pkg_add`/`pkg_delete` |
| svc_openbsd | svc_openbsd | Starts and stops services with OpenBSD's rc |
| svc_systemd | svc_systemd | Starts and stops services with systemd |
| svc_systemv | svc_systemv | Starts and stops services with traditional System V init scripts |
| svc_upstart | svc_upstart | Starts and stops services with Upstart |
| symlink | symlinks | Manages symbolic links and their ownership |
| user | users | Manages users by wrapping `useradd`, `usermod` and `userdel` |
# Builtin item attributes There are also attributes that can be applied to any kind of item.
## comment This is a string that will be displayed in interactive mode (`bw apply -i`) whenever the item is to be changed in any way. You can use it to warn users before they start disruptive actions.
## error_on_missing_fault This will simply skip an item instead of raising an error when a Fault used for an attribute on the item is unavailable. Faults are special objects used by `repo.vault` to [handle secrets](../guide/secrets.md). A Fault being unavailable can mean you're missing the secret key required to decrypt a secret you're trying to use as an item attribute value. Defaults to `False`.
## needs One such attribute is `needs`. It allows for setting up dependencies between items. This is not something you will have to do very often, because there are already implicit dependencies between item types (e.g. all files depend on the users owning them). Here are two examples: my_items = { 'item1': { [...] 'needs': [ 'file:/etc/foo.conf', ], }, 'item2': { ... 'needs': [ 'pkg_apt:', 'bundle:foo', ], } } The first item (`item1`, specific attributes have been omitted) depends on a file called `/etc/foo.conf`, while `item2` depends on all APT packages being installed and every item in the foo bundle.
## needed_by This attribute is an alternative way of defining dependencies. It works just like `needs`, but in the other direction. There are only three scenarios where you should use `needed_by` over `needs`: * if you need all items of a certain type to depend on something or * if you need all items in a bundle to depend on something or * if you need an item in a bundle you can't edit (e.g. because it's provided by a community-maintained [plugin](plugins.md)) to depend on something in your bundles
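Here is a minimal sketch of the first scenario (the action name and command are illustrative): instead of adding `needs` to every package, the action declares itself `needed_by` all `pkg_apt` items:

```
actions = {
    'add_repo_key': {
        'command': "apt-key add /data/repo.key",
        # the inverse of 'needs': every pkg_apt item now depends on this
        'needed_by': ["pkg_apt:"],
    },
}
```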
## tags A list of strings to tag an item with. Tagging has no immediate effect in itself, but can be useful in a number of places. For example, you can add dependencies on all items with a given tag: pkg_apt = { "mysql-server-{}".format(node.metadata.get('mysql_version', "5.5")): { 'tags': ["provides-mysqld"], }, } svc_systemd = { "myapp": { 'needs': ["tag:provides-mysqld"], }, } In this simplified example we save ourselves from duplicating the logic that gets the current MySQL version from metadata (which is probably overkill here, but you might encounter more complex situations).
## triggers and triggered In some scenarios, you may want to execute an [action](../items/action.md) only when an item is fixed (e.g. restart a daemon after a config file has changed or run `postmap` after updating an alias file). To do this, BundleWrap has the builtin attribute `triggers`. You can use it to point to any item that has its `triggered` attribute set to `True`. Such items will only be checked (or in the case of actions: run) if the triggering item is fixed (or a triggering action completes successfully). files = { '/etc/daemon.conf': { [...] 'triggers': [ 'action:restart_daemon', ], }, } actions = { 'restart_daemon': { 'command': "service daemon restart", 'triggered': True, }, } The above example will run `service daemon restart` every time BundleWrap successfully applies a change to `/etc/daemon.conf`. If an action is triggered multiple times, it will only be run once. Similar to `needed_by`, `triggered_by` can be used to define a `triggers` relationship from the opposite direction.
## preceded_by Operates like `triggers`, but will apply the triggered item *before* the triggering item. Let's look at an example: files = { '/etc/example.conf': { [...] 'preceded_by': [ 'action:backup_example', ], }, } actions = { 'backup_example': { 'command': "cp /etc/example.conf /etc/example.conf.bak", 'triggered': True, }, } In this configuration, a backup copy of `/etc/example.conf` will be made before it is changed, and only if it is actually about to be changed. You would probably also want to set `cascade_skip` to `False` on the action so you can skip it in interactive mode when you're sure you don't need the backup copy. Similar to `needed_by`, `precedes` can be used to define a `preceded_by` relationship from the opposite direction.
## unless Another builtin item attribute is `unless`. For example, it can be used to construct a one-off file item where BundleWrap will only create the file once, but won't check or modify its contents once it exists. files = { "/path/to/file": { [...] "unless": "test -x /path/to/file", }, } This will run `test -x /path/to/file` before doing anything with the item. If the command returns 0, no action will be taken to "correct" the item. Another common use for `unless` is with actions that perform some sort of install operation. In this case, the `unless` condition makes sure the install operation is only performed when it is needed instead of every time you run `bw apply`. In scenarios like this you will probably want to set `cascade_skip` to `False` so that skipping the installation (because the thing is already installed) will not cause every item that depends on the installed thing to be skipped. Example: actions = { 'download_thing': { 'command': "wget http://example.com/thing.bin -O /opt/thing.bin && chmod +x /opt/thing.bin", 'unless': "test -x /opt/thing.bin", 'cascade_skip': False, }, 'run_thing': { 'command': "/opt/thing.bin", 'needs': ["action:download_thing"], }, } If `action:download_thing` would not set `cascade_skip` to `False`, `action:run_thing` would only be executed once: directly after the thing has been downloaded. On subsequent runs, `action:download_thing` will fail the `unless` condition and be skipped. This would also cause all items that depend on it to be skipped, including `action:run_thing`.
The commands you choose for `unless` should not change the state of your node. Otherwise, running `bw verify` might unexpectedly interfere with your nodes.

## cascade_skip There are some situations where you don't want the default behavior of skipping everything that depends on a skipped item. That's where `cascade_skip` comes in. Set it to `False` and skipping an item won't skip those that depend on it. Note that items can be skipped * interactively or * because they haven't been triggered or * because one of their dependencies failed or * because they failed their `unless` condition or * because an [action](../items/action.md) had its `interactive` attribute set to `True` during a non-interactive run The following example will offer to run an `apt-get update` before installing a package, but continue to install the package even if the update is declined interactively. actions = { 'apt_update': { 'cascade_skip': False, 'command': "apt-get update", }, } pkg_apt = { 'somepkg': { 'needs': ["action:apt_update"], }, } `cascade_skip` defaults to `True`. However, if the item uses the `unless` attribute or is triggered, the default changes to `False`. Most of the time, this is what you'll want.
# Canned actions Some item types have what we call "canned actions". Those are pre-defined actions attached directly to an item. Take a look at this example: svc_upstart = {'mysql': {'running': True}} files = { "/etc/mysql/my.cnf": { 'source': "my.cnf", 'triggers': [ "svc_upstart:mysql:reload", # this triggers the canned action ], }, } Canned actions always have to be triggered in order to run. In the example above, a change in the file `/etc/mysql/my.cnf` will trigger the `reload` action defined by the [svc_upstart item type](../items/svc_upstart.md) for the mysql service. bundlewrap-3.8.0/docs/content/repo/layout.md000066400000000000000000000027771360562404000211060ustar00rootroot00000000000000 Repository layout ================= A BundleWrap repository contains everything you need to construct the configuration for your systems. This page describes the various subdirectories and files that can exist inside a repo.
| Path | Description |
|---|---|
| `nodes.py` | This file tells BundleWrap what nodes (servers, VMs, ...) there are in your environment and lets you configure options such as hostnames. |
| `groups.py` | This file allows you to organize your nodes into groups. |
| `bundles/` | This required subdirectory contains the bulk of your configuration, organized into bundles of related items. Each bundle is a subdirectory of `bundles/` with an `items.py` or `metadata.py` in it (or both). |
| `data/` | This optional subdirectory contains data files that are not generic enough to be included in bundles (which are meant to be shareable). |
| `hooks/` | This optional subdirectory contains hooks you can use to act on certain events when using BundleWrap. |
| `items/` | This optional subdirectory contains the code for your custom item types. |
| `libs/` | This optional subdirectory contains reusable custom code for your bundles. |
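Put together, a repository using all of the above might look like this (the bundle and file names are only examples):

```
my-repo/
├── nodes.py
├── groups.py
├── requirements.txt
├── bundles/
│   └── apache/
│       ├── items.py
│       ├── metadata.py
│       └── files/
├── data/
├── hooks/
├── items/
└── libs/
```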
bundlewrap-3.8.0/docs/content/repo/libs.md000066400000000000000000000010671360562404000205130ustar00rootroot00000000000000 # Custom code The `libs/` subdirectory of your repository provides a convenient place to put reusable code used throughout your bundles and hooks. A Python module called `example.py` placed in this directory will be available as `repo.libs.example` wherever you have access to a `bundlewrap.repo.Repository` object. In `nodes.py` and `groups.py`, you can do the same thing with just `libs.example`.
Only single files, no subdirectories or packages, are supported at the moment.
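A minimal sketch of how the pieces fit together (module, function and file names are made up for illustration):

```
# libs/mymath.py
def increase(number):
    return number + 1
```

A bundle's `items.py` could then use it like this:

```
# bundles/mybundle/items.py
files = {
    '/etc/counter': {
        'content': str(repo.libs.mymath.increase(41)),
    },
}
```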
bundlewrap-3.8.0/docs/content/repo/metadata.py.md000066400000000000000000000074601360562404000217740ustar00rootroot00000000000000# metadata.py Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. This is accomplished through metadata processors. Metadata processors are functions that take the metadata dictionary generated so far as their single argument. You must then return a dictionary with any modifications you need to make plus at least one of several options: @metadata_processor def my_metadata_processor(metadata): metadata["foo"] = node.name return metadata, DONE You must always return the modified metadata dictionary as the first element. After that, there are a few options you can return. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until it indicates that it is done by returning the `DONE` option or until *all* remaining metadata processors return `RUN_ME_AGAIN`. You must always return one of `DONE` or `RUN_ME_AGAIN`. Use the latter if your metadata processor depends on metadata that is generated by another metadata processor (which may be called after yours). Here is another example: @metadata_processor def first_metadata_processor(metadata): metadata["foo"] = node.name return metadata, DONE @metadata_processor def second_metadata_processor(metadata): if "foo" in metadata: metadata["bar"] = metadata["foo"] return metadata, DONE else: return metadata, RUN_ME_AGAIN In this example, `"bar"` can only be set once `"foo"` is available and thus the `second_metadata_processor` has to wait and request to `RUN_ME_AGAIN` until `first_metadata_processor` ran. This is necessary because the running order of metadata processors is undefined.
To avoid deadlocks when accessing other nodes' metadata from within a metadata processor, use `other_node.partial_metadata` instead of `other_node.metadata`. For the same reason, always use the `metadata` parameter to access the current node's metadata, never `node.metadata`.

## Available options
| Option | Description |
|---|---|
| `DONE` | Indicates that this metadata processor has done all it can and need not be called again. Return this whenever possible. |
| `RUN_ME_AGAIN` | Indicates that this metadata processor is still waiting for metadata from another metadata processor to become available. |
| `DEFAULTS` | The returned metadata dictionary will only be used to provide default values. The actual metadata generated so far will be recursively merged into the returned dict. When using this flag, you must not return the original metadata dictionary but construct a new one as in the example below. |
| `OVERWRITE` | The returned metadata dictionary will be recursively merged into the actual metadata generated so far (inverse of `DEFAULTS`). When using this flag, you must not return the original metadata dictionary but construct a new one as in the `DEFAULTS` example below. |
Here is an example of how to use `DEFAULTS`: @metadata_processor def my_metadata_processor(metadata): return { "foo": { "bar": 47, }, }, DONE, DEFAULTS This means `node.metadata["foo"]["bar"]` will be 47 by default, but can also be overridden in static metadata at the node/group level.
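`OVERWRITE` is the mirror image; a sketch (the key and value are illustrative):

```
@metadata_processor
def enforce_nameservers(metadata):
    # the returned dict is merged *over* the metadata generated so far,
    # so this value wins over whatever nodes.py and groups.py set
    return {
        "nameservers": ["192.0.2.53"],
    }, DONE, OVERWRITE
```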
For your convenience, you can access `repo`, `node`, `metadata_processor` and all the options in `metadata.py` without importing them.
bundlewrap-3.8.0/docs/content/repo/nodes.py.md000066400000000000000000000115631360562404000213220ustar00rootroot00000000000000# nodes.py This file lets you specify or dynamically build a list of nodes in your environment. All you have to do here is define a Python dictionary called `nodes`. It should look something like this: nodes = { "node-1": { 'hostname': "node-1.example.com", }, } With BundleWrap, the DNS name and the internal identifier for a node ("node-1" in this case) are two separate things. All fields for a node (including `hostname`) are optional. If you don't give one, BundleWrap will attempt to use the internal identifier to connect to a node: nodes = { "node-1.example.com": {}, }
# Dynamic node list You are not confined to the static way of defining a node list as shown above. You can also assemble the `nodes` dictionary dynamically: def get_my_nodes_from_ldap(): [...] return ldap_nodes nodes = get_my_nodes_from_ldap()
# One file per node Especially in larger installations, a single `nodes.py` can become inconvenient to work with. This example reads nodes from a `nodes/` directory. from glob import glob from os.path import join nodes = {} for node in glob(join(repo_path, "nodes", "*.py")): with open(node, 'r') as f: exec(f.read()) Node files would then add their entries to `nodes`, like this: # nodes/node-1.py nodes['node-1'] = { 'hostname': "node-1.example.com", }
# Node attribute reference This section is a reference for all possible attributes you can define for a node: nodes = { 'node-1': { # THIS PART IS EXPLAINED HERE }, } All attributes can also be set at the group level, unless noted otherwise.
## Regular attributes ### bundles A list of bundle names to be assigned to this node. Bundles set at [group level](groups.py.md) will be added.
### dummy Set this to `True` to prevent BundleWrap from creating items for and connecting to this node. This is useful for unmanaged nodes because you can still assign them bundles and metadata like regular nodes and access that from managed nodes (e.g. for monitoring).
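For example, an unmanaged appliance can still carry metadata for managed nodes to reference (the node name and address are illustrative):

```
nodes = {
    'core-switch': {
        'dummy': True,  # BundleWrap will never connect to this node
        'metadata': {
            'ip': "192.0.2.10",
        },
    },
}
```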
### hostname A string used as a DNS name when connecting to this node. May also be an IP address.
The username and SSH private key for connecting to the node cannot be configured in BundleWrap. If you need to customize those, BundleWrap will honor your `~/.ssh/config`.
Cannot be set at group level.
### metadata This can be a dictionary of arbitrary data (some type restrictions apply). You can access it from your templates as `node.metadata`. Use this to attach custom data (such as a list of IP addresses that should be configured on the target node) to the node. Note that you can also define metadata at the [group level](groups.py.md#metadata), but node metadata has higher priority. You are restricted to using only the following types in metadata: * `dict` * `list` * `tuple` * `set` * `bool` * `text` / `unicode` * `bytes` / `str` (only if decodable into text using UTF-8) * `int` * `None` * `bundlewrap.utils.Fault`
Also see the documentation for `group.metadata` for more information.

### os Defaults to `"linux"`. A list of supported OSes can be obtained with `bw debug -n ANY_NODE_NAME -c "print(node.OS_KNOWN)"`.
### os_version Set this to your OS version. Note that it must be a tuple of integers, e.g. if you're running Ubuntu 16.04 LTS, it should be `(16, 4)`. Tuples of integers can be used for easy comparison of versions: `(12, 4) < (16, 4)`
### template_node Copy all attributes and merge all metadata from this node. This is useful for temporary clones of single specific nodes, where you don't want to create a group to deduplicate all the node-level configuration. Cannot be set at group level.
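A short sketch (node names are illustrative):

```
nodes = {
    'node-1': {
        'hostname': "node-1.example.com",
        'bundles': ["base"],
    },
    'node-1-staging': {
        'template_node': "node-1",  # inherits attributes and metadata
        'hostname': "node-1-staging.example.com",
    },
}
```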
## OS compatibility overrides ### cmd_wrapper_outer Used whenever a command needs to be run on a node. Defaults to `"sudo sh -c {}"`. `{}` will be replaced by the quoted command to be run (after `cmd_wrapper_inner` has been applied). You will need to override this if you're not using `sudo` to gain root privileges (e.g. `doas`) on the node.
### cmd_wrapper_inner Used whenever a command needs to be run on a node. Defaults to `"export LANG=C; {}"`. `{}` will be replaced by the command to be run. You will need to override this if the shell on your node sets environment variables differently.
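For example, a node using `doas` instead of `sudo` might be declared like this (a sketch; the node name is illustrative):

```
nodes = {
    'openbsd-box': {
        'os': "openbsd",
        # {} will be replaced with the quoted command to run
        'cmd_wrapper_outer': "doas sh -c {}",
    },
}
```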
### use_shadow_passwords
Changing this setting will affect the security of the target system. Only do this for legacy systems that don't support shadow passwords.
This setting will affect how the [user item](../items/user.md) operates. If set to `False`, password hashes will be written directly to `/etc/passwd` and thus be accessible to any user on the system. If the OS of the node is set to "openbsd", this setting has no effect as `master.shadow` is always used. bundlewrap-3.8.0/docs/content/repo/plugins.md000066400000000000000000000023351360562404000212420ustar00rootroot00000000000000# Plugins The plugin system in BundleWrap is an easy way of integrating third-party code into your repository.
While plugins are subject to some superficial code review by BundleWrap developers before being accepted, we cannot make any guarantees as to the quality and trustworthiness of plugins. Always do your due diligence before running third-party code.
## Finding plugins It's as easy as `bw repo plugin search <term>`. Or you can browse [plugins.bundlewrap.org](http://plugins.bundlewrap.org).
## Installing plugins You probably guessed it: `bw repo plugin install <plugin_name>` Installing the first plugin in your repo will create a file called `plugins.json`. You should commit this file (and any files installed by the plugin, of course) to version control.
Avoid editing files provided by plugins at all costs. Local modifications will prevent future updates to the plugin.
## Updating plugins You can update all installed plugins with this command: `bw repo plugin update`
## Removing a plugin `bw repo plugin remove <plugin_name>`
## Writing your own See the [guide on publishing your own plugins](../guide/dev_plugin.md). bundlewrap-3.8.0/docs/content/repo/requirements.txt.md000066400000000000000000000014271360562404000231230ustar00rootroot00000000000000 # requirements.txt This optional file can be used to ensure minimum required versions of BundleWrap and other Python packages on every machine that uses a repository. `bw repo create` will initially add your current version of BundleWrap:
bundlewrap>=2.4.0
You can add more packages as you like (you do not have to specify a version for each one); just put each package on a separate line. When someone then tries to use your repo without one of those packages, BundleWrap will exit early with a friendly error message:
! Python package 'foo' is listed in requirements.txt, but wasn't found. You probably have to install it with `pip install foo`.
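For instance, a requirements.txt that additionally insists on Mako being available might look like this (the extra entry is illustrative):

```
bundlewrap>=2.4.0
Mako
```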
bundlewrap-3.8.0/docs/mkdocs.yml000066400000000000000000000043731360562404000166270ustar00rootroot00000000000000site_name: BundleWrap docs_dir: content site_dir: build extra_css: - bundlewrap.css repo_url: "https://github.com/bundlewrap/bundlewrap" remote_name: github copyright: "BundleWrap is published under the GPL license.

Donations welcome in Bitcoin 13AJYksqncZromPF8HvDUXsmHChAm3Y7W7 or Ethereum 0x5Eb3037e197d3C0d2E014bcfC2e027EB0AD42812." google_analytics: ['UA-33891245-2', 'docs.bundlewrap.org'] pages: - : index.md - Guides: - Quickstart: guide/quickstart.md - Installation: guide/installation.md - CLI: guide/cli.md - Environment Variables: guide/env.md - File templates: guide/item_file_templates.md - Handling secrets: guide/secrets.md - Locking: guide/locks.md - Kubernetes: guide/kubernetes.md - Custom items: guide/dev_item.md - Writing plugins: guide/dev_plugin.md - Python API: guide/api.md - OS compatibility: guide/os_compatibility.md - Migrating to 2.0: guide/migrate_12.md - Migrating to 3.0: guide/migrate_23.md - Repository: - Overview: repo/layout.md - nodes.py: repo/nodes.py.md - groups.py: repo/groups.py.md - requirements.txt: repo/requirements.txt.md - bundles/.../items.py: repo/items.py.md - bundles/.../metadata.py: repo/metadata.py.md - hooks/: repo/hooks.md - libs/: repo/libs.md - Plugins: repo/plugins.md - Items: - action: items/action.md - directory: items/directory.md - file: items/file.md - group: items/group.md - k8s_*: items/k8s.md - pkg_apt: items/pkg_apt.md - pkg_dnf: items/pkg_dnf.md - pkg_openbsd: items/pkg_openbsd.md - pkg_opkg: items/pkg_opkg.md - pkg_pacman: items/pkg_pacman.md - pkg_pip: items/pkg_pip.md - pkg_snap: items/pkg_snap.md - pkg_yum: items/pkg_yum.md - pkg_zypper: items/pkg_zypper.md - postgres_db: items/postgres_db.md - postgres_role: items/postgres_role.md - svc_openbsd: items/svc_openbsd.md - svc_systemd: items/svc_systemd.md - svc_systemv: items/svc_systemv.md - svc_upstart: items/svc_upstart.md - symlink: items/symlink.md - user: items/user.md - Misc: - About: misc/about.md - Why BundleWrap: misc/deciding.md - Glossary: misc/glossary.md - FAQ: misc/faq.md - Contributing: misc/contributing.md bundlewrap-3.8.0/requirements.txt000066400000000000000000000001101360562404000171410ustar00rootroot00000000000000# deps in this file are for local dev purposes only mkdocs pytest wheel bundlewrap-3.8.0/setup.cfg000066400000000000000000000002351360562404000155060ustar00rootroot00000000000000[flake8] max-line-length = 100 max-complexity = 10 [tool:pytest] python_files=*.py python_classes=Test python_functions=test_* [bdist_wheel] universal = 1 bundlewrap-3.8.0/setup.py000066400000000000000000000041001360562404000153720ustar00rootroot00000000000000from sys import version_info from setuptools import find_packages, setup dependencies = [ "cryptography", "Jinja2", "Mako", "passlib", "pyyaml", "requests >= 1.0.0", "six", ] if version_info < (3, 2, 0): dependencies.append("futures") setup( name="bundlewrap", version="3.8.0", description="Config management with Python", long_description=( "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" "While most other config management systems rely on a client-server architecture, BundleWrap works off a repository cloned to your local machine. It then automates the process of SSHing into your servers and making sure everything is configured the way it's supposed to be. You won't have to install anything on managed servers." 
), author="Torsten Rehn", author_email="torsten@rehn.email", license="GPLv3", url="http://bundlewrap.org", packages=find_packages(), entry_points={ 'console_scripts': [ "bw=bundlewrap.cmdline:main", ], }, keywords=["configuration", "config", "management"], classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: System Administrators", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: System :: Installation/Setup", "Topic :: System :: Systems Administration", ], install_requires=dependencies, extras_require={ # used for wheels ':python_version=="2.7"': ["futures"], }, zip_safe=False, ) bundlewrap-3.8.0/tests/000077500000000000000000000000001360562404000150275ustar00rootroot00000000000000bundlewrap-3.8.0/tests/integration/000077500000000000000000000000001360562404000173525ustar00rootroot00000000000000bundlewrap-3.8.0/tests/integration/bw_adhoc_nodes.py000066400000000000000000000026751360562404000226740ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_apply(tmpdir): make_repo( tmpdir, bundles={ "bundle1": { 'files': { join(str(tmpdir), "test"): { 'content': "test", }, }, }, }, groups={ "adhoc-localhost": { 'bundles': ["bundle1"], 'member_patterns': ["localhost"], 'os': host_os(), }, }, ) assert not exists(join(str(tmpdir), "test")) stdout, stderr, rcode = run("bw -A apply localhost", path=str(tmpdir)) assert rcode == 0 assert exists(join(str(tmpdir), "test")) def test_apply_fail(tmpdir): make_repo( tmpdir, bundles={ "bundle1": { 'files': { join(str(tmpdir), "test"): { 'content': "test", }, }, }, }, groups={ "adhoc-localhost": { 'bundles': ["bundle1"], 'member_patterns': ["localhost"], 'os': host_os(), }, }, ) assert not exists(join(str(tmpdir), "test")) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 assert not exists(join(str(tmpdir), "test")) bundlewrap-3.8.0/tests/integration/bw_apply_actions.py000066400000000000000000000043021360562404000232600ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from bundlewrap.utils.testing import host_os, make_repo, run def test_action_success(tmpdir): make_repo( tmpdir, bundles={ "test": { 'actions': { "success": { 'command': "true", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_fail(tmpdir): make_repo( tmpdir, bundles={ "test": { 'actions': { "failure": { 'command': "false", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_pipe_binary(tmpdir): make_repo( tmpdir, bundles={ "test": { 'actions': { "pipe": { 'command': "cat", 'data_stdin': b"hello\000world", 'expected_stdout': b"hello\000world", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_pipe_utf8(tmpdir): make_repo( tmpdir, bundles={ "test": { 'actions': { "pipe": { 'command': "cat", 'data_stdin': "hello 🐧\n", 'expected_stdout': "hello 🐧\n", }, }, }, }, nodes={ "localhost": { 'bundles': 
["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) bundlewrap-3.8.0/tests/integration/bw_apply_autoonly.py000066400000000000000000000022671360562404000235020ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_only_bundle_with_dep(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', 'needs': ["file:" + join(str(tmpdir), "bar")], }, }, }, "test2": { 'files': { join(str(tmpdir), "bar"): { 'content_type': 'any', }, join(str(tmpdir), "baz"): { 'content_type': 'any', }, }, }, }, nodes={ "localhost": { 'bundles': ["test", "test2"], 'os': host_os(), }, }, ) run("bw apply -o bundle:test localhost", path=str(tmpdir)) assert exists(join(str(tmpdir), "foo")) assert exists(join(str(tmpdir), "bar")) assert not exists(join(str(tmpdir), "baz")) bundlewrap-3.8.0/tests/integration/bw_apply_autoskip.py000066400000000000000000000111351360562404000234610ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_skip_bundle(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip bundle:test localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_group(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, groups={ "foo": {'members': ["localhost"]}, }, ) result = run("bw apply --skip group:foo localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_id(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip file:{} localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_node(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip node:localhost localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_tag(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", 'tags': ["nope"], }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip tag:nope localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_type(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip file: localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_trigger(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", 'tags': 
["nope"], 'triggers': ["file:{}".format(join(str(tmpdir), "bar"))], }, join(str(tmpdir), "bar"): { 'content': "nope", 'triggered': True, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip tag:nope localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) assert not exists(join(str(tmpdir), "bar")) bundlewrap-3.8.0/tests/integration/bw_apply_directories.py000066400000000000000000000063031360562404000241370ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os import mkdir from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_purge(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "purgedir", "managed_file"): { 'content': "content", }, join(str(tmpdir), "purgedir", "subdir1", "managed_file"): { 'content': "content", }, }, 'directories': { join(str(tmpdir), "purgedir"): { 'purge': True, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) mkdir(join(str(tmpdir), "purgedir")) mkdir(join(str(tmpdir), "purgedir", "subdir2")) mkdir(join(str(tmpdir), "purgedir", "subdir3")) with open(join(str(tmpdir), "purgedir", "unmanaged_file"), 'w') as f: f.write("content") with open(join(str(tmpdir), "purgedir", "subdir3", "unmanaged_file"), 'w') as f: f.write("content") run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "purgedir", "unmanaged_file")) assert not exists(join(str(tmpdir), "purgedir", "subdir3", "unmanaged_file")) assert not exists(join(str(tmpdir), "purgedir", "subdir2")) assert exists(join(str(tmpdir), "purgedir", "subdir1", "managed_file")) assert exists(join(str(tmpdir), "purgedir", "managed_file")) def test_purge_special_chars(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "purgedir", "mänäged_file"): { 'content': "content", }, join(str(tmpdir), "purgedir", "managed_`id`_file"): { 'content': "content", }, }, 'directories': { join(str(tmpdir), "purgedir"): { 'purge': True, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) mkdir(join(str(tmpdir), "purgedir")) with open(join(str(tmpdir), "purgedir", "unmänäged_file"), 'w') as f: f.write("content") with open(join(str(tmpdir), "purgedir", "unmanaged_`uname`_file"), 'w') as f: f.write("content") with open(join(str(tmpdir), "purgedir", "unmanaged_:'_file"), 'w') as f: f.write("content") run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "purgedir", "unmänäged_file")) assert not exists(join(str(tmpdir), "purgedir", "unmanaged_`uname`_file")) assert not exists(join(str(tmpdir), "purgedir", "unmanaged_:'_file")) assert exists(join(str(tmpdir), "purgedir", "mänäged_file")) assert exists(join(str(tmpdir), "purgedir", "managed_`id`_file")) bundlewrap-3.8.0/tests/integration/bw_apply_files.py000066400000000000000000000127271360562404000227340ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from base64 import b64encode from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_any_content_create(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == 
b"" def test_any_content_exists(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "foo"), 'wb') as f: f.write(b"existing content") run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"existing content" def test_binary_inline_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo.bin"): { 'content_type': 'base64', 'content': b64encode("ö".encode('latin-1')), }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo.bin"), 'rb') as f: content = f.read() assert content.decode('latin-1') == "ö" def test_binary_template_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo.bin"): { 'encoding': 'latin-1', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "files", "foo.bin"), 'wb') as f: f.write("ö".encode('utf-8')) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo.bin"), 'rb') as f: content = f.read() assert content.decode('latin-1') == "ö" def test_delete(tmpdir): with open(join(str(tmpdir), "foo"), 'w') as f: f.write("foo") make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'delete': True, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "foo")) def test_mako_template_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'mako', 'content': "${node.name}", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"localhost" def test_mako_template_content_with_secret(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'mako', 'content': "${repo.vault.password_for('testing')}", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"faCTT76kagtDuZE5wnoiD1CxhGKmbgiX" def test_text_template_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'text', 'content': "${node.name}", }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"${node.name}" bundlewrap-3.8.0/tests/integration/bw_apply_postgres.py000066400000000000000000000033261360562404000234730ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from json import loads from os import environ from bundlewrap.utils.testing import host_os, make_repo, run if environ.get('TRAVIS') == "true": def test_create(tmpdir): make_repo( tmpdir, bundles={ "test": { 'postgres_dbs': { "bw-test1": { 'owner': "bw-test1", }, }, 'postgres_roles': { "bw-test1": { 'superuser': True, 'password': 'potato', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 
'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 stdout, stderr, rcode = run("bw items --state localhost postgres_db:bw-test1", path=str(tmpdir)) assert rcode == 0 assert loads(stdout.decode()) == {'owner': "bw-test1"} stdout, stderr, rcode = run("bw items --state localhost postgres_role:bw-test1", path=str(tmpdir)) assert rcode == 0 assert loads(stdout.decode()) == { 'can_login': True, 'password_hash': "md5ecba3aec62c5aabf6480de6352182004", 'superuser': True, } stdout, stderr, rcode = run("dropdb bw-test1", path=str(tmpdir)) assert rcode == 0 stdout, stderr, rcode = run("dropuser bw-test1", path=str(tmpdir)) assert rcode == 0 bundlewrap-3.8.0/tests/integration/bw_apply_precedes.py000066400000000000000000000152721360562404000234220ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_precedes(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n2\n3\n" def test_precedes_unless(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'unless': 'true', }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n3\n" def test_precedes_unless2(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'unless': 'true', }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], 'unless': 'true', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "file")) def test_precedes_unless3(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], 'unless': 'true', }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "2\n3\n" def test_precedes_unless4(tmpdir): 
make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["action:action3"], }, }, 'actions': { "action2": { 'command': "false", 'needs': ["file:{}".format(join(str(tmpdir), "file"))], }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'needs': ["action:action2"], }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n" def test_precedes_action(tmpdir): make_repo( tmpdir, bundles={ "test": { 'actions': { "action1": { 'command': "echo 1 > {}".format(join(str(tmpdir), "file")), 'precedes': ["action:action2"], 'triggered': True, }, "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n2\n" bundlewrap-3.8.0/tests/integration/bw_apply_secrets.py000066400000000000000000000130121360562404000232660ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_fault_content(tmpdir): make_repo( tmpdir, bundles={ "test": {}, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: f.write(""" files = {{ "{}": {{ 'content': repo.vault.password_for("test"), }}, }} """.format(join(str(tmpdir), "secret"))) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_mako(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "secret"): { 'content': "${repo.vault.password_for('test')}", 'content_type': 'mako', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_mako_metadata(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "secret"): { 'content': "${node.metadata['secret']}", 'content_type': 'mako', }, }, }, }, ) with open(join(str(tmpdir), "nodes.py"), 'w') as f: f.write(""" nodes = {{ "localhost": {{ 'bundles': ["test"], 'metadata': {{'secret': vault.password_for("test")}}, 'os': "{}", }}, }} """.format(host_os())) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_jinja2(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "secret"): { 'content': "{{ repo.vault.password_for('test') }}", 'content_type': 'jinja2', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_skipped(tmpdir): make_repo( tmpdir, bundles={ "test": {}, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: f.write(""" 
files = {{
    "{}": {{
        'content': repo.vault.password_for("test", key='unavailable'),
    }},
}}
""".format(join(str(tmpdir), "secret")))
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert not exists(join(str(tmpdir), "secret"))


def test_fault_content_skipped_mako(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'files': {
                    join(str(tmpdir), "secret"): {
                        'content': "${repo.vault.password_for('test', key='unavailable')}",
                        'content_type': 'mako',
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert not exists(join(str(tmpdir), "secret"))


def test_fault_content_skipped_jinja2(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'files': {
                    join(str(tmpdir), "secret"): {
                        'content': "{{ repo.vault.password_for('test', key='unavailable') }}",
                        'content_type': 'jinja2',
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    # the original stopped after make_repo() and never exercised the node;
    # completed here to mirror the mako variant above
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert not exists(join(str(tmpdir), "secret"))


def test_fault_content_error(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "test": {},
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f:
        f.write("""
files = {{
    "{}": {{
        'content': repo.vault.password_for("test", key='unavailable'),
        'error_on_missing_fault': True,
    }},
}}
""".format(join(str(tmpdir), "secret")))
    stdout, stderr, rcode = run("bw -d apply localhost", path=str(tmpdir))
    print(stdout)
    assert rcode == 1

bundlewrap-3.8.0/tests/integration/bw_apply_symlinks.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from os import mkdir, readlink, symlink
from os.path import join

from bundlewrap.utils.testing import host_os, make_repo, run


def test_create(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'symlinks': {
                    join(str(tmpdir), "foo"): {
                        'target': "/dev/null",
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert readlink(join(str(tmpdir), "foo")) == "/dev/null"


def test_fix(tmpdir):
    symlink(join(str(tmpdir), "bar"), join(str(tmpdir), "foo"))
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'symlinks': {
                    join(str(tmpdir), "foo"): {
                        'target': "/dev/null",
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert readlink(join(str(tmpdir), "foo")) == "/dev/null"


def test_fix_dir(tmpdir):
    mkdir(join(str(tmpdir), "foo"))
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'symlinks': {
                    join(str(tmpdir), "foo"): {
                        'target': "/dev/null",
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert readlink(join(str(tmpdir), "foo")) == "/dev/null"


def test_fix_dir_target(tmpdir):
    mkdir(join(str(tmpdir), "dir1"))
    mkdir(join(str(tmpdir), "dir2"))
    symlink(join(str(tmpdir), "dir1"), join(str(tmpdir), "link"))
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'symlinks': {
                    join(str(tmpdir), "link"): {
                        'target': join(str(tmpdir), "dir2"),
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    assert readlink(join(str(tmpdir), "link")) == join(str(tmpdir), "dir2")
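
# Editor's note: the integration tests in this archive all share one skeleton —
# build a throwaway repo with make_repo(), shell out to the bw CLI via run(),
# then assert on stdout, the exit code, or files on disk. A minimal sketch of
# that pattern, using only helpers already imported in this file; the bundle
# name "demo" and the file it manages are hypothetical, not part of the suite:


def test_sketch_minimal_apply(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "demo": {  # hypothetical bundle
                'files': {
                    join(str(tmpdir), "demo_file"): {
                        'content': "hello\n",
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["demo"],
                'os': host_os(),  # apply items against the test machine itself
            },
        },
    )
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    with open(join(str(tmpdir), "demo_file")) as f:
        assert f.read() == "hello\n"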
bundlewrap-3.8.0/tests/integration/bw_groups.py

from json import loads
from os.path import join

from bundlewrap.utils.testing import make_repo, run


def test_group_members_add(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {'os': 'centos'},
            "node2": {'os': 'debian'},
            "node3": {'os': 'ubuntu'},
        },
    )
    with open(join(str(tmpdir), "groups.py"), 'w') as f:
        f.write("""
groups = {
    "group1": {
        'members_add': lambda node: node.os == 'centos',
    },
    "group2": {
        'members': ["node2"],
        'members_add': lambda node: node.os != 'centos',
    },
    "group3": {
        'members_add': lambda node: not node.in_group("group2"),
    },
    "group4": {
        'members': ["node3"],
    },
}
""")
    stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir))
    assert stdout == b"""group1\tnode1
group2\tnode2,node3
group3\tnode1,node3
group4\tnode3
"""
    assert stderr == b""
    assert rcode == 0


def test_group_members_remove(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {'os': 'centos'},
            "node2": {'os': 'debian'},
            "node3": {'os': 'ubuntu'},
            "node4": {'os': 'ubuntu'},
        },
    )
    with open(join(str(tmpdir), "groups.py"), 'w') as f:
        f.write("""
groups = {
    "group1": {
        'members_add': lambda node: node.os == 'ubuntu',
    },
    "group2": {
        'members_add': lambda node: node.os == 'ubuntu',
        'members_remove': lambda node: node.name == "node3",
    },
    "group3": {
        'members_add': lambda node: not node.in_group("group3"),
    },
    "group4": {
        'subgroups': ["group3"],
        'members_remove': lambda node: node.os == 'debian',
    },
}
""")
    stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir))
    assert stdout == b"""group1\tnode3,node4
group2\tnode4
group3\tnode1,node2,node3,node4
group4\tnode1,node3,node4
"""
    assert stderr == b""
    assert rcode == 0


def test_group_members_partial_metadata(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {'foo': 1},
            },
            "node2": {},
        },
    )
    with open(join(str(tmpdir), "groups.py"), 'w') as f:
        f.write("""
groups = {
    "group1": {
        'members_add': lambda node: node.metadata.get('foo') == 1,
    },
    "group2": {
        'members': ["node2"],
        'metadata': {'foo': 1},
    },
}
""")
    stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2 nodes", path=str(tmpdir))
    assert stdout == b"""group1\tnode1
group2\tnode2
"""
    assert stderr == b""
    assert rcode == 0


def test_group_members_remove_based_on_metadata(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {'remove': False},
            },
            "node2": {},
        },
    )
    with open(join(str(tmpdir), "groups.py"), 'w') as f:
        f.write("""
groups = {
    "group1": {
        'members_add': lambda node: not node.metadata.get('remove', False),
        'members_remove': lambda node: node.metadata.get('remove', False),
    },
    "group2": {
        'members': ["node2"],
        'metadata': {'remove': True},
    },
    "group3": {
        'subgroups': ["group1"],
        'members_remove': lambda node: node.name.endswith("1") and node.metadata.get('redherring', True),
    },
}
""")
    stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir))
    assert stdout == b"""group1\tnode1,node2
group2\tnode2
group3\tnode2
"""
    assert stderr == b""
    assert rcode == 0

    # make sure there is no metadata deadlock
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode('utf-8')) == {'remove': False}
    assert stderr == b""
    assert rcode == 0


def test_group_members_removed_from_supergroup(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            'node_in_group': {
                'hostname': "localhost",
            },
            'node_NOT_in_group': {
'hostname': "localhost", 'metadata': { 'remove_from_group': True, }, }, }, ) with open(join(str(tmpdir), "groups.py"), 'w') as f: f.write(""" groups = { 'super_group': { 'subgroups': ['intermediate_group'], }, 'intermediate_group': { 'members_remove': lambda node: node.metadata.get('remove_from_group', False), 'subgroups': ['inner_group'], }, 'inner_group': { 'member_patterns': ( r".*", ), }, } """) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i inner_group,intermediate_group,intermediate_group nodes", path=str(tmpdir)) assert stdout == b"""inner_group\tnode_NOT_in_group,node_in_group intermediate_group\tnode_in_group intermediate_group\tnode_in_group """ assert stderr == b"" assert rcode == 0 bundlewrap-3.8.0/tests/integration/bw_hash.py000066400000000000000000000206051360562404000213420ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_empty(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n" assert stderr == b"" def test_nondeterministic(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content_type': 'mako', 'content': "<% import random %>${random.randint(1, 9999)}", }, }, }, }, ) hashes = set() for i in range(3): stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) hashes.add(stdout.strip()) assert len(hashes) > 1 def test_deterministic(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content': "${node.name}", 'group': None, # BSD has a different default and we don't want to # deal with that here }, }, }, }, ) hashes = set() for i in range(3): stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) hashes.add(stdout.strip()) assert len(hashes) == 1 assert hashes.pop() == b"2203e7acc35608bbff471c023b7b7498e5b385d9" def test_dict(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content': "yes please", 'group': None, # BSD has a different default and we don't want to # deal with that here }, }, }, }, ) stdout, stderr, rcode = run("bw hash -d", path=str(tmpdir)) assert rcode == 0 assert stdout == b"93e7a2c6e8cdc71fb4df5426bc0d0bb978d84381 node1\n" stdout, stderr, rcode = run("bw hash -d node1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"59d1a7c79640ccdfd3700ab141698a9389fcd0b7 file:/test\n" stdout, stderr, rcode = run("bw hash -d node1 file:/test", path=str(tmpdir)) assert rcode == 0 assert stdout == ( b"content_hash\tc05a36d547e2b1682472f76985018038d1feebc5\n" b"mode\t0644\n" b"owner\troot\n" b"type\tfile\n" ) def test_metadata_empty(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'metadata': {}, }, }, ) stdout, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n" def test_metadata_fault(tmpdir): make_repo(tmpdir) with open(join(str(tmpdir), "nodes.py"), 'w') as f: f.write(""" nodes = { 'node1': { 'metadata': {'foo': vault.password_for("testing")}, }, 'node2': { 'metadata': {'foo': vault.password_for("testing").value}, }, 'node3': { 'metadata': {'foo': "faCTT76kagtDuZE5wnoiD1CxhGKmbgiX"}, }, 'node4': { 'metadata': {'foo': "something else entirely"}, }, } """) print(run("bw debug -c 'print(repo.vault.password_for(\"testing\"))'", path=str(tmpdir))) stdout1, stderr, rcode = run("bw hash -m node1", 
    assert stdout1 == b"d0c998fd17a68322a03345954bb0a75301d3a127\n"
    assert stderr == b""
    assert rcode == 0

    stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir))
    assert stdout2 == stdout1
    assert stderr == b""
    assert rcode == 0

    stdout3, stderr, rcode = run("bw hash -m node3", path=str(tmpdir))
    assert stdout3 == stdout1
    assert stderr == b""
    assert rcode == 0

    stdout4, stderr, rcode = run("bw hash -m node4", path=str(tmpdir))
    assert stdout4 != stdout1
    assert stderr == b""
    assert rcode == 0


def test_metadata_nested_sort(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {
                    'nested': {
                        'one': True,
                        'two': False,
                        'three': 3,
                        'four': "four",
                        'five': None,
                    },
                },
            },
            "node2": {
                'metadata': {
                    'nested': {
                        'five': None,
                        'four': "four",
                        'one': True,
                        'three': 3,
                        'two': False,
                    },
                },
            },
        },
    )
    stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir))
    assert rcode == 0
    assert stdout1 == b"bc403a093ca3399cd3efa7a64ec420e0afef5e70\n"

    stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir))
    assert rcode == 0
    assert stdout1 == stdout2


def test_metadata_repo(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {
                    'foo': 47,
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw hash -m", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"c0cc160ab1b6e71155cd4f65139bc7f66304d7f3\n"


def test_metadata_repo_dict(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {
                    'foo': 47,
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw hash -md", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"node1\t013b3a8199695eb45c603ea4e0a910148d80e7ed\n"


def test_groups_repo(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {},
            "group2": {},
        },
    )
    stdout, stderr, rcode = run("bw hash -g", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"479c737e191339e5fae20ac8a8903a75f6b91f4d\n"


def test_groups_repo_dict(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {},
            "group2": {},
        },
    )
    stdout, stderr, rcode = run("bw hash -dg", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"group1\ngroup2\n"


def test_groups(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {'members': ["node1", "node2"]},
            "group2": {'members': ["node3"]},
        },
        nodes={
            "node1": {},
            "node2": {},
            "node3": {},
        },
    )
    stdout, stderr, rcode = run("bw hash -g group1", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"59f5a812acd22592b046b20e9afedc1cfcd37c77\n"


def test_groups_dict(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {'members': ["node1", "node2"]},
            "group2": {'members': ["node3"]},
        },
        nodes={
            "node1": {},
            "node2": {},
            "node3": {},
        },
    )
    stdout, stderr, rcode = run("bw hash -dg group1", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"node1\nnode2\n"


def test_groups_node(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {'members': ["node1", "node2"]},
            "group2": {'members': ["node3"]},
        },
        nodes={
            "node1": {},
            "node2": {},
            "node3": {},
        },
    )
    stdout, stderr, rcode = run("bw hash -g node1", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"6f4615dc71426549e22df7961bd2b88ba95ad1fc\n"


def test_groups_node_dict(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {'members': ["node1", "node2"]},
            "group2": {'members': ["node3"]},
        },
        nodes={
            "node1": {},
            "node2": {},
            "node3": {},
        },
    )
    stdout, stderr, rcode = run("bw hash -dg node1", path=str(tmpdir))
    assert rcode == 0
    assert stdout == b"group1\n"

bundlewrap-3.8.0/tests/integration/bw_items.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from bundlewrap.utils.testing import make_repo, run


def test_file_preview(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'files': {
                    "/test": {
                        'content': "föö",
                        'encoding': 'latin-1',
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir))
    assert stdout == "föö".encode('utf-8')  # our output is always utf-8
    assert rcode == 0


def test_multiple_file_preview(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'files': {
                    "/test": {
                        'content': "föö",
                    },
                    "/testdir/test2": {
                        'content': "bar",
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw items -w itemprev node1", path=str(tmpdir))
    assert rcode == 0
    assert tmpdir.join("itemprev/test").exists()
    assert tmpdir.join("itemprev/testdir/test2").exists()


def test_fault_unavailable(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'files': {
                    "/test": {
                        'content': "${repo.vault.password_for('test', key='404')}",
                        'content_type': 'mako',
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir))
    assert rcode == 1


def test_fault_unavailable_multiple(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'files': {
                    "/test": {
                        'content': "föö",
                    },
                    "/testdir/test3": {
                        'content': "${repo.vault.password_for('test', key='404')}",
                        'content_type': 'mako',
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw items -w itemprev node1", path=str(tmpdir))
    assert rcode == 0
    assert tmpdir.join("itemprev/test").exists()
    assert not tmpdir.join("itemprev/testdir/test3").exists()

bundlewrap-3.8.0/tests/integration/bw_lock.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from re import search

from bundlewrap.utils.testing import host_os, make_repo, run


def get_lock_id(output):
    return search(r"locked with ID (\w+) ", output).groups()[0]


def test_add_lock_apply_remove(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "localhost": {
                'bundles': ["bundle1"],
                'os': host_os(),
            },
        },
        bundles={
            "bundle1": {
                'files': {
                    "/tmp/bw_test_lock_add": {
                        'content': "foo",
                    },
                },
            },
        },
    )
    run("rm -f /tmp/bw_test_lock_add")
    stdout, stderr, rcode = run("BW_IDENTITY=jdoe bw lock add -c höhöhö -e 1m -i file:/tmp/bw_test_lock_add localhost", path=str(tmpdir))
    assert rcode == 0
    lock_id = get_lock_id(stdout.decode('utf-8'))
    assert len(lock_id) == 4
    stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
    assert rcode == 0
    stdout, stderr, rcode = run("cat /tmp/bw_test_lock_add", path=str(tmpdir))
    assert rcode != 0
    stdout, stderr, rcode = run("bw lock remove localhost {}".format(lock_id), path=str(tmpdir))
    assert rcode == 0

bundlewrap-3.8.0/tests/integration/bw_metadata.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from json import loads
from os.path import join

from bundlewrap.utils.testing import make_repo, run


def test_empty(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {},
        },
    )
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert stdout == b"{}\n"
    assert stderr == b""
    assert rcode == 0


def test_simple(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {'metadata': {"foo": "bar"}},
        },
    )
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {"foo": "bar"}
    assert stderr == b""
    assert rcode == 0


def test_object(tmpdir):
    make_repo(tmpdir)
    with open(join(str(tmpdir), "nodes.py"), 'w') as f:
        f.write("nodes = {'node1': {'metadata': {'foo': object}}}")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode == 1


def test_merge(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {
                    "foo": {
                        "bar": "baz",
                    },
                },
            },
        },
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    "ding": 5,
                    "foo": {
                        "bar": "ZAB",
                        "baz": "bar",
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {
        "ding": 5,
        "foo": {
            "bar": "baz",
            "baz": "bar",
        },
    }
    assert stderr == b""
    assert rcode == 0


def test_template_node(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'template_node': "node2",
            },
            "node2": {
                'metadata': {
                    "foo": 2,
                },
            },
        },
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    "foo": 3,
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {"foo": 2}
    assert stderr == b""
    assert rcode == 0


def test_template_node_override(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {
                    "foo": 1,
                },
                'template_node': "node2",
            },
            "node2": {
                'metadata': {
                    "foo": 2,
                },
            },
        },
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    "foo": 3,
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {"foo": 1}
    assert stderr == b""
    assert rcode == 0


def test_metadatapy(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    metadata["baz"] = node.name
    return metadata, DONE
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {
        "baz": "node1",
        "foo": "bar",
    }
    assert stderr == b""
    assert rcode == 0


def test_metadatapy_defaults(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return {
        "foo": "baz",
        "baz": "foo",
    }, DONE, DEFAULTS
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {
        "baz": "foo",
        "foo": "bar",
    }
    assert stderr == b""
    assert rcode == 0


def test_metadatapy_defaults_atomic(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
    )
    with open(join(str(tmpdir), "nodes.py"), 'w') as f:
        f.write(
"""
from bundlewrap.metadata import atomic

nodes = {
    "node1": {
        'bundles': ["test"],
        'metadata': {"foo": atomic({"bar": "baz"})},
    },
}
""")
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return {
        "foo": {
            "bar": "frob",
            "baz": "gobble",
        },
    }, DONE, DEFAULTS
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {
        "foo": {"bar": "baz"},
    }
    assert stderr == b""
    assert rcode == 0


def test_metadatapy_update(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return {
        "foo": "baz",
        "baz": "foo",
    }, DONE, OVERWRITE
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert loads(stdout.decode()) == {
        "baz": "foo",
        "foo": "baz",
    }
    assert stderr == b""
    assert rcode == 0


def test_metadatapy_invalid_number_of_elements(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return metadata
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_first_element_not_dict(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return DONE, metadata
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_defaults_plus_original_dict(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return metadata, DONE, DEFAULTS
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_overwrite_plus_original_dict(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return metadata, DONE, OVERWRITE
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_option(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return metadata, DONE, 1000
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_done_and_again(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return metadata, DONE, RUN_ME_AGAIN
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_no_done_or_again(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return {}, DEFAULTS
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_metadatapy_invalid_defaults_and_overwrite(tmpdir):
    make_repo(
        tmpdir,
        bundles={"test": {}},
        nodes={
            "node1": {
                'bundles': ["test"],
                'metadata': {"foo": "bar"},
            },
        },
    )
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return {}, DEFAULTS, OVERWRITE, DONE
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    assert rcode != 0


def test_table(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'metadata': {
                    "foo_dict": {
                        "bar": "baz",
                    },
                    "foo_list": ["bar", 1],
                    "foo_int": 47,
                    "foo_umlaut": "föö",
                },
            },
            "node2": {
                'metadata': {
"foo_dict": { "baz": "bar", }, "foo_list": [], "foo_int": -3, "foo_umlaut": "füü", }, }, }, groups={ "all": { 'members': ["node1", "node2"], }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw metadata --table all foo_dict bar, foo_list, foo_int, foo_umlaut", path=str(tmpdir)) assert stdout.decode('utf-8') == """node\tfoo_dict bar\tfoo_list\tfoo_int\tfoo_umlaut node1\tbaz\tbar, 1\t47\tföö node2\t\t\t-3\tfüü """ assert stderr == b"" assert rcode == 0 def test_table_no_key(tmpdir): make_repo( tmpdir, nodes={ "node1": {}, }, ) stdout, stderr, rcode = run("bw metadata --table node1", path=str(tmpdir)) assert rcode == 1 bundlewrap-3.8.0/tests/integration/bw_nodes.py000066400000000000000000000040721360562404000215270ustar00rootroot00000000000000from bundlewrap.utils.testing import make_repo, run def test_empty(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw nodes", path=str(tmpdir)) assert stdout == b"" assert stderr == b"" assert rcode == 0 def test_single(tmpdir): make_repo(tmpdir, nodes={"node1": {}}) stdout, stderr, rcode = run("bw nodes", path=str(tmpdir)) assert stdout == b"node1\n" assert stderr == b"" assert rcode == 0 def test_hostname(tmpdir): make_repo( tmpdir, groups={"all": {'members': ["node1"]}}, nodes={"node1": {'hostname': "node1.example.com"}}, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all hostname | cut -f 2", path=str(tmpdir)) assert stdout == b"node1.example.com\n" assert stderr == b"" assert rcode == 0 def test_bundles(tmpdir): make_repo( tmpdir, bundles={ "bundle1": {}, "bundle2": {}, }, groups={"all": {'members': ["node1", "node2"]}}, nodes={ "node1": {'bundles': ["bundle1", "bundle2"]}, "node2": {'bundles': ["bundle2"]}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all bundles | grep node1 | cut -f 2", path=str(tmpdir)) assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2"] assert stderr == b"" assert rcode == 0 def test_template_node(tmpdir): make_repo( tmpdir, nodes={ "node1": {'template_node': "node2"}, "node2": {'dummy': True}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 dummy | grep node1 | cut -f 2", path=str(tmpdir)) assert stdout.decode().strip() == "True" assert stderr == b"" assert rcode == 0 def test_template_node_cascade(tmpdir): make_repo( tmpdir, nodes={ "node1": {'template_node': "node2"}, "node2": {'template_node': "node1"}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 dummy", path=str(tmpdir)) assert rcode == 1 bundlewrap-3.8.0/tests/integration/bw_plot.py000066400000000000000000000035731360562404000214020ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_groups_for_node(tmpdir): make_repo( tmpdir, nodes={ "node-foo": {}, "node-bar": {}, "node-baz": {}, "node-pop": {}, }, ) with open(join(str(tmpdir), "groups.py"), 'w') as f: f.write(""" groups = { "group-foo": { 'members': ["node-foo"], 'member_patterns': [r".*-bar"], }, "group-bar": { 'subgroups': ["group-foo"], }, "group-baz": { 'members': ["node-pop"], 'members_add': lambda node: node.name == "node-pop", }, "group-pop": { 'subgroup_patterns': [r"ba"], }, } """) stdout, stderr, rcode = run("bw plot groups-for-node node-foo", path=str(tmpdir)) assert stdout == b"""digraph bundlewrap { rankdir = LR node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] edge [arrowhead=vee] "group-bar" [fontcolor=white,style=filled]; "group-foo" [fontcolor=white,style=filled]; "group-pop" [fontcolor=white,style=filled]; "node-foo" 
[fontcolor="#303030",shape=box,style=rounded]; "group-bar" -> "group-foo" [color="#6BB753",penwidth=2] "group-pop" -> "group-bar" [color="#6BB753",penwidth=2] "group-foo" -> "node-foo" [color="#D18C57",penwidth=2] } """ assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("bw plot groups-for-node node-pop", path=str(tmpdir)) assert stdout == b"""digraph bundlewrap { rankdir = LR node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] edge [arrowhead=vee] "group-baz" [fontcolor=white,style=filled]; "group-pop" [fontcolor=white,style=filled]; "node-pop" [fontcolor="#303030",shape=box,style=rounded]; "group-pop" -> "group-baz" [color="#6BB753",penwidth=2] "group-baz" -> "node-pop" [color="#D18C57",penwidth=2] } """ assert stderr == b"" assert rcode == 0 bundlewrap-3.8.0/tests/integration/bw_repo.py000066400000000000000000000006261360562404000213650ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_not_a_repo_test(tmpdir): assert run("bw nodes", path=str(tmpdir))[2] == 1 def test_subdir_invocation(tmpdir): make_repo(tmpdir, nodes={"node1": {}}) stdout, stderr, rcode = run("bw nodes", path=join(str(tmpdir), "bundles")) assert stdout == b"node1\n" assert stderr == b"" assert rcode == 0 bundlewrap-3.8.0/tests/integration/bw_run.py000066400000000000000000000013621360562404000212220ustar00rootroot00000000000000from bundlewrap.utils.testing import host_os, make_repo, run def test_run_ok(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'os': host_os(), }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw run localhost true", path=str(tmpdir)) assert rcode == 0 assert b"localhost\t0" in stdout assert stderr == b"" def test_run_fail(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'os': host_os(), }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw run localhost false", path=str(tmpdir)) assert rcode == 0 assert b"localhost\t1" in stdout assert stderr == b"" bundlewrap-3.8.0/tests/integration/bw_stats.py000066400000000000000000000025111360562404000215510ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from bundlewrap.utils.testing import make_repo, run def test_nondeterministic(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content': "foo", }, "/test2": { 'content': "foo", }, }, }, }, ) stdout, stderr, rcode = run("bw stats", path=str(tmpdir)) assert stdout == """╭───────┬─────────────────────╮ │ count │ type │ ├───────┼─────────────────────┤ │ 1 │ nodes │ │ 0 │ groups │ │ 1 │ bundles │ │ 0 │ metadata processors │ │ 2 │ items │ ├───────┼─────────────────────┤ │ 2 │ file │ ╰───────┴─────────────────────╯ """.encode('utf-8') bundlewrap-3.8.0/tests/integration/bw_test.py000066400000000000000000000370241360562404000214010ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_empty(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw test", path=str(tmpdir)) assert stdout == b"" assert stderr == b"" assert rcode == 0 def test_bundle_not_found(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, ) assert run("bw test", path=str(tmpdir))[2] == 1 def test_hooks(tmpdir): make_repo( tmpdir, nodes={ "node1": {}, "node2": {}, }, ) with open(join(str(tmpdir), "hooks", "test.py"), 'w') as f: f.write("""from bundlewrap.utils.ui import io def test(repo, **kwargs): io.stdout("AAA") def test_node(repo, node, 
def test_node(repo, node, **kwargs):
    io.stdout("BBB")
""")
    assert b"AAA" in run("bw test -H", path=str(tmpdir))[0]
    assert b"BBB" in run("bw test -J", path=str(tmpdir))[0]


def test_circular_dep_direct(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "pkg_apt": {
                    "foo": {
                        'needs': ["pkg_apt:bar"],
                    },
                    "bar": {
                        'needs': ["pkg_apt:foo"],
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_circular_dep_indirect(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "pkg_apt": {
                    "foo": {
                        'needs': ["pkg_apt:bar"],
                    },
                    "bar": {
                        'needs': ["pkg_apt:baz"],
                    },
                    "baz": {
                        'needs': ["pkg_apt:foo"],
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_circular_dep_self(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "pkg_apt": {
                    "foo": {
                        'needs': ["pkg_apt:foo"],
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_circular_trigger_self(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "pkg_apt": {
                    "foo": {
                        'triggers': ["pkg_apt:foo"],
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_file_invalid_attribute(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "files": {
                    "/foo": {
                        "potato": "yes",
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_file_template_error(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "files": {
                    "/foo": {
                        'content_type': 'mako',
                        'content': "${broken",
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_group_loop(tmpdir):
    make_repo(
        tmpdir,
        groups={
            "group1": {
                'subgroups': ["group2"],
            },
            "group2": {
                'subgroups': ["group3"],
            },
            "group3": {
                'subgroups': ["group1"],
            },
        },
    )
    assert run("bw test -S", path=str(tmpdir))[2] == 1


def test_group_metadata_collision(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': {
                        'baz': 1,
                    },
                    'bar': 2,
                },
            },
            "group2": {
                'metadata': {
                    'foo': {
                        'baz': 3,
                    },
                    'snap': 4,
                },
                'subgroups': ["group3"],
            },
            "group3": {
                'members': ["node1"],
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 1


def test_group_metadata_collision_subgroups(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': {
                        'baz': 1,
                    },
                    'bar': 2,
                },
            },
            "group2": {
                'metadata': {
                    'foo': {
                        'baz': 3,
                    },
                    'snap': 4,
                },
                'subgroups': ["group1", "group3"],
            },
            "group3": {
                'members': ["node1"],
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 0


def test_group_metadata_collision_list(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': [1],
                },
            },
            "group2": {
                'members': ["node1"],
                'metadata': {
                    'foo': [2],
                },
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 1


def test_group_metadata_collision_dict(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': {'bar': 1},
                },
            },
            "group2": {
                'members': ["node1"],
                'metadata': {
                    'foo': 2,
                },
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 1


def test_group_metadata_collision_dict_ok(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': {'bar': 1},
                },
            },
            "group2": {
                'members': ["node1"],
                'metadata': {
                    'foo': {'baz': 2},
                },
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 0


def test_group_metadata_collision_set(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': set([1]),
                },
            },
            "group2": {
                'members': ["node1"],
                'metadata': {
                    'foo': 2,
                },
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 1


def test_group_metadata_collision_set_ok(tmpdir):
    make_repo(
        tmpdir,
        nodes={"node1": {}},
        groups={
            "group1": {
                'members': ["node1"],
                'metadata': {
                    'foo': set([1]),
                },
            },
            "group2": {
                'members': ["node1"],
                'metadata': {
                    'foo': set([2]),
                },
            },
        },
    )
    assert run("bw test -M", path=str(tmpdir))[2] == 0


def test_fault_missing(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "files": {
                    "/foo": {
                        'content_type': 'mako',
                        'content': "${repo.vault.decrypt('bzzt', key='unavailable')}",
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1
    assert run("bw test -iI", path=str(tmpdir))[2] == 0


def test_metadata_determinism_ok(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {},
        },
    )
    with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
        f.write("""@metadata_processor
def test(metadata):
    metadata['test'] = 1
    return metadata, DONE
""")
    assert run("bw test -m 3", path=str(tmpdir))[2] == 0


def test_metadata_determinism_broken(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {},
        },
    )
    with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
        f.write("""from random import randint

@metadata_processor
def test(metadata):
    metadata.setdefault('test', randint(1, 99999))
    return metadata, DONE
""")
    assert run("bw test -m 3", path=str(tmpdir))[2] == 1


def test_config_determinism_ok(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "files": {
                    "/test": {
                        'content': "1",
                        'content_type': 'mako',
                    },
                },
            },
        },
    )
    assert run("bw test -d 3", path=str(tmpdir))[2] == 0


def test_config_determinism_broken(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "files": {
                    "/test": {
                        'content': "<% from random import randint %>\n${randint(1, 99999)\n}",
                        'content_type': 'mako',
                    },
                },
            },
        },
    )
    assert run("bw test -d 3", path=str(tmpdir))[2] == 1


def test_unknown_subgroup(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {},
        },
        groups={
            "group1": {'subgroups': ["missing-group"]},
            "group2": {'members': ["node1"]},
        },
    )
    assert run("bw test", path=str(tmpdir))[2] == 1
    assert run("bw test group1", path=str(tmpdir))[2] == 1
    assert run("bw test group2", path=str(tmpdir))[2] == 1


def test_empty_group(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {},
        },
        groups={
            "group1": {},
            "group2": {'members': ["node1"]},
        },
    )
    assert run("bw test", path=str(tmpdir))[2] == 0
    assert run("bw test -e", path=str(tmpdir))[2] == 1


def test_group_user_dep_deleted(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "users": {
                    "user1": {
                        'groups': ["group1"],
                    },
                },
                "groups": {
                    "group1": {
                        'delete': True,
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_group_user_dep_ok(tmpdir):
    # regression test for #341
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                "users": {
                    "user1": {},
                },
                "groups": {
                    "group1": {'delete': True},
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 0


def test_group_user_dep_deleted_gid(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
'bundles': ["bundle1"], }, }, bundles={ "bundle1": { "users": { "user1": { 'gid': "group1", }, }, "groups": { "group1": { 'delete': True, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_secret_identifier_only_once(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content': "${repo.vault.password_for('testing')}", 'content_type': 'mako', }, }, }, }, ) assert run("bw test -s ''", path=str(tmpdir))[2] == 1 assert run("bw test -s 'test'", path=str(tmpdir))[2] == 0 assert run("bw test -s 'test,foo'", path=str(tmpdir))[2] == 0 def test_secret_identifier_twice(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, "node2": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content': "${repo.vault.password_for('testing')}", 'content_type': 'mako', }, }, }, }, ) assert run("bw test -s ''", path=str(tmpdir))[2] == 0 assert run("bw test -s 'test'", path=str(tmpdir))[2] == 0 assert run("bw test -s 'test,foo'", path=str(tmpdir))[2] == 0 def test_reverse_dummy_dep(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1", "bundle2"], }, }, bundles={ "bundle1": { 'files': { "/test": { 'content': "test", }, }, }, "bundle2": { 'files': { "/test2": { 'content': "test", 'needed_by': ["bundle:bundle1"], }, }, }, }, ) stdout, stderr, rcode = run("bw test", path=str(tmpdir)) assert rcode == 0 bundlewrap-3.8.0/tests/integration/bw_verify.py000066400000000000000000000013731360562404000217240ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from os.path import join from bundlewrap.utils.testing import host_os, make_repo, run def test_empty_verify(tmpdir): make_repo( tmpdir, bundles={ "test": { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "foo"), 'w') as f: f.write("test") stdout, stderr, rcode = run("bw verify localhost", path=str(tmpdir)) assert rcode == 0 bundlewrap-3.8.0/tests/integration/secrets.py000066400000000000000000000126361360562404000214040ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import unicode_literals from base64 import b64decode from os.path import join from bundlewrap.utils.testing import make_repo, run def test_b64encode_fault(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir)) assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n" assert stderr == b"" assert rcode == 0 def test_encrypt(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir)) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) assert stdout == b"test\n" assert stderr == b"" assert rcode == 0 def test_encrypt_different_key_autodetect(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\", key=\"generate\"))'", path=str(tmpdir)) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) assert stdout == b"test\n" assert stderr == b"" assert rcode == 0 def test_encrypt_file(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), 
"data", "source") with open(source_file, 'w') as f: f.write("ohai") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "encrypted", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( "encrypted", ), path=str(tmpdir), ) assert stdout == b"ohai\n" assert stderr == b"" assert rcode == 0 def test_encrypt_file_different_key_autodetect(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("ohai") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\", \"{}\")'".format( source_file, "encrypted", "generate", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( "encrypted", ), path=str(tmpdir), ) assert stdout == b"ohai\n" assert stderr == b"" assert rcode == 0 def test_encrypt_file_base64(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'wb') as f: f.write("öhai".encode('latin-1')) stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "encrypted", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format( "encrypted", ), path=str(tmpdir), ) assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1') assert stderr == b"" assert rcode == 0 def test_format_password(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir)) assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n" assert stderr == b"" assert rcode == 0 def test_human_password(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\"))'", path=str(tmpdir)) assert stdout == b"Xaint-Heep-Pier-Tikl-76\n" assert stderr == b"" assert rcode == 0 def test_human_password_digits(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir)) assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n" assert stderr == b"" assert rcode == 0 def test_human_password_per_word(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir)) assert stdout == b"X-D-F-H-42\n" assert stderr == b"" assert rcode == 0 def test_human_password_words(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir)) assert stdout == b"Xaint-Heep-13\n" assert stderr == b"" assert rcode == 0 def test_random_bytes_as_base64(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\"))'", path=str(tmpdir)) assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n" assert stderr == b"" assert rcode == 0 def test_random_bytes_as_base64_length(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\", length=1))'", path=str(tmpdir)) assert stdout == b"rg==\n" assert stderr == b"" assert rcode == 0 
bundlewrap-3.8.0/tests/unit/

bundlewrap-3.8.0/tests/unit/metadata.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from bundlewrap.utils.dicts import merge_dict
from bundlewrap.metadata import atomic, blame_changed_paths


def test_atomic_no_merge_base():
    assert merge_dict(
        {1: atomic([5])},
        {1: [6, 7]},
    ) == {1: [6, 7]}


def test_atomic_no_merge_update():
    assert merge_dict(
        {1: [5]},
        {1: atomic([6, 7])},
    ) == {1: [6, 7]}


def test_blame_and_merge():
    dict1 = {
        'key1': 11,
        'key2': {
            'key21': 121,
            'key22': 122,
        },
        'key3': {
            'key31': {
                'key311': [1311],
            },
        },
    }
    dict2 = {
        'key2': {
            'key21': 221,
        },
        'key3': {
            'key31': {
                'key311': [2311],
                'key312': 2312,
            },
        },
        'key4': 24,
    }
    from pprint import pprint
    blame = {}
    merged = merge_dict(
        {},
        dict1,
    )
    blame_changed_paths(
        {},
        merged,
        blame,
        "dict1",
    )
    pprint(blame)
    merged2 = merge_dict(
        merged,
        dict2,
    )
    blame_changed_paths(
        merged,
        merged2,
        blame,
        "dict2",
    )
    pprint(blame)
    should = {
        ('key1',): ("dict1",),
        ('key2',): ("dict1", "dict2"),
        ('key2', 'key21'): ("dict2",),
        ('key2', 'key22'): ("dict1",),
        ('key3',): ("dict1", "dict2"),
        ('key3', 'key31',): ("dict1", "dict2"),
        ('key3', 'key31', 'key311'): ("dict1", "dict2"),
        ('key3', 'key31', 'key312'): ("dict2",),
        ('key4',): ("dict2",),
    }
    pprint(should)
    assert blame == should
    assert merged2 == {
        'key1': 11,
        'key2': {
            'key21': 221,
            'key22': 122,
        },
        'key3': {
            'key31': {
                'key311': [1311, 2311],
                'key312': 2312,
            },
        },
        'key4': 24,
    }

bundlewrap-3.8.0/tests/unit/pkg_openbsd.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from bundlewrap.items.pkg_openbsd import parse_pkg_name

from pytest import raises


def test_not_found():
    found, version, flavor = parse_pkg_name("rsync", "irssi-1.0.4p0-socks")
    assert found is False


def test_only_version():
    found, version, flavor = parse_pkg_name("irssi", "irssi-1.0.4p0")
    assert found is True
    assert version == "1.0.4p0"
    assert flavor == ""


def test_version_and_flavor():
    found, version, flavor = parse_pkg_name("irssi", "irssi-1.0.4p0-socks")
    assert found is True
    assert version == "1.0.4p0"
    assert flavor == "socks"


def test_dashname_not_found():
    found, version, flavor = parse_pkg_name("rsync", "cyrus-sasl-2.1.26p24-pgsql")
    assert found is False


def test_dashname_only_version():
    found, version, flavor = parse_pkg_name("cyrus-sasl", "cyrus-sasl-2.1.26p24")
    assert found is True
    assert version == "2.1.26p24"
    assert flavor == ""


def test_dashname_version_and_flavor():
    found, version, flavor = parse_pkg_name("cyrus-sasl", "cyrus-sasl-2.1.26p24-pgsql")
    assert found is True
    assert version == "2.1.26p24"
    assert flavor == "pgsql"


def test_dashflavor_not_found():
    found, version, flavor = parse_pkg_name("rsync", "vim-8.0.0987p0-gtk2-lua")
    assert found is False


def test_dashflavor_version_and_flavor():
    found, version, flavor = parse_pkg_name("vim", "vim-8.0.0987p0-gtk2-lua")
    assert found is True
    assert version == "8.0.0987p0"
    assert flavor == "gtk2-lua"


def test_dashall_not_found():
    found, version, flavor = parse_pkg_name("rsync", "graphical-vim-8.0.0987p0-gtk2-lua")
    assert found is False


def test_dashall_found():
    # was a second "test_dashall_not_found", which shadowed the test above;
    # renamed here so both tests actually run
    found, version, flavor = parse_pkg_name("graphical-vim", "graphical-vim-8.0.0987p0-gtk2-lua")
    assert found is True
    assert version == "8.0.0987p0"
    assert flavor == "gtk2-lua"


def test_illegal_version_ends_with_dash():
    with raises(AssertionError):
        parse_pkg_name("dummy", "foo-1.0-")


def test_illegal_flavor_ends_with_dash():
    with raises(AssertionError):
        parse_pkg_name("dummy", "foo-1.0-bar-")


def test_illegal_no_version():
    with raises(AssertionError):
        parse_pkg_name("dummy", "foo-bar")


def test_illegal_no_name():
    with raises(AssertionError):
        parse_pkg_name("dummy", "1.0-flavor")


def test_illegal_only_version():
    with raises(AssertionError):
        parse_pkg_name("dummy", "1.0")


def test_illegal_empty_line():
    with raises(AssertionError):
        parse_pkg_name("dummy", "")

bundlewrap-3.8.0/tests/unit/utils_dicts.py

from bundlewrap.metadata import atomic
from bundlewrap.utils.dicts import map_dict_keys, reduce_dict


def test_dictmap():
    assert set(map_dict_keys({
        'key1': 1,
        'key2': {
            'key3': [3, 3, 3],
            'key4': atomic([4, 4, 4]),
            'key5': {
                'key6': "6",
            },
            'key7': set((7, 7, 7)),
        },
    })) == set([
        ("key1",),
        ("key2",),
        ("key2", "key3"),
        ("key2", "key4"),
        ("key2", "key5"),
        ("key2", "key5", "key6"),
        ("key2", "key7"),
    ])


def test_reduce_dict_two_lists():
    assert reduce_dict(
        [1, 2, 3],
        [1, 2],
    ) == [1, 2, 3]


def test_reduce_dict_list_and_dict():
    assert reduce_dict(
        [1, 2, 3],
        {'a': 4},
    ) == [1, 2, 3]


def test_reduce_dict_simple():
    assert reduce_dict(
        {'a': 1, 'b': 2},
        {'a': 3},
    ) == {'a': 1}


def test_reduce_dict_nested():
    full_dict = {
        'a': [{
            'b': 1,
            'c': 2,
        }],
        'd': 3,
    }
    template_dict = {
        'a': [{
            'b': None,
        }],
        'd': None,
        'e': None,
    }
    assert reduce_dict(full_dict, template_dict) == {
        'a': [{
            'b': 1,
        }],
        'd': 3,
    }

bundlewrap-3.8.0/tests/unit/utils_table.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from bundlewrap.utils.table import ROW_SEPARATOR, render_table


def test_render_table():
    assert "\n".join(render_table([
        ["head1", "h2"],
        ROW_SEPARATOR,
        ["1", "2"]
    ], alignments={0: 'right'})) == """
╭───────┬────╮
│ head1 │ h2 │
├───────┼────┤
│     1 │ 2  │
╰───────┴────╯
""".strip()

bundlewrap-3.8.0/tests/unit/utils_text.py

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from datetime import timedelta

from bundlewrap.utils.text import (
    ansi_clean,
    bold,
    format_duration,
    red,
    parse_duration,
)


def test_ansi_clean():
    assert red("test") != "test"
    assert len(red("test")) != len("test")
    assert ansi_clean(red("test")) == "test"
    assert ansi_clean(bold(red("test"))) == "test"


def test_format_duration():
    assert format_duration(timedelta()) == "0s"
    assert format_duration(timedelta(seconds=10)) == "10s"
    assert format_duration(timedelta(minutes=10)) == "10m"
    assert format_duration(timedelta(hours=10)) == "10h"
    assert format_duration(timedelta(days=10)) == "10d"
    assert format_duration(timedelta(days=1, hours=2, minutes=3, seconds=4)) == "1d 2h 3m 4s"


def test_parse_duration():
    assert parse_duration("0s") == timedelta()
    assert parse_duration("10s") == timedelta(seconds=10)
    assert parse_duration("10m") == timedelta(minutes=10)
    assert parse_duration("10h") == timedelta(hours=10)
    assert parse_duration("10d") == timedelta(days=10)
    assert parse_duration("1d 2h 3m 4s") == timedelta(days=1, hours=2, minutes=3, seconds=4)


def test_parse_format_inverse():
    for duration in (
        "0s",
        "1s",
        "1m",
        "1h",
        "1d",
        "1d 4h",
        "1d 4h 7s",
    ):
        assert format_duration(parse_duration(duration)) == duration
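
# Editor's note: a compact, self-contained illustration of the merge semantics
# the unit tests above pin down — merge_dict() merges nested mappings and
# concatenates lists (see test_blame_and_merge's [1311, 2311]), while atomic()
# opts a value out of merging so the update replaces it wholesale. The keys
# 'ports' and 'limits' are hypothetical examples, not taken from any repo:

from bundlewrap.metadata import atomic
from bundlewrap.utils.dicts import merge_dict


def test_merge_dict_vs_atomic_sketch():
    base = {'ports': [80], 'limits': {'soft': 1}}
    update = {'ports': [443], 'limits': {'hard': 2}}
    # plain merge: lists are concatenated, nested dicts are merged key by key
    assert merge_dict(base, update) == {
        'ports': [80, 443],
        'limits': {'soft': 1, 'hard': 2},
    }
    # atomic() suppresses the merge: the updating list replaces the base list
    assert merge_dict(base, {'ports': atomic([443])})['ports'] == [443]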