==> bundlewrap-4.13.6/.deepsource.toml <==

version = 1

test_patterns = ["tests/**"]

[[analyzers]]
name = "python"
enabled = true

[analyzers.meta]
runtime_version = "3.x.x"

==> bundlewrap-4.13.6/.github/workflows/tests.yml <==

name: Tests

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.6"
          - "3.7"
          - "3.8"
          - "3.9"
          - "3.10"
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        python -m pip install -r requirements.txt
        python -m pip install .
    - name: Set up SSH
      run: |
        ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -N ""
        cp ~/.ssh/id_ed25519.pub ~/.ssh/authorized_keys
        chmod og-rw ~
        ssh -o StrictHostKeyChecking=no localhost id
        mkdir -p ~/.ssh/controlmasters
        echo -e "Host localhost\n  ControlPath ~/.ssh/controlmasters/%C\n  ControlPersist 1h\n  ControlMaster auto" > ~/.ssh/config
    - name: Test with pytest
      run: |
        pytest --assert=plain tests  # https://github.com/pytest-dev/pytest/issues/9174

==> bundlewrap-4.13.6/.gitignore <==

# Please add only project-specific paths here and make use of your
# global gitignore for everything else:
# https://docs.github.com/en/github/using-git/ignoring-files#configuring-ignored-files-for-all-repositories-on-your-computer

/build/
/dist/
/docs/build/

==> bundlewrap-4.13.6/AUTHORS <==

# By adding your name to this file you agree to the Copyright Assignment
# Agreement found in the CAA.md file in this repository.

Torsten Rehn
Peter Hofmann
Tim Buchwaldt
Rico Ullmann
Christian Nicolai
Galen Abell
Franziska Kunsmann

==> bundlewrap-4.13.6/CAA.md <==

# BundleWrap Individual Contributor Copyright Assignment Agreement

Thank you for your interest in contributing to the BundleWrap open-source project, currently owned and represented by [Torsten Rehn](mailto:torsten@rehn.email) ("We" or "Us"). This contributor agreement ("Agreement") documents the rights granted by contributors to Us. To make this document effective, please sign it and send it to Us by email or electronic submission, following the instructions at [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing). This is a legally binding document, so please read it carefully before agreeing to it. The Agreement may cover more than one software project managed by Us.

## 1. Definitions

"You" means the individual who Submits a Contribution to Us.

"Contribution" means any work of authorship that is Submitted by You to Us in which You own or assert ownership of the Copyright.
If You do not own the Copyright in the entire work of authorship, please follow the instructions in [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing). "Copyright" means all rights protecting works of authorship owned or controlled by You, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence including any extensions by You. "Material" means the work of authorship which is made available by Us to third parties. When this Agreement covers more than one software project, the Material means the work of authorship to which the Contribution was Submitted. After You Submit the Contribution, it may be included in the Material. "Submit" means any form of electronic, verbal, or written communication sent to Us or our representatives, including but not limited to electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, Us for the purpose of discussing and improving the Material, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution." "Submission Date" means the date on which You Submit a Contribution to Us. "Effective Date" means the date You execute this Agreement or the date You first Submit a Contribution to Us, whichever is earlier. ## 2. Grant of Rights ### 2.1 Copyright Assignment 1) At the time the Contribution is Submitted, You assign to Us all right, title, and interest worldwide in all Copyright covering the Contribution; provided that this transfer is conditioned upon compliance with Section 2.3. 2) To the extent that any of the rights in Section 2.1.1 cannot be assigned by You to Us, You grant to Us a perpetual, worldwide, exclusive, royalty-free, transferable, irrevocable license under such non-assigned rights, with rights to sublicense through multiple tiers of sublicensees, to practice such non-assigned rights, including, but not limited to, the right to reproduce, modify, display, perform and distribute the Contribution; provided that this license is conditioned upon compliance with Section 2.3. 3) To the extent that any of the rights in Section 2.1.1 can neither be assigned nor licensed by You to Us, You irrevocably waive and agree never to assert such rights against Us, any of our successors in interest, or any of our licensees, either direct or indirect; provided that this agreement not to assert is conditioned upon compliance with Section 2.3. 4) Upon such transfer of rights to Us, to the maximum extent possible, We immediately grant to You a perpetual, worldwide, non-exclusive, royalty-free, transferable, irrevocable license under such rights covering the Contribution, with rights to sublicense through multiple tiers of sublicensees, to reproduce, modify, display, perform, and distribute the Contribution. The intention of the parties is that this license will be as broad as possible and to provide You with rights as similar as possible to the owner of the rights that You transferred. This license back is limited to the Contribution and does not provide any rights to the Material. 
### 2.2 Patent License For patent claims including, without limitation, method, process, and apparatus claims which You own, control or have the right to grant, now or in the future, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable patent license, with the right to sublicense these rights to multiple tiers of sublicensees, to make, have made, use, sell, offer for sale, import and otherwise transfer the Contribution and the Contribution in combination with the Material (and portions of such combination). This license is granted only to the extent that the exercise of the licensed rights infringes such patent claims; and provided that this license is conditioned upon compliance with Section 2.3. ### 2.3 Outbound License As a condition on the grant of rights in Sections 2.1 and 2.2, We agree to license the Contribution only under the terms of the license or licenses which We are using on the Submission Date for the Material (including any rights to adopt any future version of a license if permitted). ### 2.4 Moral Rights If moral rights apply to the Contribution, to the maximum extent permitted by law, You waive and agree not to assert such moral rights against Us or our successors in interest, or any of our licensees, either direct or indirect. ### 2.5 Our Rights You acknowledge that We are not obligated to use Your Contribution as part of the Material and may decide to include any Contribution We consider appropriate. ### 2.6 Reservation of Rights Any rights not expressly assigned or licensed under this section are expressly reserved by You. ## 3. Agreement You confirm that: 1) You have the legal authority to enter into this Agreement. 2) You own the Copyright and patent claims covering the Contribution which are required to grant the rights under Section 2. 3) The grant of rights under Section 2 does not violate any grant of rights which You have made to third parties, including Your employer. If You are an employee, You have had Your employer approve this Agreement or sign the Entity version of this document. If You are less than eighteen years old, please have Your parents or guardian sign the Agreement. 4) You have followed the instructions in [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing), if You do not own the Copyright in the entire work of authorship Submitted. ## 4. Disclaimer EXCEPT FOR THE EXPRESS WARRANTIES IN SECTION 3, THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US AND BY US TO YOU. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION TO THE MINIMUM PERIOD PERMITTED BY LAW. ## 5. Consequential Damage Waiver TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU OR US BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH THE CLAIM IS BASED. ## 6. Miscellaneous ### 6.1 This Agreement will be governed by and construed in accordance with the laws of Germany excluding its conflicts of law provisions. 
Under certain circumstances, the governing law in this section might be superseded by the United Nations Convention on Contracts for the International Sale of Goods ("UN Convention") and the parties intend to avoid the application of the UN Convention to this Agreement and, thus, exclude the application of the UN Convention in its entirety to this Agreement.

### 6.2

This Agreement sets out the entire agreement between You and Us for Your Contributions to Us and overrides all other agreements or understandings.

### 6.3

If You or We assign the rights or obligations received through this Agreement to a third party, as a condition of the assignment, that third party must agree in writing to abide by all the rights and obligations in the Agreement.

### 6.4

The failure of either party to require performance by the other party of any provision of this Agreement in one situation shall not affect the right of a party to require such performance at any time in the future. A waiver of performance under a provision in one situation shall not be considered a waiver of the performance of the provision in the future or a waiver of the provision in its entirety.

### 6.5

If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and which is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law.

==> bundlewrap-4.13.6/CHANGELOG.md <==

# 4.13.6

2022-01-28

* show file path on TOML parse error
* improved performance of `bw test -M`
* fixed `bw plot groups-for-node` not showing all memberships

# 4.13.5

2022-01-15

* fixed some issues in `bw plot`
* fixed `zfs_dataset` not allowing unmanaged pools
* fixed `zfs_dataset` creating deps based on identical mountpoints
* fixed `node.metadata` not being recognized as a Mapping

# 4.13.4

2021-12-01

* fixed addressing for some routeros items
* fixed occasional socket errors for routeros items

# 4.13.3

2021-11-18

* fixed empty comments not returned by RouterOS API

# 4.13.2

2021-11-24

* fixed concurrency issues with RouterOS
* added workaround for setting `comment` on RouterOS items

# 4.13.1

2021-11-10

* fixed tomlkit types leaking into nodes/metadata

# 4.13.0

2021-11-05

* added support for Python 3.10
* added filtering for internal attributes in `bw items`
* added `pkg_pamac`
* added `svc_freebsd`
* added support for arbitrary `zfs_dataset` attributes
* relaxed metadata type conversion
* fixed `pkg_pip` not showing error output
* fixed concurrent execution of some package manager operations

# 4.12.0

2021-09-22

* added `test_with` to `file` items
* added `download` `content_type` to `file` items
* added `bw pw`

# 4.11.2

2021-08-16

* fixed detection of non-existing `zfs_pools`
* fixed `config` and `ashift` attributes of `zfs_pool` items not being marked as `when_creating`
* fixed `when_creating` attributes not being shown in diffs on apply

# 4.11.1

2021-08-11

* fixed another premature `MetadataPersistentKeyError`

# 4.11.0

2021-08-11

* added `zfs_pool` and `zfs_dataset` items
* added `bw plot reactors`
* added `bw lock show -i`
* improved metadata reactor performance and debug logging
* improved exception handling during `bw apply`
* CTRL+C now results in exit code 130
* fixed directory permissions not being applied reliably with GNU `chmod`
* fixed `bw test -p` not catching some invalid returns
* fixed item name validation allowing empty names
* fixed display of "missing" attributes
* fixed display of created directories
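As an illustration of the `zfs_pool` and `zfs_dataset` items added in 4.11.0, here is a minimal sketch of how they might be declared in a bundle's `items.py`. The pool layout format and the dataset options shown are assumptions based on typical usage, not something this changelog specifies:

```python
# items.py of some bundle -- a hedged sketch, names and devices invented

zfs_pools = {
    "tank": {
        # per the 4.11.2 entry above, "config" and "ashift" are
        # when_creating attributes: they only apply when the pool
        # is first created
        "when_creating": {
            "config": [
                {"type": "mirror", "devices": {"/dev/sda", "/dev/sdb"}},
            ],
            "ashift": 12,
        },
    },
}

zfs_datasets = {
    "tank/data": {
        "mountpoint": "/mnt/data",
        # arbitrary dataset properties are accepted as of 4.13.0
        "compression": "lz4",
    },
}
```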
# 4.10.1

2021-07-07

* fixed a premature `MetadataPersistentKeyError`

# 4.10.0

2021-07-06

* enable iteration over Faults
* fixed using sets as metadata in TOML nodes

# 4.9.0

2021-06-28

* added `repo.vault.cmd()`
* postgres items can now be used with `doas` instead of `sudo`
* improved error reporting of `git_deploy`
* fixed dependencies being skipped when using `bw apply -o`
* fixed user and group management on BSD

# 4.8.2

2021-05-27

* fixed clobbered env vars for `git_deploy`
* fixed `pkg_pip` failing with underscores in package names

# 4.8.1

2021-05-19

* improved exception reporting for `bw verify` and `git_deploy`
* fixed metadata source attribution in `bw metadata -b`
* fixed `AttributeError` in `bw diff -i -b`
* fixed late detection of duplicate items
* fixed `bw diff` not showing anything useful for single nodes
* fixed and optimized checking order of item skip conditions
* fixed garbled output in files produced by `BW_DEBUG_LOG_DIR`

# 4.8.0

2021-05-02

* added support for RouterOS
* fixed k8s objects not being retrieved with the correct apiVersion

# 4.7.1

2021-03-29

* fixed `bw lock show` hiding output if it failed to connect to any host
* fixed `bw test -m` not handling cdict mismatches properly

# 4.7.0

2021-03-24

* added `skip` item attribute
* added `before` and `after` soft dependencies
* added `--only` and `--skip` to `bw verify`
* improved `bw plot node` to properly show all 7 types of item dependencies
* fixed metadata reactors being able to corrupt metadata in some cases

# 4.6.0

2021-02-25

* added `pkg_apk` and `svc_openrc` items
* actions can now be inspected with `bw items`
* `bw test -d` now shows a diff for config changes
* fixed display of strings in `bw items --attrs`

# 4.5.1

2021-02-19

* fixed actions that set `None` as `expected_return_code`

# 4.5.0

2021-02-19

* added diffs to the default output of `bw apply` and `bw verify`
* added `bw apply --no-diff`
* added `bw verify --no-diff`
* added `pkg_freebsd`
* added canned `stop` actions for services
* added `masked` attribute for `svc_systemd`
* added multiple expected return codes for actions
* improved error message for incompatible types in diff
* fixed group management on FreeBSD
* fixed types from tomlkit not being diffable
* fixed using Faults for user password salts
* fixed `bw repo create` clobbering existing repos

# 4.4.2

2021-01-22

* full tracebacks are now shown by default for exceptions in file templates
* fixed a `RuntimeError` related to a metadata concurrency issue

# 4.4.1

2021-01-20

* fixed `bw test -p` quietness
* fixed dependency loop detection between empty tags
* fixed missing dict methods on `node.metadata`

# 4.4.0

2021-01-20

* added `Fault.as_htpasswd_entry()`
* added tag inheritance through `bundle.py`
* optimized performance of metadata generation based on `@metadata_reactor.provides()`
* fixed `TypeError` in `bw plot`
* fixed `needs` from tags not being applied to items
* fixed unused tags not passing on their dependencies
* removed experimental metadata caching

# 4.3.0

2020-12-23

* added support for Python 3.9
* added supergroups as a reverse direction for the existing subgroups
* added `bundle.py`
* added metadata caching (EXPERIMENTAL)
* added `@metadata_reactor.provides()` (EXPERIMENTAL)
* reworked item selectors
* sorted summary table for `bw apply`
* fixed handling of k8s apiVersions
* fixed canned actions not being skipped if their parent item is skipped
* pipe output to `less` if there are too many lines
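The metadata reactors referenced throughout the 4.3.0 and 4.4.0 entries above live in a bundle's `metadata.py`. A minimal sketch with invented keys (`metadata_reactor` is injected into this file's namespace by BundleWrap, so no import is needed):

```python
# metadata.py of some bundle -- illustrative sketch, keys invented

defaults = {
    "myapp": {
        "port": 8080,
    },
}


@metadata_reactor.provides("myapp/url")
def url(metadata):
    # declaring the provided key path is what enables the
    # scheduling optimization mentioned in the 4.4.0 entry
    return {
        "myapp": {
            "url": f"http://localhost:{metadata.get('myapp/port')}",
        },
    }
```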
# 4.2.2

2020-10-30

* fixed tomlkit types not being accepted as statedict values

# 4.2.1

2020-10-15

* fixed unintended Fault evaluation in metadata collision error message
* fixed sorting of Faults with other types
* fixed display of paged output on large macOS terminals
* fixed svc_openbsd being applied concurrently
* fixed services being reloaded and restarted at the same time
* fixed possible mangling of group metadata from items.py

# 4.2.0

2020-09-21

* added `BW_GIT_DEPLOY_CACHE`
* added `lock_dir` node attribute
* added `pip_command` node attribute
* Fault callbacks can now accept some unhashable parameters (such as dicts)

# 4.1.1

2020-08-12

* improved reporting of invalid types in metadata
* improved error output of `bw test -m`
* fixed recognition of JSON files as text
* fixed a rare case of nodes not having their metadata built to completion
* fixed a column sorting issue in `bw nodes`

# 4.1.0

2020-07-27

* added `bw test --quiet`
* `apply_start` hook can now raise GracefulApplyException
* performance improvements in metadata generation
* improved reporting of persistent metadata KeyErrors
* clashing metadata keys are now allowed for equal values
* git_deploy: fixed attempted shallow clones over HTTP
* k8s: improved handling of absent `apiVersion`
* fixed `cascade_skip` not affecting recursively skipped items
* fixed `bw metadata -b -k`
* fixed metadata reactors seeing their own previous results
* fixed SCM information being returned as bytes

# 4.0.0

2020-06-22

* new metadata processor API (BACKWARDS INCOMPATIBLE)
* removed `template_node` node attribute (BACKWARDS INCOMPATIBLE)
* removed support for Python 2.7 (BACKWARDS INCOMPATIBLE)
* removed support for Python 3.4 (BACKWARDS INCOMPATIBLE)
* removed support for Python 3.5 (BACKWARDS INCOMPATIBLE)
* removed `members_add/remove` attribute for groups (BACKWARDS INCOMPATIBLE)
* removed `bw --adhoc-nodes` (BACKWARDS INCOMPATIBLE)
* added `locking_node` node attribute
* added `bw diff`
* added `bw metadata -b`
* added `bw metadata --hide-defaults`
* added `bw metadata --hide-reactors`
* added `bw metadata --hide-groups`
* added `bw metadata --hide-node`
* added `git_deploy` items (formerly a plugin)
* added paging and color-coding for metadata sources to `bw metadata`
* removed `bw metadata --table`, now done automatically (BACKWARDS INCOMPATIBLE)
* removed `bw repo plugin` (BACKWARDS INCOMPATIBLE)
* removed `bw test --secret-rotation` (BACKWARDS INCOMPATIBLE)
* renamed `bw test --metadata-collisions` to `bw test --metadata-conflicts` (BACKWARDS INCOMPATIBLE)
* reworked passing multi-value options on CLI (BACKWARDS INCOMPATIBLE)
* `bw apply` will now exit with return code 1 if even a single item fails
* `items/` is now searched recursively
* failed items will now show what commands they ran and what their output was

# 3.9.0

2020-05-04

* added lambda expressions for CLI node selection
* added `groups` attribute to nodes
* added support for Python 3.8
* k8s: bumped `apiVersion` where appropriate
* fixed handling of `apiVersion` and `status`
* fixed KeyError on k8s item collision

# 3.8.0

2020-01-09

* `k8s_raw`: added support for items without a namespace
* `k8s_raw`: fixed overriding resource name in YAML
* `k8s_raw`: allow using builtin item types if there are no actual conflicts
* decryption keys can now be set within encrypted files
* improved detection of incorrect metadata processor usage
* fixed excessive skipping of items because of concurrency dependencies
* fixed `preceded_by` not working for actions

# 3.7.0

2019-10-07

* Faults are now accepted as item attribute values
* Filter objects, iterators and such can now be used as item attribute values
* `BW_VAULT_DUMMY_MODE` will now yield dummy passwords of requested length
* added `repo.vault.random_bytes_as_base64_for()`
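The first 3.7.0 bullet above is easiest to see in a bundle's `items.py`: a Fault from `repo.vault` can be assigned directly to an item attribute and is only resolved when the item is actually applied. A hedged sketch, with the file path and secret identifier invented:

```python
# items.py of some bundle -- `repo` and `node` are provided by BundleWrap

files = {
    "/etc/myapp/secret.conf": {
        # password_for() returns a Fault, not a string;
        # BundleWrap resolves it lazily during apply
        "content": repo.vault.password_for(f"myapp secret {node.name}"),
        "mode": "0600",
    },
}
```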
# 3.6.2

2019-07-25

* fixed `None` not being accepted as a file/directory mode
* fixed overriding resource name in k8s manifests

# 3.6.1

2019-03-12

* Faults can now be sorted
* fixed detection of runtime-enabled `svc_systemd`
* fixed resolving nested Faults

# 3.6.0

2019-02-27

* added `bw apply --only`
* added `Fault.b64encode()`
* added support for using Faults in k8s manifests
* improved display of some skipped items
* improved error handling during `bw apply`
* improved handling of offline nodes in `bw verify`
* fixed corrupted hard lock warning
* fixed interactively overwriting symlinks/dirs

# 3.5.3

2018-12-27

* added error message when trying to access node bundles from `members_add/remove`
* improved performance for file verification
* fixed symlinks being mistaken for directories in some circumstances

# 3.5.2

2018-12-11

* fixed IO activation/deactivation when using bw as a library
* fixed `atomic()` being removed prematurely during metadata processing

# 3.5.1

2018-07-08

* added support for Python 3.7
* fixed merged metadata not overwriting atomic() values

# 3.5.0

2018-06-12

* added `template_node` node attribute
* actions are now included in `bw verify`
* improved error message for KeyErrors in Mako templates
* fixed hashing for filenames with escaped characters
* fixed AttributeError when reverse-depending on `bundle:` items

# 3.4.0

2018-05-02

* added k8s_clusterrole items
* added k8s_clusterrolebinding items
* added k8s_crd items
* added k8s_networkpolicy items
* added k8s_raw items
* added k8s_role items
* added k8s_rolebinding items
* added Kubernetes item preview with `bw items -f`
* improved handling of exceptions during `bw verify` and `bw apply`
* improved progress display during `bw run`

# 3.3.0

2018-03-09

* added experimental support for Kubernetes
* some hooks can now raise an exception to skip nodes
* fixed ED25519 public keys not being recognized as text files
* fixed package names with hyphens for pkg_openbsd
* fixed diff for user groups

# 3.2.1

2018-01-08

* fixed metadata key filter for `bw metadata --blame`
* fixed pkg_openbsd reported incorrectly as having wrong flavor installed
* fixed crash when declining actions interactively

# 3.2.0

2018-01-01

* items skipped because of "unless" or "not triggered" are no longer shown during `bw apply`
* added `BW_SCP_ARGS`
* added `bw metadata --blame`
* added `bw test --metadata-keys`
* added flavor support to pkg_openbsd
* fixed changing symlink targets if previous target is a dir
* fixed display of some item attributes during `bw apply` and `bw verify`
* fixed handling of postgres DBs/roles with hyphens in them

# 3.1.1

2017-10-24

* will now detect bad wrappers around metadata processors
* fixed crash in `bw plot`
* fixed cut off status lines

# 3.1.0

2017-10-10

* added pkg_opkg items
* added `bw test -s`
* improved error messages for unknown reverse triggers
* fixed hash_method md5 on user items
* fixed cursor sometimes not being restored

# 3.0.3

2017-10-04

* dropped support for Python 3.3
* fixed `bw` trying to hide the cursor without a TTY present
* fixed `ImportError` with Python 2.7

# 3.0.2

2017-10-04

* improved status line
* `bw test` is now more responsive to SIGINT
* sorted bundle and group lists in `bw nodes` output
* fixed an issue with symlinks failing if fixing both target and ownership
* fixed `bw run` with dummy nodes
* fixed progress exceeding 100% during `bw apply`
* fixed progress intermittently being stuck at 100% during `bw test`
* fixed incorrect display of fixed item properties
* fixed `bw metadata --table` being unable to show None
* fixed `bw metadata` hiding KeyErrors

# 3.0.1

2017-09-25

* fixed `bw run`
* fixed `bw test -e`

# 3.0.0

2017-09-24

* new metadata processor API and options (BACKWARDS INCOMPATIBLE)
* files, directories, and symlinks now have defaults for owner, group, and mode (BACKWARDS INCOMPATIBLE)
* overhauled options and output of `bw groups` (BACKWARDS INCOMPATIBLE)
* overhauled options and output of `bw nodes` (BACKWARDS INCOMPATIBLE)
* overhauled options and output of `bw run` (BACKWARDS INCOMPATIBLE)
* overhauled options of `bw test` (BACKWARDS INCOMPATIBLE)
* svc_systemd services are now 'enabled' by default (BACKWARDS INCOMPATIBLE)
* `bw items --file-preview` no longer uses a separate file path argument (BACKWARDS INCOMPATIBLE)
* removed `bw apply --profiling` (BACKWARDS INCOMPATIBLE)
* removed `Item.display_keys()` (BACKWARDS INCOMPATIBLE)
* changed return value of `Item.display_dicts()` (BACKWARDS INCOMPATIBLE)
* changed `Item.BLOCK_CONCURRENT` into a class method (BACKWARDS INCOMPATIBLE)
* removed `repo.vault.format()` (BACKWARDS INCOMPATIBLE)
* removed env vars: BWADDHOSTKEYS, BWCOLORS, BWITEMWORKERS, BWNODEWORKERS (BACKWARDS INCOMPATIBLE)
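Since 3.0.0 above, files, directories, and symlinks come with defaults for `owner`, `group`, and `mode`, so an `items.py` entry only needs to spell out the exceptions. A short hypothetical excerpt (paths and values invented):

```python
directories = {
    "/var/lib/myapp": {
        "owner": "myapp",
        "group": "myapp",
        "mode": "0750",
    },
}

files = {
    "/etc/myapp.conf": {
        "source": "myapp.conf",   # rendered from the bundle's files/ directory
        "content_type": "mako",
        # owner, group, and mode are left at their defaults
    },
}
```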
# 2.20.1

2017-09-21

* improved performance of metadata processors
* pkg_* and svc_* items no longer throw exceptions when their commands fail
* fixed BW_DEBUG_LOG_DIR with `bw debug`
* fixed 'precedes' attribute for actions

# 2.20.0

2017-08-15

* added progress info shown on SIGQUIT (CTRL+\\)
* added pkg_snap items
* fixed checking for dummy nodes during `bw lock`
* fixed handling of missing Faults for actions
* fixed handling of missing Faults for `bw items -w`

# 2.19.0

2017-07-05

* actions can now receive data over stdin
* added `Node.magic_number`
* added `bw apply --resume-file`
* added hooks for `bw lock`
* added `bw metadata --table`
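The first 2.19.0 bullet above ("actions can now receive data over stdin") is easiest to show in an `items.py` sketch. The item name and commands are invented, and the `data_stdin` attribute name is an assumption:

```python
actions = {
    "import_schema": {
        "command": "psql -U myapp -f -",
        # assumed attribute name for the stdin feature from 2.19.0
        "data_stdin": "CREATE TABLE IF NOT EXISTS mytable (id int);",
        # the action is skipped entirely if this command exits 0
        "unless": "psql -U myapp -c 'SELECT 1 FROM mytable' >/dev/null",
        "expected_return_code": 0,
    },
}
```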
# 2.18.1

2017-06-01

* fixed display of comments for actions

# 2.18.0

2017-05-22

* added encoding and collation to postgres_db items
* added the 'comment' attribute for all items
* fixed group deletion
* fixed accidental modification of lists in statedicts

# 2.17.1

2017-04-19

* fixed parent groups not being removed by subgroups' members_remove
* fixed `bw lock` trying to connect to dummy nodes

# 2.17.0

2017-03-26

* pkg_apt: added start_service attribute
* pkg_apt: added support for multiarch packages
* improved reporting of exceptions in metadata processors
* fixed package cache leaking across nodes

# 2.16.0

2017-02-23

* added `BW_TABLE_STYLE`
* added more Unicode tables
* added number of bundles and metadata processors to `bw stats`
* added oraclelinux to `OS_FAMILY_REDHAT`
* added option to ignore running status of systemd services
* improved circular dependency debugging
* improved reporting of dependency errors
* fixed avoidance of circular dependencies
* fixed dealing with SUID and SGID on directories
* fixed debug logging on Python 2.7
* fixed duplicates in `Group.subgroups`
* fixed handling of subgroup patterns in `bw plot group`

# 2.15.0

2017-01-19

* added item and attribute arguments to `bw items`
* added orphaned bundle warnings to `bw test`
* fixed regression when removing soft locks

# 2.14.0

2017-01-16

* added key filtering to `bw metadata`
* added `repo.vault.human_password_for()`
* added `BW_REPO_PATH` and `bw --repo-path`
* quotes are no longer required around commands with `bw run`
* fixed intermittent circular dependencies with multiple custom items using BLOCK_CONCURRENT
* fixed exception when removing non-existent soft lock

# 2.13.0

2017-01-05

* added tuple return option to metadata processors
* improved CLI output in various places
* improved performance during dependency processing
* improved performance when checking packages
* fixed hashing of metadata containing sets
* fixed exception with `svc_upstart` when service doesn't exist

# 2.12.2

2016-12-23

* added support for Python 3.6
* changed diff line length limit from 128 to 1024 characters
* fixed deadlock in Group.members_remove
* fixed unknown subgroups not being detected properly

# 2.12.1

2016-12-20

* fixed exception when changing owner of postgres databases
* fixed postgres roles requiring a password even when deleted
* fixed incorrect exit codes in some situations with `bw test`

# 2.12.0

2016-11-28

* added `BW_DEBUG_LOG_DIR`
* improved reporting of action failures
* fixed `bw plot groups` and `bw plot groups-for-node`
* fixed access to partial metadata in `Group.members_add` and `_remove`

# 2.11.0

2016-11-14

* added `bw nodes --inline`
* added `Group.members_add` and `.members_remove`
* fixed symlinks not overwriting other path types
* fixed `precedes` and `triggers` for bundle, tag and type items
* fixed diffs for sets and tuples

# 2.10.0

2016-11-03

* added pkg_dnf items
* added rudimentary string operations on Faults
* added Fault documentation
* added `bw test --config-determinism` and `--metadata-determinism`
* improved debugging facilities for metadata processor loops
* improved handling and reporting of missing Faults

# 2.9.1

2016-10-18

* fixed `bw verify` without `-S`
* fixed asking for changes to directory items

# 2.9.0

2016-10-17

* added directory purging
* added `bw --adhoc-nodes`
* improved handling of unknown nodes/groups
* improvements to `bw nodes`
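Directory purging from 2.9.0 above deletes anything inside a managed directory that no item in the repo accounts for. A minimal hypothetical example:

```python
directories = {
    "/etc/nginx/sites-enabled": {
        # remove files in this directory that are not
        # managed by items in this repository
        "purge": True,
    },
}
```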
# 2.8.0

2016-09-12

* added `BW_HARDLOCK_EXPIRY` env var
* added `bw hash --group`
* added `subgroup_patterns`
* added `bw test --ignore-missing-faults`
* added `node.cmd_wrapper_inner` and `_outer`
* added `node.os_version`
* fixed exception handling under Python 2
* fixed partial metadata not being completed in some cases

# 2.7.1

2016-07-15

* improved responsiveness to SIGINT during metadata generation
* fixed SIGINT handling on Python 2.7

# 2.7.0

2016-07-15

* `bw lock show` can now show entire groups
* `bw` can now be invoked from any subdirectory of a repository
* added `bw hash --metadata`
* added `bw nodes --attrs`
* added `repo.vault.format`
* added graceful handling of SIGINT
* added log level indicator to debug output
* added `node.dummy` attribute
* added `BW_SSH_ARGS` environment variable
* `bash` is no longer required on nodes
* `node.os` and `node.use_shadow_passwords` can now be set at the group level
* sets are now allowed in metadata
* optimized execution of metadata processors
* fixed `bw apply --force` with unlocked nodes
* fixed `bw test` not detecting merge of lists in unrelated groups' metadata
* fixed installation of some pkg_openbsd
* fixed piping into `bw apply -i`
* fixed handling user names with non-ASCII characters
* fixed skipped and failed items sometimes being handled incorrectly
* fixed error with autoskipped triggered items
* fixed skip reason for some soft locked items

# 2.6.1

2016-05-29

* fixed accidentally changed default salt for user items

# 2.6.0

2016-05-29

* added support for OpenBSD packages and services
* added soft locking mechanism
* added `enabled` option for `svc_systemd`
* fixed running compound commands

# 2.5.2

2016-05-04

* fixed compatibility with some exotic node shells
* fixed quitting at question prompts
* fixed creating files with content_type 'any'

# 2.5.1

2016-04-07

* fixed false positive on metadata collision check

# 2.5.0

2016-04-04

* improved performance and memory usage
* added metadata conflict detection to `bw test`
* added metadata type validation
* added `BW_VAULT_DUMMY_MODE`
* added q(uit) option to questions
* output disabled by default when using as a library
* fixed `bw hash -d`
* fixed excessive numbers of open files
* fixed partial metadata access from metadata processors

# 2.4.0

2016-03-20

* added `bw plot group`
* added `bw plot groups-for-node`
* `bw` will now check requirements.txt in your repo before doing anything
* improved output of `--help`
* metadata processors now have access to partial node metadata while it is being compiled
* fixed `bw test` when using more than the default number of node workers
* fixed passing Faults to `postgres_role` and `users`
* fixed detection of non-existent paths on CentOS and others

# 2.3.1

2016-03-15

* fixed handling of 'generate' keys for `repo.vault`

# 2.3.0

2016-03-15

* added `repo.vault` for handling secrets
* circular dependencies are now detected by `bw test`
* fixed handling of broken pipes in internal subprocesses
* fixed previous input being read when asking a question
* fixed reading non-ASCII templates on systems with ASCII locale
* `bw apply` and `bw verify` now exit with return code 1 if there are errors

# 2.2.0

2016-03-02

* added item tagging
* added `bw apply --skip`
* fixed newline warning on long diff files
* fixed calling `bw` without arguments
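Item tagging from 2.2.0 above lets unrelated bundles coordinate: items carry free-form tags, and other items can depend on a tag instead of a concrete item. A sketch with invented names:

```python
pkg_apt = {
    "myapp": {
        "tags": ["myapp-setup"],
    },
}

svc_systemd = {
    "myapp": {
        "running": True,
        # depends on every item tagged "myapp-setup",
        # no matter which bundle defines it
        "needs": ["tag:myapp-setup"],
    },
}
```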
# 2.1.0

2016-02-25

* added `bw stats`
* added `bw items --file-preview`
* added hooks for `bw test`
* reason for skipping an item is now displayed in regular output
* fixed exception handling for invalid cdicts/sdicts
* fixed handling of SSH errors
* fixed broken diffs caused by partial file downloads
* fixed interactive prompts sometimes not reading input correctly

# 2.0.1

2016-02-22

* fixed display of failed actions
* updated display of interactive lock override prompt
* improved robustness of internal output subsystem

# 2.0.0

2016-02-22

* added support for Python 3.3+
* switched from Fabric/Paramiko to OpenSSH
* removed SSH and sudo passwords **(BACKWARDS INCOMPATIBLE)**
* metadata is now merged recursively **(BACKWARDS INCOMPATIBLE)**
* file items: the source attribute now has a default **(BACKWARDS INCOMPATIBLE)**
* file items: the default content_type is now text **(BACKWARDS INCOMPATIBLE)**
* reworked command line options for `bw verify` **(BACKWARDS INCOMPATIBLE)**
* `cascade_skip` now defaults to `False` if the item is triggered or uses `unless` **(BACKWARDS INCOMPATIBLE)**
* `bw verify` and `bw apply` now show incorrect/fixed/failed attributes
* `bw apply` now uses a status line to show current activity
* generally improved output formatting

# 1.6.0

2016-02-22

* added `bw migrate` **(will be removed in 2.0.0)**
* added warnings for upgrading to 2.0.0 **(will be removed in 2.0.0)**

# 1.5.1

2015-06-11

* clean up local lock files
* fixed detection of some types of directories
* fixed exception spam when trying to load internal attributes as libs

# 1.5.0

2015-05-10

* added postgres_db and postgres_role items
* added `bw verify --only-needs-fixing`
* added `bw verify --summary`
* added `Repository.nodes_in_group()`
* added `verify_with` attribute for file items
* libs now have access to `repo_path`
* user items: fixed asking for password hash change
* file items: fixed `bw items -w` with `content_type: 'any'`
* improved various error messages

# 1.4.0

2015-03-02

* added virtualenv support for pkg_pip
* added reverse syntax for triggers and preceded_by
* lots of fixes and internal improvements around preceded_by

# 1.3.0

2014-12-31

* added pkg_pip items
* added pkg_yum items
* added pkg_zypper items
* added preceded_by item attribute
* fixed detection of non-existing files on CentOS/RHEL
* fixed detection of special files on Arch Linux
* fixed handling UTF-8 output of failed commands

# 1.2.2

2014-10-27

* fixed item classes not being restored after repo serialization

# 1.2.1

2014-10-21

* fixed a critical bug in bundle serialization

# 1.2.0

2014-10-19

* added item generators
* added `bw test --plugin-conflict-error`
* added `bw debug -c`
* improved unicode handling
* fixed logging issues

# 1.1.0

2014-08-11

* added metadata processors
* added `bw metadata`
* added `bw apply --profiling`
* added Repository.nodes_in_all_groups()
* added Repository.nodes_in_any_group()
* added the data subdirectory
* improved various error messages

# 1.0.0

2014-07-19

* API will now remain stable until 2.0.0
* added hooks for actions
* added support for Jinja2 templates
* fixed some CLI commands not terminating correctly

# 0.14.0

2014-07-13

* files, directories and symlinks don't care about ownership and mode by default **(BACKWARDS INCOMPATIBLE)**
* Mako file templates can now use include

# 0.13.0

2014-06-19

* added password-based SSH/sudo authentication
* fixed symlink items not checking existing link targets
* fixed exception when triggering skipped items
* output is now prefixed with `node:bundle:item_type:item_name`
* `bw repo debug` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
* `bw repo plot` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
* `bw repo test` is now a top-level command **(BACKWARDS INCOMPATIBLE)**

# 0.12.0

2014-05-11

* added plugins
* added group metadata
* user and group attributes are now optional
* user groups may no longer contain primary group **(BACKWARDS INCOMPATIBLE)**
* improvements to logging and output
* fixed a critical bug preventing per-node customization of bundles
* fixed pkg_apt choking on interactive dpkg prompts
* fixed hashing of plaintext user passwords without salt

# 0.11.2

2014-04-02

* packaging fixes only

# 0.11.1

2014-04-02

* packaging fixes only

# 0.11.0

2014-03-23

* renamed builtin item attribute 'depends' to 'needs' **(BACKWARDS INCOMPATIBLE)**
* removed PARALLEL_APPLY on custom items in favor of BLOCK_CONCURRENT **(BACKWARDS INCOMPATIBLE)**
* added builtin item attribute 'needed_by'
* added canned actions for services
* added deletion of files, groups and users
* simplified output of `bw apply`
* `bw repo test` now also verifies dependencies
* fixed `bw repo test` for files without a template
* fixed triggered actions being run every time
* various fixes and improvements around dependency handling
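The canned actions for services from 0.11.0 above pair with the triggers introduced in 0.10.0 (below): a config file can fire its service's canned `restart` action only when the file actually changed. In today's syntax that looks roughly like this (item names invented):

```python
files = {
    "/etc/nginx/nginx.conf": {
        "source": "nginx.conf",
        # run nginx's canned restart action only if this file changed
        "triggers": ["svc_systemd:nginx:restart"],
    },
}

svc_systemd = {
    "nginx": {
        "running": True,
    },
}
```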
# 0.10.0

2014-03-08

* removed the 'timing' attribute on actions **(BACKWARDS INCOMPATIBLE)**
* actions are now first-class items
* items can now trigger each other (most useful with actions)
* added System V service item
* added `bw repo test`
* added negated bundle and group selectors to CLI
* can now manage files while ignoring their content
* more control over how actions are run in interactive mode
* bundles can now be assigned to nodes directly
* fixed creating symlinks in nonexistent unmanaged directories

# 0.9.0

2014-02-24

* added 'unless' for actions
* improved exception handling
* fixed actions not triggering in noninteractive mode
* fixed noninteractive installation of Debian packages
* slightly more verbose output

# 0.8.0

2014-02-21

* move from Alpha into Beta stage
* added builtin item attribute 'unless'
* added lightweight git/hg/bzr integration
* added -f switch to `bw apply`
* template context can now be customized
* added Node.has_bundle, .in_group etc.
* fixed a LineBuffer bug
* prevented output of some extraneous whitespace

# 0.7.0

2014-02-16

* added safety checks to prevent diffs of unwieldy files
* added a "text" content type for files
* added support for arbitrary encodings in managed files
* added systemd and Upstart service items
* added hooks
* added action triggers (for service restarts after config changes)
* lots of new documentation
* better error messages when defining duplicate items
* better dependencies between files, directories and symlinks
* fixed a bug that prevented managing /etc/sudoers

# 0.6.0

2014-01-01

* added actions
* reworked group patterns **(BACKWARDS INCOMPATIBLE)**
* reworked output verbosity **(BACKWARDS INCOMPATIBLE)**
* added support for libs directory
* fixed high CPU load while waiting for interactive response
* various other minor fixes and improvements

# 0.5.0

2013-11-09

* manage users and groups
* manage symlinks
* node locking
* PARALLEL_APPLY setting for items
* manage Arch Linux packages
* plot item dependencies
* encoding fixes for file handling

# 0.4.0

2013-08-25

* manage directories
* manage Debian packages
* UI improvements

# 0.3.0

2013-08-04

* basic file management
* concurrency improvements
* logging/output improvements
* use Fabric for remote operations
* lots of other small improvements

# 0.2.0

2013-07-12

* bundle management
* item APIs
* new concurrency helpers

# 0.1.0

2013-06-16

* initial release
* node and group management
* running commands on nodes

==> bundlewrap-4.13.6/CONTRIBUTING.md <==

Please see [the docs on contributing](http://docs.bundlewrap.org/misc/contributing).

==> bundlewrap-4.13.6/LICENSE <==

GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc.

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. 
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or authors of the material; or

    e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work.
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program.
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

    <program> Copyright (C) <year> <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/licenses/why-not-lgpl.html>.
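As a minimal sketch of the notice placement described above: the header comment carries the per-file copyright notice, and the interactive entry point prints the short notice first. The program name `example`, the year, and the author are hypothetical placeholders, not part of this repository.

```python
# example.py -- a hypothetical program carrying the notices described above.
#
# example: does one small thing, briefly.
# Copyright (C) 2021 Jane Doe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

NOTICE = (
    "example Copyright (C) 2021 Jane Doe\n"
    "This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n"
    "This is free software, and you are welcome to redistribute it\n"
    "under certain conditions; type `show c' for details.\n"
)

if __name__ == "__main__":
    # When started in an interactive mode, print the short notice first.
    print(NOTICE)
```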
bundlewrap-4.13.6/MANIFEST.in000066400000000000000000000000571417502274300155150ustar00rootroot00000000000000include AUTHORS CHANGELOG.md LICENSE README.md

bundlewrap-4.13.6/README.md000066400000000000000000000025051417502274300152360ustar00rootroot00000000000000BundleWrap is a decentralized configuration management system that is designed to be powerful, easy to extend and extremely versatile.

For more information, have a look at [bundlewrap.org](http://bundlewrap.org) and [docs.bundlewrap.org](http://docs.bundlewrap.org).

------------------------------------------------------------------------

Badges: Latest Version, Build status, Python compatibility, DeepSource

------------------------------------------------------------------------

Donations appreciated:

Bitcoin `13AJYksqncZromPF8HvDUXsmHChAm3Y7W7`

Ethereum `0x5Eb3037e197d3C0d2E014bcfC2e027EB0AD42812`

------------------------------------------------------------------------

BundleWrap is © 2013 - 2021 [Torsten Rehn](mailto:torsten@rehn.email)

bundlewrap-4.13.6/assets/000077500000000000000000000000001417502274300152575ustar00rootroot00000000000000bundlewrap-4.13.6/assets/icon.psd000066400000000000000000061511001417502274300167230ustar00rootroot00000000000000
"X@:886442422220000.00...0.+..0..................,,...,..,..,.,,.,.,.,,,,,,.,,,,,,,,,,,,,,,,,,,,,,,,,,,,,**(**+,,,,,,,,,,,,,,,,,,,,**,,,,,,,**,,,,**,,,+*,,,**,,**,,**,,*,,**,,*,,*,,*,,*,**,*,,*,*,,*,*,,*,*,*,**,*,*,*,*,*,*,*,*,*,*,*,****,*,*,****,*,**,*,**,****,**,*******,**,*********,**,***+**,***,***,***,***,****,**************,*****,******+*******)))))))))((((((((((((&&&&&&&&&&&&&&&&&&&&&&&&%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&&&&&&&&&&&&&&&&&&&&&(((((((((((()))))))))**************,*****,**************,****,***,***,***,***,******,**,*********,**,*******,**,****,**,*,**,*,****,*,*,****,*,*,*,*,*,*,*,*,*,*,*,**,*,*,*,,*,*,,*,*,,*,**,*,,*,,*,,*,,**,,*,,**,,**,,**,,,*+,,,**,,,,**,,,,,,,**,,,,,,,,,,,,,,,,,,,,+**(**,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.,,,,.,.,.,.,,.,..,..,...,,..................0..+.0..000.000022224244688:@X" *:J[kw̻ wk[J:* 'vv> (_ϖ_):yy; @@ BÅB-rs-]^:ҥ҆;VV%rr%/֛փ15ۘۈ5==44&ُ&qʌq [] >>z܄{OPGGuv67VVww((@@QQbb ss   qq ddUV@@')ssYY11fg89hi..  J޾Kbbij"#pp#ij#ffQR>?éRS +ӥ,ff  ,ס,]]ɝBCpq˘==jk$ג%??``  Ō ؋ 22GG__|| ˁ#$''**..2255;;::5612..*+''$$  xx[[BB57$$  _`AB$$hl<B"  psBDTU''  ef++  QR??QXfi$$rr%%qr##ddNO  33ʾkk88˻ff00¸VV޷tt''A@ͳUTײccrr  rsab֨PQǧ@@**ttVVǣ88vwܡHHQQtv֝88  \\rrʚ$'~ؙ33<=56ۖ00֕((ssUU<<Ғ`^5.yt DDŏ  GGÎ=>z{''``Ό  >>vvJJ}}AApp͉''QR~~ш##LLus  -.OOrsֆ34SSpp…ޅ&&==SSjjĄل  ++;;LL\\llzyÃ΃҃҃΃Ãzyll\\LL;;++  لĄjjSS==&&ޅ…ppST34ֆrsOO-.  uuLL##ш~~RS''͉pqAA}}KKvv>>  ό``''{{>>ÎGG  ŏDD yt5.a^Ғ<<UWss((֕01ܖ67==36ؙ''̚rr\\  88֝vvQQHHݡww88ǣVVtt**@AȧQRרbcst  rrccزWVͳBB((uu߷WW¸00iiϻ98ll̾34OOee##rr&'rr$%fkRZ?@ST  ++ff  ''UUCEps  #=Bim$$BB`a  $$77EJ[[xx  $$''++..2256::;;5622//**'($$́  }}`aGH23!؋!Ō  ``??%ؒ%jk==˘prCCǝUU(ӡ(  ff,ӥ,  STĩ?@RSff#Ƴ#qq#$jk  cdK߾L  ./jj9:hi11YYss)*@@VVdd rr   ss bbQRAA()wwWW78wwHHRR}܄}>? `` uҌv-ߏ߈-99==8ݘ݊81֛փ1&ss'WW;ҥ҈<__.tt/DĆD AA ;yz;)`И`) ?ww@ HxܩxH=mΠm=#NẉwN#"Kt޺tK"'JmֶmK'.LiiL. '`4݉( $d\݄(Sك'SV $dph`CL`P(y6)\`\N+: UV3>kV&VSN`=U`P#x1,Z `]Ls{Mc5$-Bc#*5 th`6`P c `4# ME <K M tf` `P+.Z`hHN>2 NUNU0`-`P>Q `! skf5c?Y]  ?q`Q`PH[ `@yi y]`m`P\O/`R<`j--` `x`P`/7`[s[Hc`L~161``l`P` ,`Lk`B,%`0W`߱mR70# '0=TtO`P` ݺhRB3* '0AXpӠ~^C2, .2He`3qXD0( #09Ri/` ղx`M?0% '6Jd޿ğkVA00 +3FZuyѩw^G=0/ %0;Gf8աaE4. +0@Vu` ղx`M?0% '6Jd޿-߳x[C:0' 04@Kf`C 6}+`P`賀T(,m3I/l`k*]asL) c`حS'ݨq<,p]ܗW+ G)F)e`حS'!ІH8|`Q . `P`ӔXM8`x d!tS`Ĕd5 ^}k @Bi`Ĕd5  }""`W`P`[ ?E?8`kqY#`S#? 
{e50(F-X9`S#Xt`$?@`P`$ t s`[s<s]`a %HDPo L`\ry``P`5e`c U;`H-Z$!@|`$`#n`P`{$ `8{Nx`X`Y  #` @` `P``Z m,/g]` ` D`)ko`u`P`&+`5r|7N#`.` -```P`b`_`: p`I`AH [`"`]Z`P``@MmGZ]`v` '`S``P``p`u*lb``)`wl` `. `P`|`<cY `[``Z!`x`P`)( `(Mcm_M]``Y(``+`P`_Gzz`tu]v``Cd}` $``P`(b` D'A``%u`U`T\`P`}` QDm vA]```G` `P`LM`?M*``l$0_Z```P``zd&;*``dkc``\ `P``4mpv4]```Q@`=`P`m`o #=A`` ``h`P`0` FI P``F G7```P`@`/< omR _(X`Ep tR1 `2#^``w`~`P`^`Q-Zx ` dإu6`[I1` @`U`P`y`j}d`ng`d `, `-`P`}`m3FX`'^`z`L``P`e`ql ``bn`k``P`K`n$w`v`@W`` `P`6`h)/X`` B`u`+`P`!`&`|^ `=` z* `_`0`P` `0^;`N`m%`J`<`P``0h> X`E`#`0`6`@`P``0 AeS `'`0`5`#`@`P``4NT`Ӽ`=R@` `?`P``@h Q X`峏mU@0% .0ATg~`@P@``0`P` `" ME `%Y 9f`@Z <``,`P`"`?nk`TU`@`0```P`4`heX`'@<`:e+```P`G` 58 `r[`0q" ` ` `P`Z`/O`:`'6` `#`P`p`h6yS`?-` J`5`C`P``+, `C`b`J`e`P`m`0j}``|`d``P`N`hho|OS` `>m`}`|`P`0`IK ?  `B~`lV`y`Z`P`.`#vQp`u`3@r`^`0P M`b`h_]fS``q#J`@`SK#/@``R ``S_`0`@9H8``,  :c`l`Q%`m`5^k.`X!`jtcP>S`P`)``)l`"g`/0f  `B`Jx-t``-``'"W`A`Os4`LM```c@ pS`P`/%``@,>``u`` jH{ `c`*E``Wt``v,J``8 M`zz`,UL`X` c0 XS``.6s+`( `*=`h`_L` ` ``z`<L`^` 3@=`Y`gn-K0`p`1` ` c @M``& <``ztD<`6`q-x`{N` YL`_`?Nk`J`x+T1`M``'+`-x2{`@-`!Slc (M``zeiG``c4 $`+`<au`` `u`$S[U2`%D\`LYCi$`{`a,G`K %`5@C Pk`}r`RZkcM`hGt`i v`[5u` s\%61y`NSf`? S`X]9`&P `2`F>3Q3?-=`` ;"` T%n[|`6zF`jd.`UK6`M?|=Mf4|4>.`A\>x`(SF|c.M`~9-d`ωM"`S`J >E`IF|L㻕{eK;0. #07AYxxmĖtY>0( )0AZve`SⲒqP@0* )05AVkO`BީfG70 "00=Ldx`ˡ}\H4( %002@Pct(`ൌkT90# $0:Llo`ףbE4. ,2C^|i`ҟ~^C2, +0EZuu`U`Ns`T`x`|`,G\`D`U]%AM``w``75`%-`D`N```s`0` '`*`l`;` P`! `b\`U`]=VM`Wg``y(` |+(`D`Zk`<`c`8`Z6=tn`<]``H ``y3`XI`P=`(Z ]UjH`]`j$`V`-sA:`BZ `#m`MA`-`{M`)p XhMJ`sѓJ` XҗP `v~4`/|C`Cր#` ZӓN %^͘U8~}6`b浂M` Lը}G]m~H`^从b.`Lv2`7yl%` KL 5Zväe8 OxȭwM  Jr DZy]42] ijeG( 9` ̼|gH(`.[ƫmGLq̳qJ,X|ԸrJ!`````````5t`oR`2` `s``fM```_D`:`Bd`0т3x`0FQO`0n+m`0a+ E`0˧gT>0) ,08Lbv[`0`0`0`0`0`0x`0+`$N`%dD`Xh`BuD `8b ƭ`6 HZ\\RTTTTTTTTztttrrrppprrrrrrrrrrrrrrrrnnnnnnnnrrrrrrrrrrrrrrppprrrrrrrrrvvrrrrrTTTTTTRTT\\\\<""""윳蜥霿ߜݜܜٜ؜ۜۜ֜לޜߜԜӜלߜ՜Üݜ˜ܜۜϜќӜۜќœڜɜۜ؜˜МϜۜ͜؜Ȝٜ՜ȜΜۜ͜˜֜Ɯ؜ҜŜ͜ɜۜȜӜŜלМĜ˜ǜۜƜӜĜ֜МÜʜŜۜĜӜÜ՜МœɜÜۜÜӜœԜМȜۜœӜӜМǜۜ蜼ꜽ윾ʜʜʜڜʜ՜ʜќʜʜʜʜʜʜʜʜʜҜœԜǜӜÜ՜ȜœӜĜ֜ɜĜœԜŜלʜƜܜ՜Ɯ؜˜ȜĜŜלȜٜœ̜ʜŜǜ؜ɜۜĜ͜͜ǜ˜ٜ̜ܜǜϜМȜۜ͜МޜʜќӜʜӜݜ՜Μ؜לללߜڜԜݜܜܜޜۜ眱ĜŜŜƜǜȜɜ˜͜ќלޜ HZ\\RTTTTTTTTztttrrrppprrrrrrrrrrrrrrrrnnnnnnnnrrrrrrrrrrrrrrppprrrrrrrrrvvrrrrrTTTTTTRTT\\\\<""""윳蜥霿ߜݜܜٜ؜ۜۜ֜לޜߜԜӜלߜ՜Üݜ˜ܜۜϜќӜۜќœڜɜۜ؜˜МϜۜ͜؜Ȝٜ՜ȜΜۜ͜˜֜Ɯ؜ҜŜ͜ɜۜȜӜŜלМĜ˜ǜۜƜӜĜ֜МÜʜŜۜĜӜÜ՜МœɜÜۜÜӜœԜМȜۜœӜӜМǜۜ蜼ꜽ윾ʜʜʜڜʜ՜ʜќʜʜʜʜʜʜʜʜʜҜœԜǜӜÜ՜ȜœӜĜ֜ɜĜœԜŜלʜƜܜ՜Ɯ؜˜ȜĜŜלȜٜœ̜ʜŜǜ؜ɜۜĜ͜͜ǜ˜ٜ̜ܜǜϜМȜۜ͜МޜʜќӜʜӜݜ՜Μ؜לללߜڜԜݜܜܜޜۜ眱ĜŜŜƜǜȜɜ˜͜ќלޜ 
HZ\\RTTTTTTTTztttrrrppprrrrrrrrrrrrrrrrnnnnnnnnrrrrrrrrrrrrrrppprrrrrrrrrvvrrrrrTTTTTTRTT\\\\<""""윳蜥霿ߜݜܜٜ؜ۜۜ֜לޜߜԜӜלߜ՜Üݜ˜ܜۜϜќӜۜќœڜɜۜ؜˜МϜۜ͜؜Ȝٜ՜ȜΜۜ͜˜֜Ɯ؜ҜŜ͜ɜۜȜӜŜלМĜ˜ǜۜƜӜĜ֜МÜʜŜۜĜӜÜ՜МœɜÜۜÜӜœԜМȜۜœӜӜМǜۜ蜼ꜽ윾ʜʜʜڜʜ՜ʜќʜʜʜʜʜʜʜʜʜҜœԜǜӜÜ՜ȜœӜĜ֜ɜĜœԜŜלʜƜܜ՜Ɯ؜˜ȜĜŜלȜٜœ̜ʜŜǜ؜ɜۜĜ͜͜ǜ˜ٜ̜ܜǜϜМȜۜ͜МޜʜќӜʜӜݜ՜Μ؜לללߜڜԜݜܜܜޜۜ眱ĜŜŜƜǜȜɜ˜͜ќלޜ(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&(()*+-+,+,,+,+,++-+,++,+,+,++-+,++-+,+,++,+,++-+,++,+,+,++-+,++-+,+,++-+,++-+,+,,+,+,++-+,++,+,+,*))(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((IIHIHIIHHIIIIHGIIIIHJIGGIEHIHKIJJJKGHGHGHKKJJKKIIJJJHKJIJGIIJKJJGJGHHFIHJJJKIJIKJHJIIJIKIJKLJIKHKKHKIJJJKIKIKKIKJJJJKIJKLKJLJKKIKIJKJJHKIKKHKIJJJKIJJJJHKIJKIKIJKLJFHGHGHONQQSTRTQTTQSSQRQSRTUVTRUQRRNQOSTTURTRUTOSSSSUURTTVURUSUURTRTUTTQTRUUQTRTTTURTRUURTSQQMHIHKIIIHGHJHHIHJIIIIHGIHGGGJJIIHKIJHEFEFIHIHKHIHGGGGEFHGJIIIHJHJIHHGIEICHIIKEIGIJFJEGGGIEJHIKEJGHIIKEIJIKFJEIJFJEIJHKEIHIKEJGIIIKEIIIKEJGIJMQNUTVVOTRTTQTNTTUVOTPUVORPRRUVOTTVVPTPTRONGH???<>=>?<;;<=>>:=>>?=><>??><????<==>?>><>;5413445443456535555443455535565344454434565345554534565355553444532243653444545345653555534445431/. 
337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7֚ER7&] \Bq$Hۘ{Ta #hE:}:GƖkd\~.ϕ}AF<2_ bIpBۓ M] -`L<w3Iґr_`&Ɛt?μ?>=c eQ s ;َGW 3^ XйBr'IڌxXc !iDʷ<:EifZy2҉}D׵L ;-^ bشHpE܇} R` )dG<z5Iͅoa_*ʄx@ϰA=9c ePq?ڂHZ 2 `T@r-Kx[c$oAʫ=<ChsfWt7sDکQ8)_d ]ӨEo!H}Sb %h _E<z8Ho|c^~,|j@ҤE=5ai cݣKnA}N\ .b^ O=t1Ju~_`%tc@͟>=?fo fS s9wEݝT 6"_a YќAr'H|Wa !"h^C;~;GjwdZ{2}mA՘I ;0^f aڗHpF N^ + b]I;y4Jq~ `_(vg@ϓ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʎ=<CgufYx4rCٌO9*]f ^֋Fp F|Ra (e _E={8Hn|b_+yk@чB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@˂>=@gr fV s8uET7$_b \Bo$H}퀒Ta #j^F;|9Fmxe]{/~jCH :2`h dKpB O_ +b^M=w2Ir} aa&udAA>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6J˿p{a^,˾zh@?=9bk dNo>ڼ}IY 1^a S>s.J׺v\a"nbA<=Bft fUv6ԷuDQ8']e ]Eq#Gٵ|Tb $g _E<}9Gdzmzc^~-β{k@E>4_k cLpA۰ L^ /`` M<v2IԮs}^`%ŭsd>>?>do eR s9ثxFU 5!_c YAr(Hک|낻Wa   j`B;;EixdYz1Ѧ}oBJ :.^h aGpDܤ~ P_ (c]J=y4I΢o| b`)ɡwfBA<:am fQr =ٟzJZ 3^` VAt+Jٝw\c"m`C=<DfsfYv6ԚrCP9*]d ^Ep Gܘ~Ra & g ]E=y8IȖozb^~-͕{h@C=7aj dMoBړ}K] / a^ P?t0KՑu]b&Đrd@˼==@fs fTs8ՎwEܺS 7$_d [ѹBo%Hٌ|Va "k`D;|:FÊlydZz/Ή~kAյG ;1`g b۴InCڇ N^ ,b]J;z4Iυr| `_'Ƅue?ΰ?=<bn eP r<ׂyGW 4^` W?r*JxYa"j`Bʫ;<EhweXx4qCةM 9+^g _רEpE}R` 'd _E;z7In}b]+yj@ФA=8`m dOq?~J[ 0^` Q@u/Jv\b#ob@̟>=Aet fVu7vEܝS8&]Ic ^ӜDq#H{%Tb $h>^E<}9Gk9xc]~.}!i@ҘF<2aHi cܗKpB  }L^ -bE^ M>u2It1} ^a'u.d@̓@=?eDo eT q<xFޑW 5!_Ia ZϐBp(Hz)ꀁYc  k:^Dʎ<}:Ek=vfZy1nD׌L ;.`Jf b؋HpD} "R` )dD]G;z5Jo6| a^+x*g@χ?=:cGm eOq? {HY 2 `I_ T?r-Kx-[a$o7ځ`A˂;<ChBsfVt7 ՃsDO8)]Kd ]Cp!H|&S` %f @_E;{8Hm8|c\,z$وj@C=5`Hk cKpAÊL\ .`F` O=v1Js/]b%r1ڍd@>=?dCr fU t9ϏxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۔mAI <0^Jh `JpC "M^ ,dE]H;y5JϿs4~ `_(ɾw+ۙg@?<;dEn eOq =ټ˛yIX 2`I` W?s*Iںy+[a!k8ڞ`C=;Cg?tgYx 4Է٠pEO8*]Je `FpFܵ|#Ta ' e A]H={7Jʳn7{c_,̲y&ۥiAB=8`Hk eNpA۰Ƨ|J\ 0 `G_ Q?u/K׮w.\b%ĭq2۪b@>=BgBr fVs9׫ҬuET7&_Ib \Do$I۩}'Ta #!j?^E:|:Hħm<xd\{0Ц~!رkAF<2`Hj bInBڤ  M] -bE_ L<u3IТt1 _`&šv.ڶf??>=cDp eQ q ;ן̸zGW 4^Jb XBr)I؝x+Xc !i:ڻ;<Ei?Yy2њ} ٽK :-^KFpEܘ} $_ (d C;z6I͖o7^*ʕx)?=9aHNq>ړY 1^H>t.Jّv,a"n5<=BfAUv6ՎQ8']JEq#Gی|%b #g ?<}8GNJm9^~-Ή{"E=4_HLpAۇ  ^ .`F>v1Iԅs0a%ńs0@=>dCT s9؂V 6!_JCr'J|(c !"j;=};Gk=[y2K ;0`IInF !` + dC=w4Jr4`(x*A=;cFQp = Z 3`J?q+Iy,a!m8;<ChAWv4 N9*]KDp F~%` & g B;y8In9]~-y&A=7`ILpB![ / `H=u0Kt0`&p2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|H ;1_IKpC !_ ,bE=w4Ir2a'u-A?<bER r<X 5^IAr)Ix*c !k9ځ<bDj>vxz4كo i+_JeniE "}f B]x^ Ip6{~+z'ۈhc8bGkqo=NJ|w^G_ uaJv-"o4ۍ`^BeAruw8ԏtm']IcqfI{% h >^}]Hk9x~~ /}!ڔig4aJipmC "–}z bG^ u`Kҿt2} 'ƾu0ܙd`?eFo qs<ټ Λxr!_JcpdHغz(낪 i;`}]Ei=xzz1з}ؠo h.^IhpjDڵ~ !}cC_y^ I̳o4} )ɲw*ڥhd:aFors=ٰ ȧ|w^IatdJٮw-킙"m7ڪ``BfBsvy5ԫ լsl)]KdpjFܩ~&g @_{_Hȧm8|~ ,ͦz$۱jf5`HkppAڤij{`F` vbJբs/%ġr1ܶb`?dCq tv9ןѸvo"]IbqeH۝z' h<^~]Fj<w{{0К}ٽk h1^IgpkDۘ !|dD]y_ Kϖs4| )ɕw+e`=dEn qr>ٓyt!`I`qcJڑy+"l8`^Ej=wv9x4Ԏq 9!ڬi+_JgoJiEڌ~"'"®}f B_y7G^ IɊp8}~2} +ʉ{'i=0ܱd8bIkoFo?ه |0 γx`H_ u/JcJׅv/(#„o4b=;`AeCru=x7Ղ u7ظo&]Kbq$IhI{'#!!h?^}:C_Hk<x~4} 0|!k<*ڽh2_HjpFoB  - ȿ|`E_ w3IaIr1 -&t.f>7`=cDp s Bs;z4 s^Ibr)KdIx)&!i:`<@_Ei=wy8|2}p ;$j-^JgpHkE} ")d B]z5F` Io5| /*x)g=1b9cFmqCq>{2v`H_ r-IbJx,'"o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"=+aHE .`F1Is0+s1=8dD=6_K'J|)!"j=;Bk>8!ڃ;'`JI "+ bE4Hq4/v+ڈ=4bE CɊ3 ^I+Kw+'k8ٍ<?g?<׏9!]J H|#( e A8En71y&۔>.`ID!Ɩ1`H0Iw0)q2ۙ=:gB =қ6_I$J}!"i9Bk5|:)܃_F +ȅb2Hr,u>5ڈb A4Պ^)Jx%k<?j9:"ۏ_H ( Ñf 6Fp0z=1۔bD1ϖ`.Kw)p==g>8!ڛ]#J|$"g 9Em4{<+ڠ_E -ɢ`2Is+s=8٥d?5ק](Jy #i;Ai7};&۬^I~ )!Ʈc5Ho0w<2۱a B3ҳ^+Iw&m<=f;9ڸ] I~&!g 8Eo2{=-ܽaE/Ϳa0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 
^/Jv(o>;e=9]$I{%$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&""""""""""""""""""""""""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((("""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&(()*+-+,+,,+,+,++-+,++,+,+,++-+,++-+,+,++,+,++-+,++,+,+,++-+,++-+,+,++-+,++-+,+,,+,+,++-+,++,+,+,*))(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((IIHIHIIHHIIIIHGIIIIHJIGGIEHIHKIJJJKGHGHGHKKJJKKIIJJJHKJIJGIIJKJJGJGHHFIHJJJKIJIKJHJIIJIKIJKLJIKHKKHKIJJJKIKIKKIKJJJJKIJKLKJLJKKIKIJKJJHKIKKHKIJJJKIJJJJHKIJKIKIJKLJFHGHGHONQQSTRTQTTQSSQRQSRTUVTRUQRRNQOSTTURTRUTOSSSSUURTTVURUSUURTRTUTTQTRUUQTRTTTURTRUURTSQQMHIHKIIIHGHJHHIHJIIIIHGIHGGGJJIIHKIJHEFEFIHIHKHIHGGGGEFHGJIIIHJHJIHHGIEICHIIKEIGIJFJEGGGIEJHIKEJGHIIKEIJIKFJEIJFJEIJHKEIHIKEJGIIIKEIIIKEJGIJMQNUTVVOTRTTQTNTTUVOTPUVORPRRUVOTTVVPTPTRONGH???<>=>?<;;<=>>:=>>?=><>??><????<==>?>><>;5413445443456535555443455535565344454434565345554534565355553444532243653444545345653555534445431/. 337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7֚ER7&] \Bq$Hۘ{Ta #hE:}:GƖkd\~.ϕ}AF<2_ bIpBۓ M] -`L<w3Iґr_`&Ɛt?μ?>=c eQ s ;َGW 3^ XйBr'IڌxXc !iDʷ<:EifZy2҉}D׵L ;-^ bشHpE܇} R` )dG<z5Iͅoa_*ʄx@ϰA=9c ePq?ڂHZ 2 `T@r-Kx[c$oAʫ=<ChsfWt7sDکQ8)_d ]ӨEo!H}Sb %h _E<z8Ho|c^~,|j@ҤE=5ai cݣKnA}N\ .b^ O=t1Ju~_`%tc@͟>=?fo fS s9wEݝT 6"_a YќAr'H|Wa !"h^C;~;GjwdZ{2}mA՘I ;0^f aڗHpF N^ + b]I;y4Jq~ `_(vg@ϓ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʎ=<CgufYx4rCٌO9*]f ^֋Fp F|Ra (e _E={8Hn|b_+yk@чB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@˂>=@gr fV s8uET7$_b \Bo$H}퀒Ta #j^F;|9Fmxe]{/~jCH :2`h dKpB O_ +b^M=w2Ir} aa&udAA>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6J˿p{a^,˾zh@?=9bk dNo>ڼ}IY 1^a S>s.J׺v\a"nbA<=Bft fUv6ԷuDQ8']e ]Eq#Gٵ|Tb $g _E<}9Gdzmzc^~-β{k@E>4_k cLpA۰ L^ /`` M<v2IԮs}^`%ŭsd>>?>do eR s9ثxFU 5!_c YAr(Hک|낻Wa   j`B;;EixdYz1Ѧ}oBJ :.^h aGpDܤ~ P_ (c]J=y4I΢o| b`)ɡwfBA<:am fQr =ٟzJZ 3^` VAt+Jٝw\c"m`C=<DfsfYv6ԚrCP9*]d ^Ep Gܘ~Ra & g ]E=y8IȖozb^~-͕{h@C=7aj dMoBړ}K] / a^ P?t0KՑu]b&Đrd@˼==@fs fTs8ՎwEܺS 7$_d [ѹBo%Hٌ|Va "k`D;|:FÊlydZz/Ή~kAյG ;1`g b۴InCڇ N^ ,b]J;z4Iυr| `_'Ƅue?ΰ?=<bn eP r<ׂyGW 4^` W?r*JxYa"j`Bʫ;<EhweXx4qCةM 9+^g _רEpE}R` 'd _E;z7In}b]+yj@ФA=8`m dOq?~J[ 0^` Q@u/Jv\b#ob@̟>=Aet fVu7vEܝS8&]Ic ^ӜDq#H{%Tb $h>^E<}9Gk9xc]~.}!i@ҘF<2aHi cܗKpB  }L^ -bE^ M>u2It1} ^a'u.d@̓@=?eDo eT q<xFޑW 5!_Ia ZϐBp(Hz)ꀁYc  k:^Dʎ<}:Ek=vfZy1nD׌L ;.`Jf b؋HpD} "R` )dD]G;z5Jo6| a^+x*g@χ?=:cGm eOq? 
{HY 2 `I_ T?r-Kx-[a$o7ځ`A˂;<ChBsfVt7 ՃsDO8)]Kd ]Cp!H|&S` %f @_E;{8Hm8|c\,z$وj@C=5`Hk cKpAÊL\ .`F` O=v1Js/]b%r1ڍd@>=?dCr fU t9ϏxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۔mAI <0^Jh `JpC "M^ ,dE]H;y5JϿs4~ `_(ɾw+ۙg@?<;dEn eOq =ټ˛yIX 2`I` W?s*Iںy+[a!k8ڞ`C=;Cg?tgYx 4Է٠pEO8*]Je `FpFܵ|#Ta ' e A]H={7Jʳn7{c_,̲y&ۥiAB=8`Hk eNpA۰Ƨ|J\ 0 `G_ Q?u/K׮w.\b%ĭq2۪b@>=BgBr fVs9׫ҬuET7&_Ib \Do$I۩}'Ta #!j?^E:|:Hħm<xd\{0Ц~!رkAF<2`Hj bInBڤ  M] -bE_ L<u3IТt1 _`&šv.ڶf??>=cDp eQ q ;ן̸zGW 4^Jb XBr)I؝x+Xc !i:ڻ;<Ei?Yy2њ} ٽK :-^KFpEܘ} $_ (d C;z6I͖o7^*ʕx)?=9aHNq>ړY 1^H>t.Jّv,a"n5<=BfAUv6ՎQ8']JEq#Gی|%b #g ?<}8GNJm9^~-Ή{"E=4_HLpAۇ  ^ .`F>v1Iԅs0a%ńs0@=>dCT s9؂V 6!_JCr'J|(c !"j;=};Gk=[y2K ;0`IInF !` + dC=w4Jr4`(x*A=;cFQp = Z 3`J?q+Iy,a!m8;<ChAWv4 N9*]KDp F~%` & g B;y8In9]~-y&A=7`ILpB![ / `H=u0Kt0`&p2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|H ;1_IKpC !_ ,bE=w4Ir2a'u-A?<bER r<X 5^IAr)Ix*c !k9ځ<bDj>vxz4كo i+_JeniE "}f B]x^ Ip6{~+z'ۈhc8bGkqo=NJ|w^G_ uaJv-"o4ۍ`^BeAruw8ԏtm']IcqfI{% h >^}]Hk9x~~ /}!ڔig4aJipmC "–}z bG^ u`Kҿt2} 'ƾu0ܙd`?eFo qs<ټ Λxr!_JcpdHغz(낪 i;`}]Ei=xzz1з}ؠo h.^IhpjDڵ~ !}cC_y^ I̳o4} )ɲw*ڥhd:aFors=ٰ ȧ|w^IatdJٮw-킙"m7ڪ``BfBsvy5ԫ լsl)]KdpjFܩ~&g @_{_Hȧm8|~ ,ͦz$۱jf5`HkppAڤij{`F` vbJբs/%ġr1ܶb`?dCq tv9ןѸvo"]IbqeH۝z' h<^~]Fj<w{{0К}ٽk h1^IgpkDۘ !|dD]y_ Kϖs4| )ɕw+e`=dEn qr>ٓyt!`I`qcJڑy+"l8`^Ej=wv9x4Ԏq 9!ڬi+_JgoJiEڌ~"'"®}f B_y7G^ IɊp8}~2} +ʉ{'i=0ܱd8bIkoFo?ه |0 γx`H_ u/JcJׅv/(#„o4b=;`AeCru=x7Ղ u7ظo&]Kbq$IhI{'#!!h?^}:C_Hk<x~4} 0|!k<*ڽh2_HjpFoB  - ȿ|`E_ w3IaIr1 -&t.f>7`=cDp s Bs;z4 s^Ibr)KdIx)&!i:`<@_Ei=wy8|2}p ;$j-^JgpHkE} ")d B]z5F` Io5| /*x)g=1b9cFmqCq>{2v`H_ r-IbJx,'"o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"=+aHE .`F1Is0+s1=8dD=6_K'J|)!"j=;Bk>8!ڃ;'`JI "+ bE4Hq4/v+ڈ=4bE CɊ3 ^I+Kw+'k8ٍ<?g?<׏9!]J H|#( e A8En71y&۔>.`ID!Ɩ1`H0Iw0)q2ۙ=:gB =қ6_I$J}!"i9Bk5|:)܃_F +ȅb2Hr,u>5ڈb A4Պ^)Jx%k<?j9:"ۏ_H ( Ñf 6Fp0z=1۔bD1ϖ`.Kw)p==g>8!ڛ]#J|$"g 9Em4{<+ڠ_E -ɢ`2Is+s=8٥d?5ק](Jy #i;Ai7};&۬^I~ )!Ʈc5Ho0w<2۱a B3ҳ^+Iw&m<=f;9ڸ] I~&!g 8Eo2{=-ܽaE/Ϳa0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 
^/Jv(o>;e=9]$I{%"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&""""""""""""""""""""""""
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""8BIMPatt8BIMTxt2) /98 << /0 7 >> /0 << /1 << /0 [ << /0 << /99 /CoolTypeFont /0 << /0 (MavenPro-Regular) /2 1 >> >> >> << /0 << /99 /CoolTypeFont /0 << /0 (AdobeInvisFont) /2 0 >> >> >> << /0 << /99 /CoolTypeFont /0 << /0 (MyriadPro-Regular) /2 0 >> >> >> << /0 << /99 /CoolTypeFont /0 << /0 (TimesNewRomanPSMT) /2 1 >> >> >> ] >> /2 << /0 [ << /0 << /0 () >> >> ] /1 [ << /0 0 >> ] >> /3 << /0 [ << /0 << /0 (Photoshop6MojiKumiSet4) /5 << /0 0 /3 2 >> >> >> << /0 << /0 (Photoshop6MojiKumiSet3) /5 << /0 0 /3 4 >> >> >> << /0 << /0 (Photoshop6MojiKumiSet2) /5 << /0 0 /3 3 >> >> >> << /0 << /0 (Photoshop6MojiKumiSet1) /5 << /0 0 /3 1 >> >> >> << /0 << /0 (YakumonoHankaku) /5 << /0 0 /3 1 >> >> >> << /0 << /0 (GyomatsuYakumonoHankaku) /5 << /0 0 /3 3 >> >> >> << /0 << /0 (GyomatsuYakumonoZenkaku) /5 << /0 0 /3 4 >> >> >> << /0 << /0 (YakumonoZenkaku) /5 << /0 0 /3 2 >> >> >> ] /1 [ << /0 0 >> << /0 1 >> << /0 2 >> << /0 3 >> << /0 4 >> << /0 
5 >> << /0 6 >> << /0 7 >> ] >> /4 << /0 [ << /0 << /0 (None) /5 << /0 () /1 () /2 () /3 () /4 0 >> >> >> << /0 << /0 (PhotoshopKinsokuHard) /5 << /0 (!\),.:;?]}    0!! 0000 0 0 0000A0C0E0G0I0c000000000000000000000000 =]) /1 (\([{  00 0 0000 ;[) /2 (  % &) /3 (00 ) /4 1 >> >> >> << /0 << /0 (PhotoshopKinsokuSoft) /5 << /0 (  0000 0 0 00000000 =]) /1 (  00 0 000;[) /2 (  % &) /3 (00 ) /4 2 >> >> >> << /0 << /0 (Hard) /5 << /0 (!\),.:;?]}    0!! 0000 0 0 0000A0C0E0G0I0c000000000000000000000000 =]) /1 (\([{  00 0 0000 ;[) /2 (  % &) /3 (00 ) /4 1 >> >> >> << /0 << /0 (Soft) /5 << /0 (  0000 0 0 00000000 =]) /1 (  00 0 000;[) /2 (  % &) /3 (00 ) /4 2 >> >> >> ] /1 [ << /0 0 >> << /0 1 >> << /0 2 >> << /0 3 >> << /0 4 >> ] >> /5 << /0 [ << /0 << /0 (Normal RGB) /6 << /0 2 /1 12.0 /2 false /3 false /4 true /5 0.0 /6 1.0 /7 1.0 /8 0 /9 0.0 /10 0.0 /11 1 /12 0 /13 0 /14 0 /15 0 /16 0 /17 0.0 /18 true /19 false /20 false /21 false /22 false /23 false /24 false /25 false /26 false /27 false /28 false /29 false /30 0 /31 false /32 false /33 false /34 false /35 2 /36 0.0 /37 2 /38 0 /39 0 /40 false /41 2 /42 0 /43 << /0 .5 >> /44 2 /45 2 /46 7 /47 0 /48 0 /49 -1.0 /50 -1.0 /51 0 /52 false /53 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 0.0 0.0 0.0 ] >> >> /54 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 0.0 0.0 0.0 ] >> >> /55 << /99 /SimpleBlender >> /56 true /57 false /58 true /59 false /60 false /61 0 /62 0 /63 1.0 /64 4.0 /65 0.0 /66 [ ] /67 [ ] /68 0 /69 0 /70 0 /71 4 /72 0.0 /73 0.0 /74 false /75 false /76 false /77 true /78 true /79 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 1.0 1.0 0.0 ] >> >> /80 false /81 0 /82 3.0 /83 3.0 /84 false /85 0 /86 << /99 /SimpleCustomFeature >> /87 100.0 /88 true >> >> >> << /0 << /0 (None) /5 0 /6 << >> >> >> ] /1 [ << /0 0 >> << /0 1 >> ] >> /6 << /0 [ << /0 << /0 (Normal RGB) /5 << /0 0 /1 0.0 /2 0.0 /3 0.0 /4 0.0 /5 0.0 /6 1 /7 1.2 /8 0 /9 true /10 6 /11 2 /12 2 /13 0 /14 36.0 /15 true /16 .5 /17 [ .8 1.0 1.33 ] /18 [ 0.0 0.0 0.0 ] /19 [ 1.0 1.0 1.0 ] /20 6 /21 false /22 0 /23 true /24 0 /25 0 /27 /nil /26 false /28 /nil /29 false /30 << >> /31 36.0 /32 << >> /33 0 /34 7 /35 0 /36 /nil /37 0 /38 false /39 0 /40 2 >> >> >> << /0 << /0 (Basic Paragraph) /5 << /0 0 /1 0.0 /2 0.0 /3 0.0 /4 0.0 /5 0.0 /6 1 /7 1.2 /8 0 /9 false /10 6 /11 2 /12 2 /13 0 /14 36.0 /15 true /16 .5 /17 [ .8 1.0 1.33 ] /18 [ 0.0 0.0 0.0 ] /19 [ 1.0 1.0 1.0 ] /20 6 /21 false /22 0 /23 true /24 0 /25 0 /27 /nil /26 false /28 /nil /29 false /30 << >> /31 36.0 /32 << /0 2 /1 50.0 /2 false /3 false /4 true /5 0.0 /6 1.0 /7 1.0 /8 0 /9 0.0 /10 0.0 /11 1 /12 0 /13 0 /15 0 /16 0 /18 true /19 false /20 true /21 false /22 false /23 false /24 false /25 false /26 false /27 false /28 false /29 false /30 0 /35 1 /38 0 /53 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 0.0 0.0 0.0 ] >> >> /54 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 0.0 0.0 0.0 ] >> >> /68 0 /70 1 /71 4 /72 0.0 /73 0.0 /87 0.0 >> /33 0 /34 7 /36 /nil /37 0 /38 false /39 0 /40 2 >> /6 0 >> >> ] /1 [ << /0 0 >> << /0 1 >> ] >> /8 << /0 [ << /0 << /1 << /0 [ 0.0 0.0 0.0 0.0 1576.60938 0.0 1576.60938 0.0 1576.60938 0.0 1576.60938 0.0 1576.60938 350.65625 1576.60938 350.65625 1576.60938 350.65625 1576.60938 350.65625 0.0 350.65625 0.0 350.65625 0.0 350.65625 0.0 350.65625 0.0 0.0 0.0 0.0 ] >> /2 << /0 1 /6 [ -2.0 -2.0 ] /11 << /4 -2 /7 false >> >> >> >> ] >> /9 << /0 [ << /0 << /0 (kPredefinedNumericListStyleTag) /6 1 >> >> << /0 << /0 (kPredefinedUppercaseAlphaListStyleTag) /6 2 >> >> << /0 << /0 (kPredefinedLowercaseAlphaListStyleTag) /6 3 >> >> << /0 << /0 
(kPredefinedUppercaseRomanNumListStyleTag) /6 4 >> >> << /0 << /0 (kPredefinedLowercaseRomanNumListStyleTag) /6 5 >> >> << /0 << /0 (kPredefinedBulletListStyleTag) /6 6 >> >> ] /1 [ << /0 0 >> << /0 1 >> << /0 2 >> << /0 3 >> << /0 4 >> << /0 5 >> ] >> >> /1 << /0 << /0 << /0 1 /1 [ << /0 ( ) /1 (1) >> << /0 ( ) /1 (6) >> << /0 ( ) /1 (0) >> << /0 ( \)) /1 (5) >> << /0 () /1 (5) >> << /0 (0) /1 (1) >> << /0 () /1 (3) >> ] >> /1 0 /2 0 /3 .583 /4 .333 /5 .583 /6 .333 /7 .7 /8 true /9 [ << /0 0 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 1 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 2 /1 () /2 () /3 ( ) /4 ( ) >> << /0 3 /1 () /2 () /3 ( ) /4 ( ) >> << /0 4 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 5 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 6 /1 () /2 () /3 ( 9) /4 ( :) >> << /0 7 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 8 /1 () /2 () /3 ( ) /4 ( ) >> << /0 9 /1 () /2 () /3 ( ) /4 ( ) >> << /0 10 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 11 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 12 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 13 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 14 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 15 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 16 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 17 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 18 /1 () /2 () /3 ( ) /4 ( ) >> << /0 19 /1 () /2 () /3 ( ) /4 ( ) >> << /0 20 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 21 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 22 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 23 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 24 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 25 /1 () /2 () /3 ( 9) /4 ( :) >> << /0 26 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 27 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 28 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 29 /1 (0) /2 (0) >> << /0 30 /1 (0 ) /2 (0 ) >> << /0 31 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 32 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 33 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 34 /1 () /2 () /3 ( ) /4 ( ) >> << /0 35 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 36 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 37 /1 () /2 () /3 ( ) /4 ( ) >> << /0 38 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 39 /1 () /2 () /3 (<) /4 (>) >> << /0 40 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 41 /1 () /2 () /3 (<) /4 (>) >> << /0 42 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 43 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> << /0 44 /1 () /2 () /3 ( 9) /4 ( :) >> << /0 45 /1 ( ) /2 ( ) /3 ( ) /4 ( ) >> ] /15 << /0 (Hunspell) >> /16 false /17 0 >> /1 [ << /0 << /0 (bundlewrap.org ) /5 << /0 [ << /0 << /0 << /0 () /5 << /0 0 /9 true >> /6 1 >> >> /1 15 >> ] >> /6 << /0 [ << /0 << /0 << /0 () /5 0 /6 << /0 0 /1 183.33333 /4 false /5 770.83319 /53 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 .61176 .61176 .61176 ] >> >> >> >> >> /1 15 >> ] >> /10 << /0 4 /2 true >> >> /1 << /0 [ << /0 0 >> ] /2 [ << /99 /PC /5 0 /6 [ << /99 /F /10 0 /1 [ 0.0 0.0 1576.60938 350.65625 ] /5 2 /6 [ << /99 /R /1 [ 0.0 0.0 1576.60938 350.65625 ] /6 [ << /99 /R /1 [ 0.0 0.0 1576.60938 350.65625 ] /5 2 /6 [ << /99 /L /10 130.1651 /14 -152.80762 /15 39.78246 /0 << /0 [ 0.0 130.1651 ] >> /1 [ 0.0 -152.80762 1576.60938 39.78246 ] /6 [ << /99 /S /15 << /0 15 /2 0 /5 false >> /6 [ << /99 /G /1 [ 0.0 -152.80762 1308.05713 39.78246 ] /5 [ 265 427 350 273 339 279 453 401 237 398 548 362 401 304 3 ] /8 [ 0.0 -22.64252 1308.05713 169.94756 ] /9 [ 0.0 -22.64252 1392.57349 169.94756 ] /11 true /12 -152.80762 /13 39.78246 /20 0 >> ] >> ] >> ] >> ] >> ] >> ] >> ] >> >> ] /2 << /0 2 /1 12.0 /2 false /3 false /4 true /5 0.0 /6 1.0 /7 1.0 /8 0 /9 0.0 /10 0.0 /11 1 /12 0 /13 0 /14 0 /15 0 /16 0 /17 0.0 /18 true /19 false /20 false /21 false /22 false /23 false /24 false /25 false /26 false /27 false /28 false 
/29 false /30 0 /31 false /32 false /33 false /34 false /35 2 /36 0.0 /37 2 /38 0 /39 0 /40 false /41 2 /42 0 /43 << /0 .5 >> /44 2 /45 2 /46 7 /47 0 /48 0 /49 -1.0 /50 -1.0 /51 0 /52 false /53 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 0.0 0.0 0.0 ] >> >> /54 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 0.0 0.0 0.0 ] >> >> /55 << /99 /SimpleBlender >> /56 true /57 false /58 true /59 false /60 false /61 0 /62 0 /63 1.0 /64 4.0 /65 0.0 /66 [ ] /67 [ ] /68 0 /69 0 /70 0 /71 4 /72 0.0 /73 0.0 /74 false /75 false /76 false /77 true /78 true /79 << /99 /SimplePaint /0 << /0 1 /1 [ 1.0 1.0 1.0 0.0 ] >> >> /80 false /81 0 /82 3.0 /83 3.0 /84 false /85 0 /86 << /99 /SimpleCustomFeature >> /87 100.0 /88 true >> /3 << /0 0 /1 0.0 /2 0.0 /3 0.0 /4 0.0 /5 0.0 /6 1 /7 1.2 /8 0 /9 true /10 6 /11 2 /12 2 /13 0 /14 36.0 /15 true /16 .5 /17 [ .8 1.0 1.33 ] /18 [ 0.0 0.0 0.0 ] /19 [ 1.0 1.0 1.0 ] /20 6 /21 false /22 0 /23 true /24 0 /25 0 /27 /nil /26 false /28 /nil /29 false /30 << >> /31 36.0 /32 << >> /33 0 /34 7 /35 0 /36 /nil /37 0 /38 false /39 0 /40 2 >> >>8BIMFMsk 2 $&&'()+)*)**)*)*))+)*))*)*)*))+)*))+)*)*))*)*))+)*))*)*)*))+)*))+)*)*))+)*))+)*)**)*)*))+)*))*)*)*)))(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((IIHIHIIHHIIIIHGIIIIHJJHIJGIIHKIJJJKIKIKKHKKHHJIIJJJJHKIKKIKIJJJFFIFJKHKIJJJKHIHGHGIIJKIKIJKLIHJGJJHKIJJJKIKIKKIKJJJJKIJKLKJLJKKIKIJKJJHKIKKHKIJJJKIJJKKILJKKIKIJKLJHKIKKHONOORRRUQTTQSRRSRTPSSVPPSOTUQTRTUTUQSQQRORSTTUURTTVTQTRTTRTRTUTTQTRUUQTRTTTURTRUURTSQQMHIHKIIIHGHJHHIHJIIIIHHJIHHHJJIIHKIJIIHHJIHIHKHIHHGGIGGHGIHHGGIHFFFFGJGJEIJIKEIGIJFJEIIHKEJHIKEJGHIIKEIJIKFJEIJFJEIJHKEIHIKEJGIIIKEIIIKEJGIJMQNUTVVOTRUURUOUTUVOTPUVPTQTTUVORRUTPUPTTQOGH>>>:==>=<<;>??>:===><=;<>>><????<==>?>><?:431222322123431333322123331334312223221234312333231234313333122232213343122232312343133331222122/.- ....6666666666666666666666666666666666-~4 V6O//////0///1/0/1//109;:>E./000101457A $&&'()+)*)**)*)*))+)*))*)*)*))+)*))+)*)*))*)*))+)*))*)*)*))+)*))+)*)*))+)*))+)*)**)*)*))+)*))*)*)*)))(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((IIHIHIIHHIIIIHGIIIIHJJHIJGIIHKIJJJKIKIKKHKKHHJIIJJJJHKIKKIKIJJJFFIFJKHKIJJJKHIHGHGIIJKIKIJKLIHJGJJHKIJJJKIKIKKIKJJJJKIJKLKJLJKKIKIJKJJHKIKKHKIJJJKIJJKKILJKKIKIJKLJHKIKKHONOORRRUQTTQSRRSRTPSSVPPSOTUQTRTUTUQSQQRORSTTUURTTVTQTRTTRTRTUTTQTRUUQTRTTTURTRUURTSQQMHIHKIIIHGHJHHIHJIIIIHHJIHHHJJIIHKIJIIHHJIHIHKHIHHGGIGGHGIHHGGIHFFFFGJGJEIJIKEIGIJFJEIIHKEJHIKEJGHIIKEIJIKFJEIJFJEIJHKEIHIKEJGIIIKEIIIKEJGIJMQNUTVVOTRUURUOUTUVOTPUVPTQTTUVORRUTPUPTTQOGH>>>:==>=<<;>??>:===><=;<>>><????<==>?>><?:431222322123431333322123331334312223221234312333231234313333122232213343122232312343133331222122/.- ....6666666666666666666666666666666666-~4 V6O//////0///1/0/1//109;:>E./000101457A 
$&&'()+)*)**)*)*))+)*))*)*)*))+)*))+)*)*))*)*))+)*))*)*)*))+)*))+)*)*))+)*))+)*)**)*)*))+)*))*)*)*)))(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((IIHIHIIHHIIIIHGIIIIHJJHIJGIIHKIJJJKIKIKKHKKHHJIIJJJJHKIKKIKIJJJFFIFJKHKIJJJKHIHGHGIIJKIKIJKLIHJGJJHKIJJJKIKIKKIKJJJJKIJKLKJLJKKIKIJKJJHKIKKHKIJJJKIJJKKILJKKIKIJKLJHKIKKHONOORRRUQTTQSRRSRTPSSVPPSOTUQTRTUTUQSQQRORSTTUURTTVTQTRTTRTRTUTTQTRUUQTRTTTURTRUURTSQQMHIHKIIIHGHJHHIHJIIIIHHJIHHHJJIIHKIJIIHHJIHIHKHIHHGGIGGHGIHHGGIHFFFFGJGJEIJIKEIGIJFJEIIHKEJHIKEJGHIIKEIJIKFJEIJFJEIJHKEIHIKEJGIIIKEIIIKEJGIJMQNUTVVOTRUURUOUTUVOTPUVPTQTTUVORRUTPUPTTQOGH>>>:==>=<<;>??>:===><=;<>>><????<==>?>><?:431222322123431333322123331334312223221234312333231234313333122232213343122232312343133331222122/.- ....6666666666666666666666666666666666-~4 V6O//////0///1/0/1//109;:>E./000101457A 337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7ֵER7&] \Bq$H۳{Ta #hE:}:GƱkd\~.ϰ}AF<2_ bIpBۮ M] -`L<w3IҬr_`&ƫt??>=c eQ s ;٩GW 3^ XBr'IڧxXc !iD<:EifZy2Ҥ}DL ;-^ bHpEܢ} R` )dG<z5I͠oa_*ʟx@ϼA=9c ePq?ڝHZ 2 `T@r-Kٛx[c$šoAʷ=<ChsfWt7՘sDڵQ8)_d ]ӴEo!Hۖ}Sb %h _E<z8Hǔo|c^~,̓|j@ҰE=5ai cݯKnAّ}N\ .b^ O=t1Jԏu~_`%Îtc@ͫ>=?fo fS s9֌wEݩT 6"_a YѨAr'Hي|뀁Wa !"h^C;~;GjwdZ{2ч}mAդI ;0^f aڣHpF܅ N^ + b]I;y4JЃq~ `_(ɂvg@ϟ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʚ=<CgufYx4rC٘O9*]f ^֗Fp F|Ra (e _E={8Hn|b_+yk@ѓB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@ˎ>=@gr fV s8uEیT7$_b \ҋBo$H}퀞Ta #j^F;|9Fmxe]{/~jCՇH :2`h dۆKpB O_ +b^M=w2Ir} aa&udA΂A>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6Jp{a^,zh@?=9bk dNo>}IY 1^a S>s.Jv\a"nbA<=Bft fUv6uDQ8']e ]Eq#G|Tb $g _E<}9Gmzc^~-{k@E>4_k cLpA L^ /`` M<v2Is}^`%sd>>?>do eR s9xFU 5!_c YAr(H|Wa   j`B;;EixdYz1}oBJ :.^h aGpDܿ~ P_ (c]J=y4Iνo| b`)ɼwfBA<:am fQr =ٺzJZ 3^` VAt+Jٸw\c"m`C=<DfsfYv6ԵrCP9*]d ^Ep Gܳ~Ra & g ]E=y8Iȱozb^~-Ͱ{h@C=7aj dMoBڮ}K] / a^ P?t0Kլu]b&īrd@==@fs fTs8թwES 7$_d [Bo%H٧|Va "k`D;|:FålydZz/Τ~kAG ;1`g bInCڢ N^ ,b]J;z4IϠr| `_'Ɵue?μ?=<bn eP r<םyGW 4^` W?r*JۛxYa"j`Bʷ;<EhweXx4՘qCصM 9+^g _״EpEۖ}R` 'd _E;z7I˔n}b]+˓yj@аA=8`m dOq?ڑ~J[ 0^` Q@u/Jُv\b#Îob@̫>=Aet fVu7֌vEܩS8&]Ic ^ӨDq#Hۊ{%Tb $h>^E<}9Gƈk9xc]~.χ}!i@ҤF<2aHi cܣKpBۅ  }L^ -bE^ M>u2I҃t1} ^a'Ƃu.d@̟@=?eDo eT q<xFޝW 5!_Ia ZϜBp(Hz)ꀍYc  k:^Dʚ<}:Ek=vfZy1كnDטL ;.`Jf bؗHpD} "R` )dD]G;z5Jo6| a^+x*܈g@ϓ?=:cGm eOq? 
Ȋ{HY 2 `I_ T?r-Kx-[a$o7ڍ`Aˎ;<ChBsfVt7 ՏsDڌO8)]Kd ]ՋCp!H|&S` %f @_E;{8Hm8|c\,z$ٔj@҇C=5`Hk c߆KpAÖL\ .`F` O=v1Js/]b%r1ڙd@̂>=?dCr fU t9ϛxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۠mAI <0^Jh `JpC "M^ ,dE]H;y5Js4~ `_(w+ۥg@?<;dEn eOq =˧yIX 2`I` W?s*Iy+[a!k8ڪ`C=;Cg?tgYx 4٬pEO8*]Je `FpF|#Ta ' e A]H={7Jn7{c_,y&۱iAB=8`Hk eNpAƳ|J\ 0 `G_ Q?u/Kw.\b%q2۶b@>=BgBr fVs9ҸuET7&_Ib \Do$I}'Ta #!j?^E:|:Hm<xd\{0~!ؽkAF<2`Hj bInBڿ  M] -bE_ L<u3Iнt1 _`&żv.f??>=cDp eQ q ;׺zGW 4^Jb XBr)Iظx+Xc !i:;<Ei?Yy2ѵ} K :-^KFpEܳ} $_ (d C;z6Iͱo7^*ʰx)?=9aHNq>ڮY 1^H>t.J٬v,a"«n5<=BfAUv6թQ8']JEq#Gۧ|%b #g ?<}8Gǥm9^~-Τ{"E=4_HLpAۢ  ^ .`F>v1IԠs0a%şs0@=>dCT s9؝V 6!_JCr'Jڛ|(c !"j;=};Gk=[y2јK ;0`IInFܖ !` + dC=w4JΔr4`(ȓx*A=;cFQp =ב Z 3`J?q+I؏y,a!m8;<ChAWv4Ҍ N9*]KDp Fڊ~%` & g B;y8IȈn9]~-͇y&A=7`ILpBڅ![ / `H=u0KՃt0`&Ăp2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|؃H ;1_IKpC !_ ,bE=w4Ir2a'u-܈A?<bER r<͊X 5^IAr)Ix*c !k9ڍ<bDj>vxz4ُo i+_JeniE "}f B]x^ Ip6{~+z'۔hc8bGkqo=ǖ|w^G_ uaJv-뀗"o4ۙ`^BeAruw8ԛtm']IcqfI{% h >^}]Hk9x~~ /}!ڠig4aJipmC "¢}z bG^ u`Kt2} 'u0ܥd`?eFo qs< Χxr!_JcpdHz(낶 i;`}]Ei=xzz1}جo h.^IhpjD~ !}cC_y^ Io4} )w*ڱhd:aFors= ȳ|w^IatdJw-킴"m7ڶ``BfBsvy5 ոsl)]KdpjF~&g @_{_Hm8|~ ,z$۽jf5`HkppAڿĿ{`F` vbJսs/%ļr1b`?dCq tv9׺vo"]IbqeH۸z' h<^~]Fj<w{{0е}k h1^IgpkD۳ !|dD]y_ Kϱs4| )ɰw+e`=dEn qr>ٮyt!`I`qcJڬy+"l8`^Ej=wv9x4ԩq 9!i+_JgoJiEڧ~"'"}f B_y7G^ Iɥp8}~2} +ʤ{'i=0d8bIkoFo?٢ |0 x`H_ u/JcJנv/(#Ÿo4b=;`AeCru=x7՝ u7o&]Kbq$IhIۛ{'#!!h?^}:C_Hƙk<x~4} 0ј|!k<*h2_HjpFoBۖ  - |`E_ w3IaIҔr1 -&Ɠt.f>7`=cDp s Bs;ّz4 s^Ibr)KdIڏx)&!i:`<@_Ei=wy8|2Ҍ}p ;$j-^JgpHkE܊} ")d B]z5F` I͈o5| /*ʇx)g=1b9cFmqCq>څ{2v`H_ r-IbJكx,'"‚o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"ۃ=+aHE Å.`F1Is0+s1ۈ=8dD=ϊ6_K'J|)!"j=;Bk>8!ڏ;'ہ`JI "+ ƃbE4Hq4/v+ڔ=4ۆbE Cɖ3 ҈^I+Kw+'k8ٙ<?g?<כ9!؍]J H|#( e A8En71y&۠>.ڒ`ID!Ƣ1̔`H0Iw0)q2ۥ=:gB =ҧ6ٙ_I$J}!"i9Bk5|:)ܞ_F +Ƞb2Hr,u>5ڣb A4ե^)Jx%k<?j9:"۪_H ( ìf 6Fp0z=1ۯbD1ϱ`.Kw)p==g>8!ڶ]#J|$"g 9Em4{<+ڻ_E -ɽ`2Is+s=8d?5](Jy #i;Ai7};&^I~ )!c5Ho0w<2a B3^+Iw&m<=f;9] I~&!g 8Eo2{=-aE/a0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 ^/Jv(o>;e=9]$I{%PjzPjzPjzPjzPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPP7QhyvfR9 :Qamw xm_H)-K_q~}nY@jzP ,Lbs}o\F$  Tfp{ ~sbH) +BVfq|~p]F!*I_p} |o^H)  jzP5`|\.[htS{RA]tw?]v~^4BbyzY(>ee@A]twFkhB P)X|^!PF BiY L{e:jzP3jg)!{ &SsNpd)Hnߜd)HxyJ &SsYUP>lݜ}DPF)^w5*lޜxNjzPNE,7 J~Jq2fٜ|:WٜJ"iݜk(JqHݜ}EP:o؜JPF%^~1-x؜}M jzPND[_eDvR4c&fԜ.=xԜR%s؜v+4c \לUPӜ5PF Rl pԜzP3~%!p Doݜ?~Мk 9ҜCj՜n Doݜ \ӜTPĜk PF,l.MМzPgUA~Evڜ:̜2 oМzEќJEvܜJϜ0Pœ.PF:~ޜG tΜzP#v [WT4oR;sלvɜLϜN o͜r ;sܜ!}̜ZPGPFAܜM"͜zP<ߜ!zd^ԜƜU͜s  ˜!^ܜNɜqPXPFNڜF/̜zPHޜ9~PҜŜV̜".ɜ.Pܜ tƜuP^PFPڜ/5˜zPMݜ[MA!iRPҜŜDa˜15ǜ1Pܜ"ŜkPWPFPڜ -ʜzPDܜ!xlWPҜĜ"ʜ=-Ŝ'Pܜ0ĜKPzgXH50%")0:I\lFPFP ~ocUH=2+")02-"".2ASbtzP2mZK>0)""%07HUh|t~/~P {k^PD;0'")5BSfxreWJ<00",2@M\p^wj]OA:0/""'08ATfv6sbQ?3."",070)""03 5`,PFP{bI)-X2nB/WwzP~W+NvQ[D*bRPxbH)vZ9-YNnK, Ai*l@*S~PxbH)#eA6`kPsG .o PFPlKbjD6fzP~^ S#!p\HPlS4 vO`W"FYDePN[^Pi~PFPvf4SzPvcR hJ8PAx.M&#f~<`P&jPx%uXPFPs_& bzP6_E^rbf{PKxfPL ~{"n%Pow <Pv PFPtjzP~rM X-/pTNPp P >rhnP*uWYP\PFP(,jzP4j[`5E%~P.P"u.!PvjP!PFPQPoPjzPv7cm Y{PkBP<Ak MkP$qPNMPFPpxjzPv<DX AzMNP]Pr )xPkHxxP~PFPPYjzP\+~{WQxP{nP*P]WP |qzP."PFP`jzPz9RxL {PM~PbjdPM#jzPr^PFPv*) jzP)DRXyPuDNPPL)P|qjzP,PFPPA__jzP\\{N]qPPl>Sf`P &jzPPFP)QjzPr" x>)<{PP'kx\"PJxjzPINPFP`tjzP G>X j]j<NPsP|PpAjzP pPFPjDDjzP;ndD+gkPjPW&0~PMPjzPvPFP}pfjzP_Sq(8+{PePSWRnPhjzPNPFPurxjzPxq3XYj]3NPbPqqPG<jzP!:PFPrxXjzPY %:<qePbPhc PfjzPUPFPjz0jzP @BiFxPbP@ oA5P|yjzPwjPFPjz<jzP/9""YXHu P)KP?Ynz xm\H1 bP2%OPP]jzPayPFPjzOjzPGќm.qM{x^ P"Suu\5bPMB1gP <jzPJPFPjz^ujzPVќ`SxPXyTbPxdS ~wP- 
jzP.PFPjzl`jzPdќX2@KP)pܜ~ObPds_kPDjzPPFPjzxSjzPoќ!bZlW Pt֜dbPQmXPWjzPPFPjzCjzP}ќX&]xP]ӜibP<xvKPemjzP"PFPjz5jzPќU*/KP!ϜP"k=Po\jzP,PFPjz#jzPќ(Pg`O P:P _+"PzPjzP0PFPjz"jzPќ0Ox8hzxPEPX'PBjzP9PFPjzjzPќ0Uy: KP?P%P0P5jzP<PFPjzjzPќ0 <sSH P)P0P4P%jzP<PFPjzjzPќ3EiIrztxPP:H<P"jzP;PFPjzjzPќ<UqG KPe{jXJ<0'".0PFPjzejjzPp, - Pk>bPqzQPBjzPSPFPjzX{jzPe"0pV`vPfbP"f`PxSjzPhpPFPjzEjzPU"UY`Fi}HPbP:Xs|Pl`jzP{`PFPjz0jzPBC ; " P=abPWJnP^ujzPMPFPjz.jzP%]{GYvP\bPou2<[POjzP0F DPjzQjzPvUPNTsrHPmbPbZ%BP<jzPHC%/<PjzmjzPyvH P{lbPHxPP0jzPn<7A6PjzhjzPq-~  7RvPWbPG'|oPXjzPs4OW.PjzK#jzPV\RF:b|f|HPFbPmd*{PxjzP*WfmPjz$TjzP/rg0T P=bP}B^.\PfjzPb.{PjzjzP)~$KvP<bPF[d3PDDjzPPjztjzPxeR<"mYvHPFbPy/'PtjzP<v-:|PjzP\jzPP jVA_ PRbP+dj?sPjzPzKh\edPjzjzP]~|-BvPibPuj6|DP__jzP-JDPjzKjzP~R0 yKpHPbP.5[,P) jzPwx +:PjzUjzPPgbDPju P bPj{mP_jzP9DpzPjzOjzP 2~n<:tPLbPTX.kC0PYjzPn!1jPjzt jzPj R"<iDPrbP(9PxjzP_\>w9Pjz5jjzP"wZ.^siP_EfPfLxDPpPjzP;EWPjzBjzPrg~^,I1tPDnP~bP),jzP.^d"2_Pjz<.mzP#cHWR )cDP~P{_SuUAPtjzPR3 &Pjz},zPim9jQj}\PwP~ Px\~P&pzHMJo"2jPjz'>NwPDj~zL>U&tP_!hsPQ-APC q'Pf4<z>~ v{FWPjz`e[kPHMWjRyfDP"UA\PU ]qPzM4\P [Nz'5bcu"1^PjzEHTPs; HzjvPzKN7Pf(F P2!qP@:2gz}G2k;m.:wPjz"P 8i$P I'X}j~M`tP5_"@PV S.PJ}C5r!PjD;fzx`:DjT3`3:e.Pjz<vN:^~P)tH@`{jR.DPwa7.SrlPgD$P{HPmoB :f?PnB@`|zDm_SC80.""%05O`qUPraO>2-"",0?M\lz\oΜkPjzJ˜~PEݜj|[jtPIsPɜ^PŜ`P-ÜzAœNyϜuPjz>̜PJޜjN'<DPePʜ]PǜcP5ĜzpĜ4wМuPjz'͜.P>ߜjPnEP̜PPɜ[P0ŜzƜzkќgPjz rΜ)P+j{hutPfœWP͜8P z̜FP#Ɯz |ɜPQӜNPjzJМuPvjN:JDPKƜTPΜ|P^Ϝ)Ptǜz`̜u,Ԝ)Pjzsќ>PMWP!}Ȝ9PМRP6ҜhPMɜz5Ϝ:\֜XPjz9ӜNPme{s~rPA̜e P՜mP^՜2PpԜzKӜBjٜiPjzF֜t:P)v}M NJVAPNМcPVٜq&PjٜJP.xל[jzP>qb%P M{lEjz'OknJ6a`5PjzQu|cDP Dhv`ANXaAPOoiQ.PDh{]2P5^}xW'P ChfDjz 4M]hu~teS6"F^myxl]D"  B[k{ zm^N32Nct{rdSA)7Pfv vl`TA)P.MdxxjXA!DZlz{mZB-K`q~~o[B#jzPjzPjzPkzPrzPvzPsPpPiP4\PYHP2P P[PkPTDPPruPP>P7P=SP0c2^P0m@GFP0{X,XP0sQ, ?b!P0vdTI:0*"-06DQ]l~MP0ǜqP0ȜP0ɜP0ʜP0˜}P0͜^P0Μ,P&ќEP'Sr؜x>PKnߜ~UP=\w}b> P6Qes xnbP5 337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7ֵER7&] \Bq$H۳{Ta #hE:}:GƱkd\~.ϰ}AF<2_ bIpBۮ M] -`L<w3IҬr_`&ƫt??>=c eQ s ;٩GW 3^ XBr'IڧxXc !iD<:EifZy2Ҥ}DL ;-^ bHpEܢ} R` )dG<z5I͠oa_*ʟx@ϼA=9c ePq?ڝHZ 2 `T@r-Kٛx[c$šoAʷ=<ChsfWt7՘sDڵQ8)_d ]ӴEo!Hۖ}Sb %h _E<z8Hǔo|c^~,̓|j@ҰE=5ai cݯKnAّ}N\ .b^ O=t1Jԏu~_`%Îtc@ͫ>=?fo fS s9֌wEݩT 6"_a YѨAr'Hي|뀁Wa !"h^C;~;GjwdZ{2ч}mAդI ;0^f aڣHpF܅ N^ + b]I;y4JЃq~ `_(ɂvg@ϟ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʚ=<CgufYx4rC٘O9*]f ^֗Fp F|Ra (e _E={8Hn|b_+yk@ѓB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@ˎ>=@gr fV s8uEیT7$_b \ҋBo$H}퀞Ta #j^F;|9Fmxe]{/~jCՇH :2`h dۆKpB O_ +b^M=w2Ir} aa&udA΂A>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6Jp{a^,zh@?=9bk dNo>}IY 1^a S>s.Jv\a"nbA<=Bft fUv6uDQ8']e ]Eq#G|Tb $g _E<}9Gmzc^~-{k@E>4_k cLpA L^ /`` M<v2Is}^`%sd>>?>do eR s9xFU 5!_c YAr(H|Wa   j`B;;EixdYz1}oBJ :.^h aGpDܿ~ P_ (c]J=y4Iνo| b`)ɼwfBA<:am fQr =ٺzJZ 3^` VAt+Jٸw\c"m`C=<DfsfYv6ԵrCP9*]d ^Ep Gܳ~Ra & g ]E=y8Iȱozb^~-Ͱ{h@C=7aj dMoBڮ}K] / a^ P?t0Kլu]b&īrd@==@fs fTs8թwES 7$_d [Bo%H٧|Va "k`D;|:FålydZz/Τ~kAG ;1`g bInCڢ N^ ,b]J;z4IϠr| `_'Ɵue?μ?=<bn eP r<םyGW 4^` W?r*JۛxYa"j`Bʷ;<EhweXx4՘qCصM 9+^g _״EpEۖ}R` 'd _E;z7I˔n}b]+˓yj@аA=8`m dOq?ڑ~J[ 0^` Q@u/Jُv\b#Îob@̫>=Aet fVu7֌vEܩS8&]Ic ^ӨDq#Hۊ{%Tb $h>^E<}9Gƈk9xc]~.χ}!i@ҤF<2aHi cܣKpBۅ  }L^ -bE^ M>u2I҃t1} ^a'Ƃu.d@̟@=?eDo eT q<xFޝW 5!_Ia ZϜBp(Hz)ꀍYc  k:^Dʚ<}:Ek=vfZy1كnDטL ;.`Jf bؗHpD} "R` )dD]G;z5Jo6| a^+x*܈g@ϓ?=:cGm eOq? 
Ȋ{HY 2 `I_ T?r-Kx-[a$o7ڍ`Aˎ;<ChBsfVt7 ՏsDڌO8)]Kd ]ՋCp!H|&S` %f @_E;{8Hm8|c\,z$ٔj@҇C=5`Hk c߆KpAÖL\ .`F` O=v1Js/]b%r1ڙd@̂>=?dCr fU t9ϛxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۠mAI <0^Jh `JpC "M^ ,dE]H;y5Js4~ `_(w+ۥg@?<;dEn eOq =˧yIX 2`I` W?s*Iy+[a!k8ڪ`C=;Cg?tgYx 4٬pEO8*]Je `FpF|#Ta ' e A]H={7Jn7{c_,y&۱iAB=8`Hk eNpAƳ|J\ 0 `G_ Q?u/Kw.\b%q2۶b@>=BgBr fVs9ҸuET7&_Ib \Do$I}'Ta #!j?^E:|:Hm<xd\{0~!ؽkAF<2`Hj bInBڿ  M] -bE_ L<u3Iнt1 _`&żv.f??>=cDp eQ q ;׺zGW 4^Jb XBr)Iظx+Xc !i:;<Ei?Yy2ѵ} K :-^KFpEܳ} $_ (d C;z6Iͱo7^*ʰx)?=9aHNq>ڮY 1^H>t.J٬v,a"«n5<=BfAUv6թQ8']JEq#Gۧ|%b #g ?<}8Gǥm9^~-Τ{"E=4_HLpAۢ  ^ .`F>v1IԠs0a%şs0@=>dCT s9؝V 6!_JCr'Jڛ|(c !"j;=};Gk=[y2јK ;0`IInFܖ !` + dC=w4JΔr4`(ȓx*A=;cFQp =ב Z 3`J?q+I؏y,a!m8;<ChAWv4Ҍ N9*]KDp Fڊ~%` & g B;y8IȈn9]~-͇y&A=7`ILpBڅ![ / `H=u0KՃt0`&Ăp2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|؃H ;1_IKpC !_ ,bE=w4Ir2a'u-܈A?<bER r<͊X 5^IAr)Ix*c !k9ڍ<bDj>vxz4ُo i+_JeniE "}f B]x^ Ip6{~+z'۔hc8bGkqo=ǖ|w^G_ uaJv-뀗"o4ۙ`^BeAruw8ԛtm']IcqfI{% h >^}]Hk9x~~ /}!ڠig4aJipmC "¢}z bG^ u`Kt2} 'u0ܥd`?eFo qs< Χxr!_JcpdHz(낶 i;`}]Ei=xzz1}جo h.^IhpjD~ !}cC_y^ Io4} )w*ڱhd:aFors= ȳ|w^IatdJw-킴"m7ڶ``BfBsvy5 ոsl)]KdpjF~&g @_{_Hm8|~ ,z$۽jf5`HkppAڿĿ{`F` vbJսs/%ļr1b`?dCq tv9׺vo"]IbqeH۸z' h<^~]Fj<w{{0е}k h1^IgpkD۳ !|dD]y_ Kϱs4| )ɰw+e`=dEn qr>ٮyt!`I`qcJڬy+"l8`^Ej=wv9x4ԩq 9!i+_JgoJiEڧ~"'"}f B_y7G^ Iɥp8}~2} +ʤ{'i=0d8bIkoFo?٢ |0 x`H_ u/JcJנv/(#Ÿo4b=;`AeCru=x7՝ u7o&]Kbq$IhIۛ{'#!!h?^}:C_Hƙk<x~4} 0ј|!k<*h2_HjpFoBۖ  - |`E_ w3IaIҔr1 -&Ɠt.f>7`=cDp s Bs;ّz4 s^Ibr)KdIڏx)&!i:`<@_Ei=wy8|2Ҍ}p ;$j-^JgpHkE܊} ")d B]z5F` I͈o5| /*ʇx)g=1b9cFmqCq>څ{2v`H_ r-IbJكx,'"‚o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"ۃ=+aHE Å.`F1Is0+s1ۈ=8dD=ϊ6_K'J|)!"j=;Bk>8!ڏ;'ہ`JI "+ ƃbE4Hq4/v+ڔ=4ۆbE Cɖ3 ҈^I+Kw+'k8ٙ<?g?<כ9!؍]J H|#( e A8En71y&۠>.ڒ`ID!Ƣ1̔`H0Iw0)q2ۥ=:gB =ҧ6ٙ_I$J}!"i9Bk5|:)ܞ_F +Ƞb2Hr,u>5ڣb A4ե^)Jx%k<?j9:"۪_H ( ìf 6Fp0z=1ۯbD1ϱ`.Kw)p==g>8!ڶ]#J|$"g 9Em4{<+ڻ_E -ɽ`2Is+s=8d?5](Jy #i;Ai7};&^I~ )!c5Ho0w<2a B3^+Iw&m<=f;9] I~&!g 8Eo2{=-aE/a0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 ^/Jv(o>;e=9]$I{%PjzPjzPjzPjzPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPP7QhyvfR9 :Qamw xm_H)-K_q~}nY@jzP ,Lbs}o\F$  Tfp{ ~sbH) +BVfq|~p]F!*I_p} |o^H)  jzP5`|\.[htS{RA]tw?]v~^4BbyzY(>ee@A]twFkhB P)X|^!PF BiY L{e:jzP3jg)!{ &SsNpd)Hnߜd)HxyJ &SsYUP>lݜ}DPF)^w5*lޜxNjzPNE,7 J~Jq2fٜ|:WٜJ"iݜk(JqHݜ}EP:o؜JPF%^~1-x؜}M jzPND[_eDvR4c&fԜ.=xԜR%s؜v+4c \לUPӜ5PF Rl pԜzP3~%!p Doݜ?~Мk 9ҜCj՜n Doݜ \ӜTPĜk PF,l.MМzPgUA~Evڜ:̜2 oМzEќJEvܜJϜ0Pœ.PF:~ޜG tΜzP#v [WT4oR;sלvɜLϜN o͜r ;sܜ!}̜ZPGPFAܜM"͜zP<ߜ!zd^ԜƜU͜s  ˜!^ܜNɜqPXPFNڜF/̜zPHޜ9~PҜŜV̜".ɜ.Pܜ tƜuP^PFPڜ/5˜zPMݜ[MA!iRPҜŜDa˜15ǜ1Pܜ"ŜkPWPFPڜ -ʜzPDܜ!xlWPҜĜ"ʜ=-Ŝ'Pܜ0ĜKPzgXH50%")0:I\lFPFP ~ocUH=2+")02-"".2ASbtzP2mZK>0)""%07HUh|t~/~P {k^PD;0'")5BSfxreWJ<00",2@M\p^wj]OA:0/""'08ATfv6sbQ?3."",070)""03 5`,PFP{bI)-X2nB/WwzP~W+NvQ[D*bRPxbH)vZ9-YNnK, Ai*l@*S~PxbH)#eA6`kPsG .o PFPlKbjD6fzP~^ S#!p\HPlS4 vO`W"FYDePN[^Pi~PFPvf4SzPvcR hJ8PAx.M&#f~<`P&jPx%uXPFPs_& bzP6_E^rbf{PKxfPL ~{"n%Pow <Pv PFPtjzP~rM X-/pTNPp P >rhnP*uWYP\PFP(,jzP4j[`5E%~P.P"u.!PvjP!PFPQPoPjzPv7cm Y{PkBP<Ak MkP$qPNMPFPpxjzPv<DX AzMNP]Pr )xPkHxxP~PFPPYjzP\+~{WQxP{nP*P]WP |qzP."PFP`jzPz9RxL {PM~PbjdPM#jzPr^PFPv*) jzP)DRXyPuDNPPL)P|qjzP,PFPPA__jzP\\{N]qPPl>Sf`P &jzPPFP)QjzPr" x>)<{PP'kx\"PJxjzPINPFP`tjzP G>X j]j<NPsP|PpAjzP pPFPjDDjzP;ndD+gkPjPW&0~PMPjzPvPFP}pfjzP_Sq(8+{PePSWRnPhjzPNPFPurxjzPxq3XYj]3NPbPqqPG<jzP!:PFPrxXjzPY %:<qePbPhc PfjzPUPFPjz0jzP @BiFxPbP@ oA5P|yjzPwjPFPjz<jzP/9""YXHu P)KP?Ynz xm\H1 bP2%OPP]jzPayPFPjzOjzPGќm.qM{x^ P"Suu\5bPMB1gP <jzPJPFPjz^ujzPVќ`SxPXyTbPxdS ~wP- 
jzP.PFPjzl`jzPdќX2@KP)pܜ~ObPds_kPDjzPPFPjzxSjzPoќ!bZlW Pt֜dbPQmXPWjzPPFPjzCjzP}ќX&]xP]ӜibP<xvKPemjzP"PFPjz5jzPќU*/KP!ϜP"k=Po\jzP,PFPjz#jzPќ(Pg`O P:P _+"PzPjzP0PFPjz"jzPќ0Ox8hzxPEPX'PBjzP9PFPjzjzPќ0Uy: KP?P%P0P5jzP<PFPjzjzPќ0 <sSH P)P0P4P%jzP<PFPjzjzPќ3EiIrztxPP:H<P"jzP;PFPjzjzPќ<UqG KPe{jXJ<0'".0PFPjzejjzPp, - Pk>bPqzQPBjzPSPFPjzX{jzPe"0pV`vPfbP"f`PxSjzPhpPFPjzEjzPU"UY`Fi}HPbP:Xs|Pl`jzP{`PFPjz0jzPBC ; " P=abPWJnP^ujzPMPFPjz.jzP%]{GYvP\bPou2<[POjzP0F DPjzQjzPvUPNTsrHPmbPbZ%BP<jzPHC%/<PjzmjzPyvH P{lbPHxPP0jzPn<7A6PjzhjzPq-~  7RvPWbPG'|oPXjzPs4OW.PjzK#jzPV\RF:b|f|HPFbPmd*{PxjzP*WfmPjz$TjzP/rg0T P=bP}B^.\PfjzPb.{PjzjzP)~$KvP<bPF[d3PDDjzPPjztjzPxeR<"mYvHPFbPy/'PtjzP<v-:|PjzP\jzPP jVA_ PRbP+dj?sPjzPzKh\edPjzjzP]~|-BvPibPuj6|DP__jzP-JDPjzKjzP~R0 yKpHPbP.5[,P) jzPwx +:PjzUjzPPgbDPju P bPj{mP_jzP9DpzPjzOjzP 2~n<:tPLbPTX.kC0PYjzPn!1jPjzt jzPj R"<iDPrbP(9PxjzP_\>w9Pjz5jjzP"wZ.^siP_EfPfLxDPpPjzP;EWPjzBjzPrg~^,I1tPDnP~bP),jzP.^d"2_Pjz<.mzP#cHWR )cDP~P{_SuUAPtjzPR3 &Pjz},zPim9jQj}\PwP~ Px\~P&pzHMJo"2jPjz'>NwPDj~zL>U&tP_!hsPQ-APC q'Pf4<z>~ v{FWPjz`e[kPHMWjRyfDP"UA\PU ]qPzM4\P [Nz'5bcu"1^PjzEHTPs; HzjvPzKN7Pf(F P2!qP@:2gz}G2k;m.:wPjz"P 8i$P I'X}j~M`tP5_"@PV S.PJ}C5r!PjD;fzx`:DjT3`3:e.Pjz<vN:^~P)tH@`{jR.DPwa7.SrlPgD$P{HPmoB :f?PnB@`|zDm_SC80.""%05O`qUPraO>2-"",0?M\lz\oΜkPjzJ˜~PEݜj|[jtPIsPɜ^PŜ`P-ÜzAœNyϜuPjz>̜PJޜjN'<DPePʜ]PǜcP5ĜzpĜ4wМuPjz'͜.P>ߜjPnEP̜PPɜ[P0ŜzƜzkќgPjz rΜ)P+j{hutPfœWP͜8P z̜FP#Ɯz |ɜPQӜNPjzJМuPvjN:JDPKƜTPΜ|P^Ϝ)Ptǜz`̜u,Ԝ)Pjzsќ>PMWP!}Ȝ9PМRP6ҜhPMɜz5Ϝ:\֜XPjz9ӜNPme{s~rPA̜e P՜mP^՜2PpԜzKӜBjٜiPjzF֜t:P)v}M NJVAPNМcPVٜq&PjٜJP.xל[jzP>qb%P M{lEjz'OknJ6a`5PjzQu|cDP Dhv`ANXaAPOoiQ.PDh{]2P5^}xW'P ChfDjz 4M]hu~teS6"F^myxl]D"  B[k{ zm^N32Nct{rdSA)7Pfv vl`TA)P.MdxxjXA!DZlz{mZB-K`q~~o[B#jzPjzPjzPkzPrzPvzPsPpPiP4\PYHP2P P[PkPTDPPruPP>P7P=SP0c2^P0m@GFP0{X,XP0sQ, ?b!P0vdTI:0*"-06DQ]l~MP0ǜqP0ȜP0ɜP0ʜP0˜}P0͜^P0Μ,P&ќEP'Sr؜x>PKnߜ~UP=\w}b> P6Qes xnbP5 337&,8 /#56#/8-&6527)* 8 1!46$.8.%63/6+*9þ 3 5Ź8&,´8 /#5í5!/è8,'7á fUu7ֵER7&] \Bq$H۳{Ta #hE:}:GƱkd\~.ϰ}AF<2_ bIpBۮ M] -`L<w3IҬr_`&ƫt??>=c eQ s ;٩GW 3^ XBr'IڧxXc !iD<:EifZy2Ҥ}DL ;-^ bHpEܢ} R` )dG<z5I͠oa_*ʟx@ϼA=9c ePq?ڝHZ 2 `T@r-Kٛx[c$šoAʷ=<ChsfWt7՘sDڵQ8)_d ]ӴEo!Hۖ}Sb %h _E<z8Hǔo|c^~,̓|j@ҰE=5ai cݯKnAّ}N\ .b^ O=t1Jԏu~_`%Îtc@ͫ>=?fo fS s9֌wEݩT 6"_a YѨAr'Hي|뀁Wa !"h^C;~;GjwdZ{2ч}mAդI ;0^f aڣHpF܅ N^ + b]I;y4JЃq~ `_(ɂvg@ϟ?=;bo eOr ={HX 3^a U?s+IwYa!kaAʚ=<CgufYx4rC٘O9*]f ^֗Fp F|Ra (e _E={8Hn|b_+yk@ѓB>6`l dNp@~I\ 0`` O?u/Jw\b$qb@ˎ>=@gr fV s8uEیT7$_b \ҋBo$H}퀞Ta #j^F;|9Fmxe]{/~jCՇH :2`h dۆKpB O_ +b^M=w2Ir} aa&udA΂A>>bn gR r =yIX 4 ^` XAr)JxXc #k^B<<EjveYx4oBK :-_e `FnF Q_ (f ]F;x6Jp{a^,zh@?=9bk dNo>}IY 1^a S>s.Jv\a"nbA<=Bft fUv6uDQ8']e ]Eq#G|Tb $g _E<}9Gmzc^~-{k@E>4_k cLpA L^ /`` M<v2Is}^`%sd>>?>do eR s9xFU 5!_c YAr(H|Wa   j`B;;EixdYz1}oBJ :.^h aGpDܿ~ P_ (c]J=y4Iνo| b`)ɼwfBA<:am fQr =ٺzJZ 3^` VAt+Jٸw\c"m`C=<DfsfYv6ԵrCP9*]d ^Ep Gܳ~Ra & g ]E=y8Iȱozb^~-Ͱ{h@C=7aj dMoBڮ}K] / a^ P?t0Kլu]b&īrd@==@fs fTs8թwES 7$_d [Bo%H٧|Va "k`D;|:FålydZz/Τ~kAG ;1`g bInCڢ N^ ,b]J;z4IϠr| `_'Ɵue?μ?=<bn eP r<םyGW 4^` W?r*JۛxYa"j`Bʷ;<EhweXx4՘qCصM 9+^g _״EpEۖ}R` 'd _E;z7I˔n}b]+˓yj@аA=8`m dOq?ڑ~J[ 0^` Q@u/Jُv\b#Îob@̫>=Aet fVu7֌vEܩS8&]Ic ^ӨDq#Hۊ{%Tb $h>^E<}9Gƈk9xc]~.χ}!i@ҤF<2aHi cܣKpBۅ  }L^ -bE^ M>u2I҃t1} ^a'Ƃu.d@̟@=?eDo eT q<xFޝW 5!_Ia ZϜBp(Hz)ꀍYc  k:^Dʚ<}:Ek=vfZy1كnDטL ;.`Jf bؗHpD} "R` )dD]G;z5Jo6| a^+x*܈g@ϓ?=:cGm eOq? 
Ȋ{HY 2 `I_ T?r-Kx-[a$o7ڍ`Aˎ;<ChBsfVt7 ՏsDڌO8)]Kd ]ՋCp!H|&S` %f @_E;{8Hm8|c\,z$ٔj@҇C=5`Hk c߆KpAÖL\ .`F` O=v1Js/]b%r1ڙd@̂>=?dCr fU t9ϛxEU 6"]Ic YCq'Hz'삯Wb # h<`C=~;Fj<yd[{0}۠mAI <0^Jh `JpC "M^ ,dE]H;y5Js4~ `_(w+ۥg@?<;dEn eOq =˧yIX 2`I` W?s*Iy+[a!k8ڪ`C=;Cg?tgYx 4٬pEO8*]Je `FpF|#Ta ' e A]H={7Jn7{c_,y&۱iAB=8`Hk eNpAƳ|J\ 0 `G_ Q?u/Kw.\b%q2۶b@>=BgBr fVs9ҸuET7&_Ib \Do$I}'Ta #!j?^E:|:Hm<xd\{0~!ؽkAF<2`Hj bInBڿ  M] -bE_ L<u3Iнt1 _`&żv.f??>=cDp eQ q ;׺zGW 4^Jb XBr)Iظx+Xc !i:;<Ei?Yy2ѵ} K :-^KFpEܳ} $_ (d C;z6Iͱo7^*ʰx)?=9aHNq>ڮY 1^H>t.J٬v,a"«n5<=BfAUv6թQ8']JEq#Gۧ|%b #g ?<}8Gǥm9^~-Τ{"E=4_HLpAۢ  ^ .`F>v1IԠs0a%şs0@=>dCT s9؝V 6!_JCr'Jڛ|(c !"j;=};Gk=[y2јK ;0`IInFܖ !` + dC=w4JΔr4`(ȓx*A=;cFQp =ב Z 3`J?q+I؏y,a!m8;<ChAWv4Ҍ N9*]KDp Fڊ~%` & g B;y8IȈn9]~-͇y&A=7`ILpBڅ![ / `H=u0KՃt0`&Ăp2==@eBTu8S 7$]IBq%H{&a "i=;~:Fk;]|/|؃H ;1_IKpC !_ ,bE=w4Ir2a'u-܈A?<bER r<͊X 5^IAr)Ix*c !k9ڍ<bDj>vxz4ُo i+_JeniE "}f B]x^ Ip6{~+z'۔hc8bGkqo=ǖ|w^G_ uaJv-뀗"o4ۙ`^BeAruw8ԛtm']IcqfI{% h >^}]Hk9x~~ /}!ڠig4aJipmC "¢}z bG^ u`Kt2} 'u0ܥd`?eFo qs< Χxr!_JcpdHz(낶 i;`}]Ei=xzz1}جo h.^IhpjD~ !}cC_y^ Io4} )w*ڱhd:aFors= ȳ|w^IatdJw-킴"m7ڶ``BfBsvy5 ոsl)]KdpjF~&g @_{_Hm8|~ ,z$۽jf5`HkppAڿĿ{`F` vbJսs/%ļr1b`?dCq tv9׺vo"]IbqeH۸z' h<^~]Fj<w{{0е}k h1^IgpkD۳ !|dD]y_ Kϱs4| )ɰw+e`=dEn qr>ٮyt!`I`qcJڬy+"l8`^Ej=wv9x4ԩq 9!i+_JgoJiEڧ~"'"}f B_y7G^ Iɥp8}~2} +ʤ{'i=0d8bIkoFo?٢ |0 x`H_ u/JcJנv/(#Ÿo4b=;`AeCru=x7՝ u7o&]Kbq$IhIۛ{'#!!h?^}:C_Hƙk<x~4} 0ј|!k<*h2_HjpFoBۖ  - |`E_ w3IaIҔr1 -&Ɠt.f>7`=cDp s Bs;ّz4 s^Ibr)KdIڏx)&!i:`<@_Ei=wy8|2Ҍ}p ;$j-^JgpHkE܊} ")d B]z5F` I͈o5| /*ʇx)g=1b9cFmqCq>څ{2v`H_ r-IbJكx,'"‚o5`<<^BhAst<w7s8k)_J!I}%#!h ?8Do94|"ۃ=+aHE Å.`F1Is0+s1ۈ=8dD=ϊ6_K'J|)!"j=;Bk>8!ڏ;'ہ`JI "+ ƃbE4Hq4/v+ڔ=4ۆbE Cɖ3 ҈^I+Kw+'k8ٙ<?g?<כ9!؍]J H|#( e A8En71y&۠>.ڒ`ID!Ƣ1̔`H0Iw0)q2ۥ=:gB =ҧ6ٙ_I$J}!"i9Bk5|:)ܞ_F +Ƞb2Hr,u>5ڣb A4ե^)Jx%k<?j9:"۪_H ( ìf 6Fp0z=1ۯbD1ϱ`.Kw)p==g>8!ڶ]#J|$"g 9Em4{<+ڻ_E -ɽ`2Is+s=8d?5](Jy #i;Ai7};&^I~ )!c5Ho0w<2a B3^+Iw&m<=f;9] I~&!g 8Eo2{=-aE/a0Iu*r=9f>7_%J|""k:Bl7~;)^H ,d4Hs,w=5bA4^*Kx'j<@h:9#^J}'"d 7Gn2y=0`F0 ^/Jv(o>;e=9]$I{%PjzPjzPjzPjzPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPPjzPP7QhyvfR9 :Qamw xm_H)-K_q~}nY@jzP ,Lbs}o\F$  Tfp{ ~sbH) +BVfq|~p]F!*I_p} |o^H)  jzP5`|\.[htS{RA]tw?]v~^4BbyzY(>ee@A]twFkhB P)X|^!PF BiY L{e:jzP3jg)!{ &SsNpd)Hnߜd)HxyJ &SsYUP>lݜ}DPF)^w5*lޜxNjzPNE,7 J~Jq2fٜ|:WٜJ"iݜk(JqHݜ}EP:o؜JPF%^~1-x؜}M jzPND[_eDvR4c&fԜ.=xԜR%s؜v+4c \לUPӜ5PF Rl pԜzP3~%!p Doݜ?~Мk 9ҜCj՜n Doݜ \ӜTPĜk PF,l.MМzPgUA~Evڜ:̜2 oМzEќJEvܜJϜ0Pœ.PF:~ޜG tΜzP#v [WT4oR;sלvɜLϜN o͜r ;sܜ!}̜ZPGPFAܜM"͜zP<ߜ!zd^ԜƜU͜s  ˜!^ܜNɜqPXPFNڜF/̜zPHޜ9~PҜŜV̜".ɜ.Pܜ tƜuP^PFPڜ/5˜zPMݜ[MA!iRPҜŜDa˜15ǜ1Pܜ"ŜkPWPFPڜ -ʜzPDܜ!xlWPҜĜ"ʜ=-Ŝ'Pܜ0ĜKPzgXH50%")0:I\lFPFP ~ocUH=2+")02-"".2ASbtzP2mZK>0)""%07HUh|t~/~P {k^PD;0'")5BSfxreWJ<00",2@M\p^wj]OA:0/""'08ATfv6sbQ?3."",070)""03 5`,PFP{bI)-X2nB/WwzP~W+NvQ[D*bRPxbH)vZ9-YNnK, Ai*l@*S~PxbH)#eA6`kPsG .o PFPlKbjD6fzP~^ S#!p\HPlS4 vO`W"FYDePN[^Pi~PFPvf4SzPvcR hJ8PAx.M&#f~<`P&jPx%uXPFPs_& bzP6_E^rbf{PKxfPL ~{"n%Pow <Pv PFPtjzP~rM X-/pTNPp P >rhnP*uWYP\PFP(,jzP4j[`5E%~P.P"u.!PvjP!PFPQPoPjzPv7cm Y{PkBP<Ak MkP$qPNMPFPpxjzPv<DX AzMNP]Pr )xPkHxxP~PFPPYjzP\+~{WQxP{nP*P]WP |qzP."PFP`jzPz9RxL {PM~PbjdPM#jzPr^PFPv*) jzP)DRXyPuDNPPL)P|qjzP,PFPPA__jzP\\{N]qPPl>Sf`P &jzPPFP)QjzPr" x>)<{PP'kx\"PJxjzPINPFP`tjzP G>X j]j<NPsP|PpAjzP pPFPjDDjzP;ndD+gkPjPW&0~PMPjzPvPFP}pfjzP_Sq(8+{PePSWRnPhjzPNPFPurxjzPxq3XYj]3NPbPqqPG<jzP!:PFPrxXjzPY %:<qePbPhc PfjzPUPFPjz0jzP @BiFxPbP@ oA5P|yjzPwjPFPjz<jzP/9""YXHu P)KP?Ynz xm\H1 bP2%OPP]jzPayPFPjzOjzPGќm.qM{x^ P"Suu\5bPMB1gP <jzPJPFPjz^ujzPVќ`SxPXyTbPxdS ~wP- 
bundlewrap-4.13.6/bundlewrap/000077500000000000000000000000001417502274300161205ustar00rootroot00000000000000bundlewrap-4.13.6/bundlewrap/__init__.py000066400000000000000000000001121417502274300202330ustar00rootroot00000000000000VERSION = (4, 13, 6)
VERSION_STRING = ".".join([str(v) for v in VERSION])
bundlewrap-4.13.6/bundlewrap/bundle.py000066400000000000000000000146251417502274300177510ustar00rootroot00000000000000from os.path import exists, join

from .exceptions import BundleError, NoSuchBundle, RepositoryError
from .metadata import DoNotRunAgain
from .utils import cached_property
from .utils.text import bold, mark_for_translation as _
from .utils.text import validate_name
from .utils.ui import io


FILENAME_BUNDLE = "bundle.py"
FILENAME_ITEMS = "items.py"
FILENAME_METADATA = "metadata.py"


def metadata_reactor_for_bundle(bundle_name):
    reactor_names = set()

    def metadata_reactor(func):
        """
        Decorator that tags metadata reactors.
        """
        if func.__name__ == "defaults":
            raise ValueError(_(
                "metadata reactor in bundle '{}' cannot be named 'defaults'"
            ).format(bundle_name))
        if func.__name__ in reactor_names:
            raise ValueError(_(
                "duplicate metadata reactor '{reactor}' in bundle '{bundle}'"
            ).format(bundle=bundle_name, reactor=func.__name__))
        reactor_names.add(func.__name__)
        func._is_metadata_reactor = True
        return func

    def metadata_reactor_provides(*args):
        def provides_inner(func):
            func._provides = set()
            for arg in args:
                if isinstance(arg, str):
                    arg = arg.split("/")
                func._provides.add(tuple(arg))
            return metadata_reactor(func)
        return provides_inner

    metadata_reactor.provides = metadata_reactor_provides

    return metadata_reactor


class Bundle:
    """
    A collection of config items, bound to a node.
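    Bundles are normally instantiated for you by Node. A minimal manual
    sketch (the bundle name "nginx" is an assumed example and must exist
    under bundles/ in the repository):

        bundle = Bundle(node, "nginx")
        for item in bundle.items:
            print(item.id)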
""" def __init__(self, node, name): self.name = name self.node = node self.repo = node.repo if not validate_name(name): raise RepositoryError(_("invalid bundle name: {}").format(name)) if name not in self.repo.bundle_names: raise NoSuchBundle(_("bundle not found: {}").format(name)) self.bundle_dir = join(self.repo.bundles_dir, self.name) self.bundle_data_dir = join(self.repo.data_dir, self.name) self.bundle_file = join(self.bundle_dir, FILENAME_BUNDLE) self.items_file = join(self.bundle_dir, FILENAME_ITEMS) self.metadata_file = join(self.bundle_dir, FILENAME_METADATA) def __lt__(self, other): return self.name < other.name @cached_property @io.job_wrapper(_("{} {} parsing bundle attributes").format(bold("{0.node.name}"), bold("{0.name}"))) def bundle_attrs(self): if not exists(self.bundle_file): return {} else: base_env = { 'node': self.node, 'repo': self.repo, } # TODO prevent access to node.metadata return self.repo.get_all_attrs_from_file( self.bundle_file, base_env=base_env, ) @cached_property @io.job_wrapper(_("{} {} parsing bundle items").format(bold("{0.node.name}"), bold("{0.name}"))) def bundle_item_attrs(self): if not exists(self.items_file): return {} else: base_env = { 'node': self.node, 'repo': self.repo, } for item_class in self.repo.item_classes: base_env[item_class.BUNDLE_ATTRIBUTE_NAME] = {} return self.repo.get_all_attrs_from_file( self.items_file, base_env=base_env, ) @cached_property @io.job_wrapper(_("{} {} creating items").format(bold("{0.node.name}"), bold("{0.name}"))) def items(self): for item_class in self.repo.item_classes: for item_name, item_attrs in self.bundle_item_attrs.get( item_class.BUNDLE_ATTRIBUTE_NAME, {}, ).items(): yield self.make_item( item_class.BUNDLE_ATTRIBUTE_NAME, item_name, item_attrs, ) def make_item(self, attribute_name, item_name, item_attrs): for item_class in self.repo.item_classes: if item_class.BUNDLE_ATTRIBUTE_NAME == attribute_name: return item_class(self, item_name, item_attrs) raise RuntimeError( _("bundle '{bundle}' tried to generate item '{item}' from " "unknown attribute '{attr}'").format( attr=attribute_name, bundle=self.name, item=item_name, ) ) @cached_property def _metadata_defaults_and_reactors(self): with io.job(_("{node} {bundle} collecting metadata reactors").format( node=bold(self.node.name), bundle=bold(self.name), )): if not exists(self.metadata_file): return {}, set() defaults = {} reactors = set() internal_names = set() for name, attr in self.repo.get_all_attrs_from_file( self.metadata_file, base_env={ 'DoNotRunAgain': DoNotRunAgain, 'metadata_reactor': metadata_reactor_for_bundle(self.name), 'node': self.node, 'repo': self.repo, }, ).items(): if name == "defaults": defaults = attr elif getattr(attr, '_is_metadata_reactor', False): internal_name = getattr(attr, '__name__', name) if internal_name in internal_names: raise BundleError(_( "Metadata reactor '{name}' in bundle {bundle} for node {node} has " "__name__ '{internal_name}', which was previously used by another " "metadata reactor in the same metadata.py. BundleWrap uses __name__ " "internally to tell metadata reactors apart, so this is a problem. " "Perhaps you used a decorator on your metadata reactors that " "doesn't use functools.wraps? You should use that." 
).format( bundle=self.name, node=self.node.name, internal_name=internal_name, name=name, )) internal_names.add(internal_name) reactors.add(attr) return defaults, reactors bundlewrap-4.13.6/bundlewrap/cmdline/000077500000000000000000000000001417502274300175335ustar00rootroot00000000000000bundlewrap-4.13.6/bundlewrap/cmdline/__init__.py000066400000000000000000000045671417502274300216600ustar00rootroot00000000000000from cProfile import Profile from os import environ from os.path import abspath from shlex import quote from sys import argv, exit from traceback import format_exc from ..exceptions import NoSuchRepository, MissingRepoDependency from ..repo import Repository from ..utils.cmdline import suppress_broken_pipe_msg from ..utils.text import force_text, mark_for_translation as _, red from ..utils.ui import io from .parser import build_parser_bw @suppress_broken_pipe_msg def main(*args, **kwargs): """ Entry point for the 'bw' command line utility. The args and path parameters are used for integration tests. """ if not args: args = argv[1:] text_args = [force_text(arg) for arg in args] parser_bw = build_parser_bw() pargs = parser_bw.parse_args(args) if not hasattr(pargs, 'func'): parser_bw.print_help() exit(2) if pargs.profile: profile = Profile() profile.enable() path = abspath(pargs.repo_path) io.debug_mode = pargs.debug io.activate() io.debug(_("invocation: {}").format(" ".join([force_text(arg) for arg in argv]))) environ.setdefault('BW_ADD_HOST_KEYS', "1" if pargs.add_ssh_host_keys else "0") if len(text_args) >= 1 and ( text_args[0] == "--version" or (len(text_args) >= 2 and text_args[0] == "repo" and text_args[1] == "create") or text_args[0] == "zen" or "-h" in text_args or "--help" in text_args ): # 'bw repo create' is a special case that only takes a path repo = path else: try: repo = Repository(path) except NoSuchRepository: io.stderr(_( "{x} {path} " "is not a BundleWrap repository." 
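            # NoSuchRepository indicates the path does not contain the expected
            # repository layout (a BundleWrap repo is marked by files such as
            # nodes.py and groups.py at its root).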
).format(path=quote(abspath(pargs.repo_path)), x=red("!!!"))) io.deactivate() exit(1) except MissingRepoDependency as exc: io.stderr(str(exc)) io.deactivate() exit(1) except Exception: io.stderr(format_exc()) io.deactivate() exit(1) # convert all string args into text text_pargs = {key: force_text(value) for key, value in vars(pargs).items()} try: pargs.func(repo, text_pargs) finally: io.deactivate() if pargs.profile: profile.disable() profile.dump_stats(pargs.profile) bundlewrap-4.13.6/bundlewrap/cmdline/apply.py000066400000000000000000000111441417502274300212330ustar00rootroot00000000000000from datetime import datetime from sys import exit from ..concurrency import WorkerPool from ..exceptions import GracefulApplyException from ..utils import SkipList from ..utils.cmdline import count_items, get_target_nodes from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import ( blue, bold, error_summary, format_duration, green, green_unless_zero, mark_for_translation as _, red, red_unless_zero, yellow, yellow_unless_zero, ) from ..utils.ui import io def bw_apply(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['targets']) pending_nodes = target_nodes.copy() try: repo.hooks.apply_start( repo, args['targets'], target_nodes, interactive=args['interactive'], ) except GracefulApplyException as exc: io.stderr(_("{x} apply aborted by hook ({reason})").format( reason=str(exc) or _("no reason given"), x=red("!!!"), )) exit(1) io.progress_set_total(count_items(pending_nodes)) start_time = datetime.now() results = [] skip_list = SkipList(args['resume_file']) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': node.apply, 'task_id': node.name, 'kwargs': { 'autoskip_selector': args['autoskip'], 'autoonly_selector': args['autoonly'], 'force': args['force'], 'interactive': args['interactive'], 'show_diff': args['show_diff'], 'skip_list': skip_list, 'workers': args['item_workers'], }, } def handle_result(task_id, return_value, duration): if return_value is None: # node skipped return skip_list.add(task_id) results.append(return_value) def handle_exception(task_id, exception, traceback): msg = _("{x} {node} {msg}").format( node=bold(task_id), msg=exception, x=red("!"), ) if isinstance(exception, GracefulApplyException): errors.append(msg) else: io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_result=handle_result, handle_exception=handle_exception, cleanup=skip_list.dump, pool_id="apply", workers=args['node_workers'], ) worker_pool.run() total_duration = datetime.now() - start_time totals = stats(results) if args['summary'] and results: stats_summary(results, totals, total_duration) error_summary(errors) repo.hooks.apply_end( repo, args['targets'], target_nodes, duration=total_duration, ) exit(1 if errors or totals['failed'] else 0) def stats(results): totals = { 'items': 0, 'correct': 0, 'fixed': 0, 'skipped': 0, 'failed': 0, } for result in results: totals['items'] += result.total for metric in ('correct', 'fixed', 'skipped', 'failed'): totals[metric] += getattr(result, metric) return totals def stats_summary(results, totals, total_duration): rows = [[ bold(_("node")), _("items"), _("OK"), green(_("fixed")), yellow(_("skipped")), red(_("failed")), _("time"), ], ROW_SEPARATOR] for result in sorted(results): rows.append([ result.node_name, str(result.total), str(result.correct), green_unless_zero(result.fixed), 
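            # the *_unless_zero helpers only colorize non-zero counts, so rows
            # without fixes, skips, or failures stay uncolored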
yellow_unless_zero(result.skipped), red_unless_zero(result.failed), format_duration(result.duration), ]) if len(results) > 1: rows.append(ROW_SEPARATOR) rows.append([ bold(_("total ({} nodes)").format(len(results))), str(totals['items']), str(totals['correct']), green_unless_zero(totals['fixed']), yellow_unless_zero(totals['skipped']), red_unless_zero(totals['failed']), format_duration(total_duration), ]) alignments = { 1: 'right', 2: 'right', 3: 'right', 4: 'right', 5: 'right', 6: 'right', } for line in render_table(rows, alignments=alignments): io.stdout("{x} {line}".format(x=blue("i"), line=line)) bundlewrap-4.13.6/bundlewrap/cmdline/debug.py000066400000000000000000000014701417502274300211750ustar00rootroot00000000000000from code import interact from .. import VERSION_STRING from ..utils.cmdline import get_node from ..utils.text import mark_for_translation as _ from ..utils.ui import io DEBUG_BANNER = _("BundleWrap {version} interactive repository inspector\n" "> You can access the current repository as 'repo'." "").format(version=VERSION_STRING) DEBUG_BANNER_NODE = DEBUG_BANNER + "\n" + \ _("> You can access the selected node as 'node'.") def bw_debug(repo, args): if args['node'] is None: env = {'repo': repo} banner = DEBUG_BANNER else: env = {'node': get_node(repo, args['node']), 'repo': repo} banner = DEBUG_BANNER_NODE io.deactivate() if args['command']: exec(args['command'], env) else: interact(banner, local=env) bundlewrap-4.13.6/bundlewrap/cmdline/diff.py000066400000000000000000000321431417502274300210200ustar00rootroot00000000000000from copy import copy from difflib import unified_diff from sys import exit from ..exceptions import NoSuchItem from ..metadata import metadata_to_json from ..repo import Repository from ..utils.cmdline import get_target_nodes from ..utils.dicts import diff_dict, dict_to_text from ..utils.scm import get_git_branch, get_git_rev, set_git_rev from ..utils.text import ( bold, force_text, green, mark_for_translation as _, prefix_lines, red, blue, yellow, ) from ..utils.ui import io, QUIT_EVENT from subprocess import check_call def diff_metadata(node_a, node_b): node_a_metadata = metadata_to_json(node_a.metadata).splitlines() node_b_metadata = metadata_to_json(node_b.metadata).splitlines() io.stdout("\n".join(unified_diff( node_a_metadata, node_b_metadata, fromfile=node_a.name, tofile=node_b.name, lineterm='', ))) def diff_item(node_a, node_b, item): item_a = node_a.get_item(item) item_a_dict = item_a.display_on_create(item_a.cdict().copy()) item_b = node_b.get_item(item) item_b_dict = item_b.display_on_create(item_b.cdict().copy()) io.stdout(diff_dict(item_a_dict, item_b_dict)) def diff_node(node_a, node_b): node_a_hashes = sorted( ["{}\t{}".format(i, h) for i, h in node_a.cdict.items()] ) node_b_hashes = sorted( ["{}\t{}".format(i, h) for i, h in node_b.cdict.items()] ) io.stdout("\n".join( filter( lambda line: line.startswith("+") or line.startswith("-"), unified_diff( node_a_hashes, node_b_hashes, fromfile=node_a.name, tofile=node_b.name, lineterm='', n=0, ), ), )) def command_closure(command): def run_it(): io.stderr(_( "{x} Running: {command}" ).format( command=command, x=yellow("i"), )) check_call(command, shell=True) return run_it def git_checkout_closure(rev, detach=False): def run_it(): io.stderr(_( "{x} {git} switching to rev: {rev}" ).format( x=blue("i"), git=bold("git"), rev=rev, )) set_git_rev(rev, detach=detach) return run_it def hooked_diff_metadata_single_node(repo, node, intermissions, epilogues): node_before_metadata = 
metadata_to_json(node.metadata).splitlines() for intermission in intermissions: intermission() after_repo = Repository(repo.path) node_after = after_repo.get_node(node.name) node_after_metadata = metadata_to_json(node_after.metadata).splitlines() io.stdout("\n".join(unified_diff( node_before_metadata, node_after_metadata, fromfile=_("before"), tofile=_("after"), lineterm='', ))) for epilogue in epilogues: epilogue() def hooked_diff_metadata_multiple_nodes(repo, nodes, intermissions, epilogues): nodes_metadata_before = {} for node in nodes: if QUIT_EVENT.is_set(): exit(1) nodes_metadata_before[node.name] = node.metadata_hash() for intermission in intermissions: intermission() after_repo = Repository(repo.path) nodes_metadata_after = {} for node_name in nodes_metadata_before: if QUIT_EVENT.is_set(): exit(1) nodes_metadata_after[node_name] = \ after_repo.get_node(node_name).metadata_hash() node_hashes_before = sorted( ["{}\t{}".format(i, h) for i, h in nodes_metadata_before.items()] ) node_hashes_after = sorted( ["{}\t{}".format(i, h) for i, h in nodes_metadata_after.items()] ) io.stdout("\n".join( filter( lambda line: line.startswith("+") or line.startswith("-"), unified_diff( node_hashes_before, node_hashes_after, fromfile=_("before"), tofile=_("after"), lineterm='', n=0, ), ), )) for epilogue in epilogues: epilogue() def hooked_diff_single_item(repo, node, item, intermissions, epilogues): try: item_before = node.get_item(item) except NoSuchItem: item_before = None item_before_dict = None else: item_before_dict = item_before.cdict() if item_before_dict: item_before_dict = item_before.display_on_create(copy(item_before_dict)) for intermission in intermissions: intermission() repo_after = Repository(repo.path) node_after = repo_after.get_node(node.name) try: item_after = node_after.get_item(item) except NoSuchItem: item_after = None item_after_dict = None else: item_after_dict = item_after.cdict() if item_after_dict: item_after_dict = item_after.display_on_create(copy(item_after_dict)) for epilogue in epilogues: epilogue() if item_before is None and item_after is None: io.stderr(_("{x} {node} {item} not found anywhere").format( x=bold(red("!")), node=bold(node.name), item=bold(item), )) exit(1) if item_before is None: io.stdout(_("{x} {node} {item} not found previously").format( x=bold(yellow("!")), node=bold(node.name), item=bold(item), )) if item_before_dict and item_after_dict: io.stdout( f"{bold(blue('i'))} {bold(node.name)} {bold(item_before.bundle.name)} {item}\n" + prefix_lines( "\n" + diff_dict(item_before_dict, item_after_dict), yellow("│ "), ).rstrip("\n") + "\n" + yellow("╵") ) elif item_before_dict: io.stdout( f"{bold(red('-'))} {bold(node.name)} {bold(item_before.bundle.name)} {item}\n" + prefix_lines( "\n" + dict_to_text(item_before_dict, value_color=red), red("│ "), ).rstrip("\n") + "\n" + red("╵") ) elif item_after_dict: io.stdout( f"{bold(green('+'))} {bold(node.name)} {bold(item_after.bundle.name)} {item}\n" + prefix_lines( "\n" + dict_to_text(item_after_dict), green("│ "), ).rstrip("\n") + "\n" + green("╵") ) if item_after is None: io.stdout(_("{x} {node} {item} not found after").format( x=bold(yellow("!")), node=bold(node.name), item=bold(item), )) def hooked_diff_config_single_node(repo, node, intermissions, epilogues): item_hashes_before = { item.id: item.hash() for item in node.items if item.ITEM_TYPE_NAME != 'action' } for intermission in intermissions: intermission() after_repo = Repository(repo.path) after_node = after_repo.get_node(node.name) item_hashes_after = { 
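        # re-hashed via a freshly loaded Repository so the diff reflects the
        # repo changes made by the intermissions (branch switch, commands)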
item.id: item.hash() for item in after_node.items if item.ITEM_TYPE_NAME != 'action' } for epilogue in epilogues: epilogue() item_hashes_before = sorted( ["{}\t{}".format(i, h) for i, h in item_hashes_before.items()] ) item_hashes_after = sorted( ["{}\t{}".format(i, h) for i, h in item_hashes_after.items()] ) io.stdout("\n".join( filter( lambda line: line.startswith("+") or line.startswith("-"), unified_diff( item_hashes_before, item_hashes_after, fromfile=_("before"), tofile=_("after"), lineterm='', n=0, ), ), )) def hooked_diff_config_multiple_nodes(repo, nodes, intermissions, epilogues): nodes_config_before = {} for node in nodes: if QUIT_EVENT.is_set(): exit(1) nodes_config_before[node.name] = node.hash() for intermission in intermissions: intermission() after_repo = Repository(repo.path) nodes_config_after = {} for node_name in nodes_config_before: if QUIT_EVENT.is_set(): exit(1) nodes_config_after[node_name] = \ after_repo.get_node(node_name).hash() node_hashes_before = sorted( ["{}\t{}".format(i, h) for i, h in nodes_config_before.items()] ) node_hashes_after = sorted( ["{}\t{}".format(i, h) for i, h in nodes_config_after.items()] ) io.stdout("\n".join( filter( lambda line: line.startswith("+") or line.startswith("-"), unified_diff( node_hashes_before, node_hashes_after, fromfile=_("before"), tofile=_("after"), lineterm='', n=0, ), ), )) for epilogue in epilogues: epilogue() def bw_diff(repo, args): if args['metadata'] and args['item']: io.stderr(_( "{x} Cannot compare metadata and items at the same time" ).format(x=red("!!!"))) exit(1) target_nodes = sorted(get_target_nodes(repo, args['targets'])) if args['branch'] or args['cmd_change'] or args['cmd_reset'] or args['prompt']: intermissions = [] epilogues = [] if args['branch']: original_rev = force_text(get_git_branch() or get_git_rev()) intermissions.append(git_checkout_closure(force_text(args['branch']), detach=True)) if args['cmd_change']: intermissions.append(command_closure(args['cmd_change'])) if args['cmd_reset']: epilogues.append(command_closure(args['cmd_reset'])) if args['branch']: epilogues.append(git_checkout_closure(original_rev, detach=False)) if args['metadata']: if len(target_nodes) == 1: def intermission(): io.stdout(_("{x} Took a snapshot of that node's metadata.").format(x=blue("i"))) io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): exit(1) if args['prompt']: intermissions.append(intermission) hooked_diff_metadata_single_node(repo, target_nodes[0], intermissions, epilogues) else: def intermission(): io.stdout(_("{x} Took a snapshot of those nodes' metadata.").format(x=blue("i"))) io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): exit(1) if args['prompt']: intermissions.append(intermission) hooked_diff_metadata_multiple_nodes(repo, target_nodes, intermissions, epilogues) elif args['item']: if len(target_nodes) != 1: io.stderr(_( "{x} Select exactly one node to compare item" ).format(x=red("!!!"))) exit(1) def intermission(): io.stdout(_("{x} Took a snapshot of that item.").format(x=blue("i"))) io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) if not io.ask(_("{x} Ready to proceed? 
(n to cancel)").format(x=blue("?")), True): exit(1) if args['prompt']: intermissions.append(intermission) hooked_diff_single_item(repo, target_nodes[0], args['item'], intermissions, epilogues) elif len(target_nodes) == 1: def intermission(): io.stdout(_("{x} Took a snapshot of that node.").format(x=blue("i"))) io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): exit(1) if args['prompt']: intermissions.append(intermission) hooked_diff_config_single_node(repo, target_nodes[0], intermissions, epilogues) else: def intermission(): io.stdout(_("{x} Took a snapshot of those nodes.").format(x=blue("i"))) io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): exit(1) if args['prompt']: intermissions.append(intermission) hooked_diff_config_multiple_nodes(repo, target_nodes, intermissions, epilogues) else: if len(target_nodes) != 2: io.stderr(_( "{x} Exactly two nodes must be selected" ).format(x=red("!!!"))) exit(1) node_a, node_b = target_nodes if args['metadata']: diff_metadata(node_a, node_b) elif args['item']: diff_item(node_a, node_b, args['item']) else: diff_node(node_a, node_b) bundlewrap-4.13.6/bundlewrap/cmdline/groups.py000066400000000000000000000016711417502274300214310ustar00rootroot00000000000000from ..group import GROUP_ATTR_DEFAULTS from ..utils.text import bold, mark_for_translation as _ from ..utils.ui import io from .nodes import _attribute_table GROUP_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['nodes']) GROUP_ATTRS_LISTS = ('nodes',) def bw_groups(repo, args): if not args['groups']: for group in sorted(repo.groups): io.stdout(group.name) else: groups = {repo.get_group(group.strip()) for group in args['groups']} if not args['attrs']: subgroups = groups.copy() for group in groups: subgroups.update(group.subgroups) for subgroup in sorted(subgroups): io.stdout(subgroup.name) else: _attribute_table( groups, bold(_("group")), args['attrs'], GROUP_ATTRS, GROUP_ATTRS_LISTS, args['inline'], ) bundlewrap-4.13.6/bundlewrap/cmdline/hash.py000066400000000000000000000054441417502274300210370ustar00rootroot00000000000000from sys import exit from ..exceptions import NoSuchGroup, NoSuchNode from ..utils.cmdline import get_item from ..utils.text import mark_for_translation as _, red from ..utils.ui import io def bw_hash(repo, args): if args['group_membership'] and args['metadata']: io.stdout(_( "{x} Cannot hash group membership and metadata at the same time").format(x=red("!!!") )) exit(1) if args['group_membership'] and args['item']: io.stdout(_("{x} Cannot hash group membership for an item").format(x=red("!!!"))) exit(1) if args['item'] and args['metadata']: io.stdout(_("{x} Items don't have metadata").format(x=red("!!!"))) exit(1) if args['node_or_group']: try: target = repo.get_node(args['node_or_group']) target_type = 'node' except NoSuchNode: try: target = repo.get_group(args['node_or_group']) target_type = 'group' except NoSuchGroup: io.stderr(_("{x} No such node or group: {node_or_group}").format( node_or_group=args['node_or_group'], x=red("!!!"), )) exit(1) else: if args['item']: target = get_item(target, args['item']) target_type = 'item' else: target = repo target_type = 'repo' if target_type == 'node' and args['dict'] and args['metadata']: io.stdout(_("{x} Cannot show a metadata dict for a single node").format(x=red("!!!"))) exit(1) if target_type == 'group' and 
args['item']: io.stdout(_("{x} Cannot select item for group").format(x=red("!!!"))) exit(1) if args['dict']: if args['group_membership']: if target_type in ('node', 'repo'): for group in sorted(target.groups): io.stdout(group.name) else: for node in sorted(target.nodes): io.stdout(node.name) elif args['metadata']: for node in sorted(target.nodes): io.stdout("{}\t{}".format(node.name, node.metadata_hash())) else: cdict = target.cached_cdict if args['item'] else target.cdict if cdict is None: io.stdout("REMOVE") else: for key, value in sorted(cdict.items()): io.stdout( "{}\t{}".format(key, value) if args['item'] else "{} {}".format(value, key) ) else: if args['group_membership']: io.stdout(target.group_membership_hash()) elif args['metadata']: io.stdout(target.metadata_hash()) else: io.stdout(target.hash()) bundlewrap-4.13.6/bundlewrap/cmdline/items.py000066400000000000000000000132341417502274300212310ustar00rootroot00000000000000from collections.abc import Collection from os import environ, makedirs from os.path import dirname, exists, join from sys import exit from ..deps import prepare_dependencies from ..exceptions import FaultUnavailable from ..items import BUILTIN_ITEM_ATTRIBUTES from ..utils.cmdline import get_item, get_node from ..utils.dicts import statedict_to_json from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import bold, green, mark_for_translation as _, red, yellow from ..utils.ui import io, page_lines def write_preview(file_item, base_path): """ Writes the content of the given file item to the given path. """ # this might raise an exception, try it before creating anything content = file_item.content file_path = join(base_path, file_item.name.lstrip("/")) dir_path = dirname(file_path) if not exists(dir_path): makedirs(dir_path) with open(file_path, 'wb') as f: f.write(content) def bw_items(repo, args): node = get_node(repo, args['node']) if args['preview'] and not args['item']: io.stderr(_("{x} no ITEM given for preview").format(x=red("!!!"))) exit(1) elif args['file_preview_path']: if args['item']: io.stderr(_("{x} use --file-preview to preview single files").format(x=red("!!!"))) exit(1) if exists(args['file_preview_path']): io.stderr(_( "not writing to existing path: {path}" ).format(path=args['file_preview_path'])) exit(1) for item in sorted(node.items): if not item.id.startswith("file:"): continue if item.attributes['content_type'] == 'any': io.stderr(_( "{x} skipped {filename} (content_type 'any')" ).format(x=yellow("»"), filename=bold(item.name))) continue if item.attributes['content_type'] == 'binary': io.stderr(_( "{x} skipped {filename} (content_type 'binary')" ).format(x=yellow("»"), filename=bold(item.name))) continue if item.attributes['delete']: io.stderr(_( "{x} skipped {filename} ('delete' attribute set)" ).format(x=yellow("»"), filename=bold(item.name))) continue try: write_preview(item, args['file_preview_path']) except FaultUnavailable: io.stderr(_( "{x} skipped {path} (Fault unavailable)" ).format(x=yellow("»"), path=bold(item.name))) else: io.stdout(_( "{x} wrote {path}" ).format( x=green("✓"), path=bold(join( args['file_preview_path'], item.name.lstrip("/"), )), )) elif args['item']: item = get_item(node, args['item']) if args['preview']: try: io.stdout( item.preview(), append_newline=False, ) except NotImplementedError: io.stderr(_( "{x} cannot preview {item} on {node} (doesn't support previews)" ).format(x=red("!!!"), item=item.id, node=node.name)) exit(1) except ValueError: io.stderr(_( "{x} cannot preview {item} on {node} (not 
available for this item config)" ).format(x=red("!!!"), item=item.id, node=node.name)) exit(1) except FaultUnavailable: io.stderr(_( "{x} cannot preview {item} on {node} (Fault unavailable)" ).format(x=red("!!!"), item=item.id, node=node.name)) exit(1) else: if args['show_attrs']: prepare_dependencies(node) table = [[bold(_("attribute")), bold(_("value"))]] for attribute in BUILTIN_ITEM_ATTRIBUTES: if args['attr'] and attribute != args['attr']: continue table.append(ROW_SEPARATOR) value = getattr(item, attribute) if isinstance(value, str): table.append([attribute, value]) elif isinstance(value, Collection): value = sorted(value) or [""] table.append([attribute, value.pop(0)]) for element in value: table.append([ attribute if environ.get('BW_TABLE_STYLE') == "grep" else "", element, ]) else: table.append([attribute, repr(value)]) page_lines(render_table(table)) else: if item.ITEM_TYPE_NAME == "action": statedict = item.attributes elif args['show_sdict']: statedict = item.sdict() else: statedict = item.cdict() if statedict is None: io.stdout("REMOVE") else: if args['attr']: io.stdout(repr(statedict[args['attr']])) else: io.stdout(statedict_to_json(statedict, pretty=True)) else: for item in sorted(node.items): if args['show_repr']: io.stdout(repr(item)) else: io.stdout(item.id) bundlewrap-4.13.6/bundlewrap/cmdline/lock.py000066400000000000000000000200041417502274300210310ustar00rootroot00000000000000from os import environ from sys import exit from ..concurrency import WorkerPool from ..lock import softlock_add, softlock_list, softlock_remove from ..utils.cmdline import get_target_nodes from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import ( bold, error_summary, format_timestamp, green, mark_for_translation as _, randstr, red, yellow, ) from ..utils.ui import io, page_lines def remove_dummy_nodes(targets): _targets = [] for node in targets: if node.dummy: io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("»"))) else: _targets.append(node) return _targets def remove_lock_if_present(node, lock_id): for lock in softlock_list(node): if lock['id'] == lock_id: softlock_remove(node, lock_id) return True return False def bw_lock_add(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['targets']) target_nodes = remove_dummy_nodes(target_nodes) pending_nodes = target_nodes[:] max_node_name_length = max([len(node.name) for node in target_nodes]) lock_id = randstr(length=4).upper() io.progress_set_total(len(pending_nodes)) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': softlock_add, 'task_id': node.name, 'args': (node, lock_id), 'kwargs': { 'comment': args['comment'], 'expiry': args['expiry'], 'item_selectors': args['items'], }, } def handle_result(task_id, return_value, duration): io.progress_advance() io.stdout(_("{x} {node} locked with ID {id} (expires in {exp})").format( x=green("✓"), node=bold(task_id.ljust(max_node_name_length)), id=return_value, exp=args['expiry'], )) def handle_exception(task_id, exception, traceback): msg = "{}: {}".format(task_id, exception) io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_exception=handle_exception, handle_result=handle_result, pool_id="lock", workers=args['node_workers'], ) worker_pool.run() error_summary(errors) def bw_lock_remove(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['targets']) target_nodes = 
remove_dummy_nodes(target_nodes) pending_nodes = target_nodes[:] max_node_name_length = max([len(node.name) for node in target_nodes]) io.progress_set_total(len(pending_nodes)) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': remove_lock_if_present, 'task_id': node.name, 'args': (node, args['lock_id'].upper()), } def handle_result(task_id, return_value, duration): io.progress_advance() if return_value is True: io.stdout(_("{x} {node} lock {id} removed").format( x=green("✓"), node=bold(task_id.ljust(max_node_name_length)), id=args['lock_id'].upper(), )) else: io.stderr(_( "{x} {node} has no lock with ID {id}" ).format( x=red("!"), node=bold(task_id.ljust(max_node_name_length)), id=args['lock_id'].upper(), )) def handle_exception(task_id, exception, traceback): msg = "{}: {}".format(task_id, exception) io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_exception=handle_exception, handle_result=handle_result, pool_id="lock_remove", workers=args['node_workers'], ) worker_pool.run() error_summary(errors) def bw_lock_show(repo, args): errors = [] target_nodes = get_target_nodes(repo, args['targets']) target_nodes = remove_dummy_nodes(target_nodes) pending_nodes = target_nodes[:] locks_on_node = {} exit_code = 0 def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': softlock_list, 'task_id': node.name, 'args': (node,), } def handle_result(task_id, return_value, duration): locks_on_node[task_id] = return_value repo.hooks.lock_show(repo, repo.get_node(task_id), return_value) def handle_exception(task_id, exception, traceback): msg = "{}: {}".format(task_id, exception) io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_exception=handle_exception, handle_result=handle_result, pool_id="lock_show", workers=args['node_workers'], ) worker_pool.run() rows = [[ bold(_("node")), bold(_("ID")), bold(_("created")), bold(_("expires")), bold(_("user")), bold(_("items")), bold(_("comment")), ], ROW_SEPARATOR] for node_name, locks in sorted(locks_on_node.items()): if locks: first_lock = True for lock in locks: lock['formatted_date'] = format_timestamp(lock['date']) lock['formatted_expiry'] = format_timestamp(lock['expiry']) first_item = True for item in lock['items']: rows.append([ node_name if first_item and first_lock else "", lock['id'] if first_item else "", lock['formatted_date'] if first_item else "", lock['formatted_expiry'] if first_item else "", lock['user'] if first_item else "", item, lock['comment'] if first_item else "", ]) # always repeat for grep style first_item = environ.get("BW_TABLE_STYLE") == 'grep' # always repeat for grep style first_lock = environ.get("BW_TABLE_STYLE") == 'grep' else: rows.append([ node_name, _("(none)"), "", "", "", "", "", ]) rows.append(ROW_SEPARATOR) output = list(render_table( rows[:-1], # remove trailing ROW_SEPARATOR alignments={1: 'center'}, )) if args['items']: rows = [[ bold(_("node")), bold(_("item")), bold(_("locked")), bold(_("ID")), ], ROW_SEPARATOR] for node_name, locks in sorted(locks_on_node.items()): node = repo.get_node(node_name) for item in sorted(node.items): if not item.covered_by_autoskip_selector(args['items']): continue locked_by = None for lock in locks: if item.covered_by_autoskip_selector(lock['items']): locked_by = lock['id'] exit_code = 47 
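                        # assumption based on the `bw lock show --items` help text:
                        # 47 is the documented exit code for "at least one
                        # matching item is locked"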
break rows.append([ node.name, item.id, red(_("YES")) if locked_by else green(_("NO")), locked_by or "", ]) if rows[-1] != ROW_SEPARATOR: rows.append(ROW_SEPARATOR) output += list(render_table( rows[:-1], # remove trailing ROW_SEPARATOR )) page_lines(output) error_summary(errors) exit(exit_code) bundlewrap-4.13.6/bundlewrap/cmdline/metadata.py000066400000000000000000000146071417502274300216750ustar00rootroot00000000000000from collections import OrderedDict from contextlib import suppress from decimal import Decimal from sys import exit, version_info from ..metadata import metadata_to_json from ..utils import Fault, list_starts_with from ..utils.cmdline import exit_on_keyboardinterrupt, get_target_nodes from ..utils.dicts import ( delete_key_at_path, replace_key_at_path, set_key_at_path, value_at_key_path, ) from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import ( ansi_clean, blue, bold, force_text, green, mark_for_translation as _, red, yellow, ) from ..utils.ui import io, page_lines def _color_for_source(key, source): if source.startswith("metadata_defaults:"): return blue(key) elif source.startswith("metadata_reactor:"): return green(key) elif source.startswith("group:"): return yellow(key) elif source.startswith("node:"): return red(key) else: return key def _colorize_path( metadata, path, sources, hide_defaults, hide_reactors, hide_groups, hide_node, ): if not isinstance(value_at_key_path(metadata, path), (dict, list, tuple, set)): # only highest-priority source relevant for atomic types sources = [sources[0]] sources_filtered = False for src in sources.copy(): if ( (src.startswith("metadata_defaults:") and hide_defaults) or (src.startswith("metadata_reactor:") and hide_reactors) or (src.startswith("group:") and hide_groups) or (src.startswith("node:") and hide_node) ): sources.remove(src) sources_filtered = True if not sources: delete_key_at_path(metadata, path) return None elif len(sources) == 1: if sources_filtered: # do not colorize if a key is really mixed-source colorized_key = path[-1] else: colorized_key = _color_for_source(path[-1], sources[0]) replace_key_at_path( metadata, path, colorized_key, ) return colorized_key def _sort_dict_colorblind(old_dict): if version_info < (3, 7): new_dict = OrderedDict() else: new_dict = {} for key in sorted(old_dict.keys(), key=ansi_clean): if isinstance(old_dict[key], dict): new_dict[key] = _sort_dict_colorblind(old_dict[key]) else: new_dict[key] = old_dict[key] return new_dict @exit_on_keyboardinterrupt def bw_metadata(repo, args): target_nodes = get_target_nodes(repo, args['targets']) key_paths = sorted([ tuple(path.strip().split("/")) for path in args['keys'] if path ]) or [()] if len(target_nodes) > 1: if key_paths == [()]: io.stdout(_("{x} at least one key is required when viewing multiple nodes").format(x=red("!!!"))) exit(1) if args['blame']: io.stdout(_("{x} blame information can only be shown for a single node").format(x=red("!!!"))) exit(1) table = [[bold(_("node"))] + [bold("/".join(path)) for path in key_paths], ROW_SEPARATOR] for node in sorted(target_nodes): values = [] for key_path in key_paths: value = node.metadata.get(key_path, default=red(_(""))) if isinstance(value, (dict, list, tuple)): value = ", ".join([str(item) for item in value]) elif isinstance(value, set): value = ", ".join(sorted(value)) elif isinstance(value, (bool, float, int, Decimal, Fault)) or value is None: value = str(value) values.append(value) table.append([bold(node.name)] + values) page_lines(render_table(table)) else: node = 
target_nodes.pop() if args['blame']: table = [[bold(_("path")), bold(_("source"))], ROW_SEPARATOR] for key_path in key_paths: # ensure all paths have been generated and will be present in .blame node.metadata.get(key_path) for path, blamed in sorted(node.metadata.blame.items()): joined_path = "/".join(path) for key_path in key_paths: if list_starts_with(path, key_path): table.append([joined_path, ", ".join(blamed)]) break page_lines(render_table(table)) else: metadata = {} for key_path in key_paths: set_key_at_path(metadata, key_path, node.metadata.get(key_path)) blame = list(node.metadata.blame.items()) # sort descending by key path length since we will be replacing # the keys and can't access paths beneath replaced keys anymore blame.sort(key=lambda e: len(e[0]), reverse=True) for path, blamed in blame: # remove all paths we did not ask to see for filtered_path in key_paths: if ( list_starts_with(path, filtered_path) or list_starts_with(filtered_path, path) ): break else: with suppress(KeyError): delete_key_at_path(metadata, path) continue colorized_key = _colorize_path( metadata, path, blamed, args['hide_defaults'], args['hide_reactors'], args['hide_groups'], args['hide_node'], ) for key_path in key_paths: if colorized_key and list(path) == key_path[:len(path)]: # we just replaced a key in the filtered path key_path[len(path) - 1] = colorized_key # now we need to recreate the dict, sorting the keys as if # they were not colored (otherwise we'd end up sorted by # color) metadata_sorted = _sort_dict_colorblind(metadata) page_lines([ force_text(line).replace("\\u001b", "\033") for line in metadata_to_json( metadata_sorted, sort_keys=False, ).splitlines() ]) bundlewrap-4.13.6/bundlewrap/cmdline/nodes.py000066400000000000000000000056341417502274300212250ustar00rootroot00000000000000from os import environ from sys import exit from ..utils import names from ..utils.cmdline import get_target_nodes from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import bold, mark_for_translation as _, red from ..utils.ui import io, page_lines from ..group import GROUP_ATTR_DEFAULTS NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'file_path', 'groups', 'hostname']) NODE_ATTRS_LISTS = ('bundles', 'groups') def _attribute_table( entities, entity_label, selected_attrs, available_attrs, available_attrs_lists, inline, ): rows = [[entity_label], ROW_SEPARATOR] selected_attrs = [attr.strip() for attr in selected_attrs] if selected_attrs == ['all']: selected_attrs = available_attrs elif 'all' in selected_attrs: io.stderr(_( "{x} invalid attribute list requested ('all' and extraneous): {attr}" ).format(x=red("!!!"), attr=", ".join(sorted(selected_attrs)))) exit(1) for attr in selected_attrs: if attr not in available_attrs: io.stderr(_("{x} unknown attribute: {attr}").format(x=red("!!!"), attr=attr)) exit(1) rows[0].append(bold(attr)) has_list_attrs = False for entity in sorted(entities): attr_values = [[entity.name]] for attr in selected_attrs: if attr in available_attrs_lists: if inline: attr_values.append([",".join(sorted(names(getattr(entity, attr))))]) else: has_list_attrs = True attr_values.append(sorted(names(getattr(entity, attr)))) else: attr_values.append([str(getattr(entity, attr))]) number_of_lines = max([len(value) for value in attr_values]) if environ.get("BW_TABLE_STYLE") == 'grep': # repeat entity name for each line attr_values[0] = attr_values[0] * number_of_lines for line in range(number_of_lines): row = [] for attr_index in range(len(selected_attrs) + 1): try: 
row.append(attr_values[attr_index][line]) except IndexError: row.append("") rows.append(row) if has_list_attrs: rows.append(ROW_SEPARATOR) if environ.get("BW_TABLE_STYLE") == 'grep': rows = rows[2:] page_lines(render_table( rows[:-1] if has_list_attrs else rows, # remove trailing ROW_SEPARATOR )) def bw_nodes(repo, args): if args['targets']: nodes = get_target_nodes(repo, args['targets']) else: nodes = repo.nodes if not args['attrs']: for node in sorted(nodes): io.stdout(node.name) else: _attribute_table( nodes, bold(_("node")), args['attrs'], NODE_ATTRS, NODE_ATTRS_LISTS, args['inline'], ) bundlewrap-4.13.6/bundlewrap/cmdline/parser.py000066400000000000000000001060301417502274300214010ustar00rootroot00000000000000from argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS from os import environ, getcwd from .. import VERSION_STRING from ..utils.cmdline import HELP_get_target_nodes from ..utils.text import mark_for_translation as _ from .apply import bw_apply from .debug import bw_debug from .diff import bw_diff from .groups import bw_groups from .hash import bw_hash from .items import bw_items from .lock import bw_lock_add, bw_lock_remove, bw_lock_show from .metadata import bw_metadata from .nodes import bw_nodes from .plot import bw_plot_group, bw_plot_node, bw_plot_node_groups, bw_plot_reactors from .pw import bw_pw from .repo import bw_repo_bundle_create, bw_repo_create from .run import bw_run from .stats import bw_stats from .test import bw_test from .verify import bw_verify from .zen import bw_zen def build_parser_bw(): parser = ArgumentParser( prog="bw", description=_("BundleWrap - Config Management with Python"), ) parser.add_argument( "-a", "--add-host-keys", action='store_true', default=False, dest='add_ssh_host_keys', help=_("set StrictHostKeyChecking=no instead of yes for SSH"), ) parser.add_argument( "-d", "--debug", action='store_true', default=False, dest='debug', help=_("print debugging info"), ) parser.add_argument( "-r", "--repo-path", default=environ.get('BW_REPO_PATH', getcwd()), dest='repo_path', help=_("Look for repository at this path (defaults to current working directory)"), metavar=_("DIRECTORY"), type=str, ) # hidden option to dump profiling info, can be inpected with # SnakeViz or whatever parser.add_argument( "--profile", default=None, dest='profile', help=SUPPRESS, metavar=_("FILE"), type=str, ) parser.add_argument( "--version", action='version', version=VERSION_STRING, ) subparsers = parser.add_subparsers( title=_("subcommands"), help=_("use 'bw --help' for more info"), ) # bw apply help_apply = _("Applies the configuration defined in your repository to your nodes") parser_apply = subparsers.add_parser( "apply", description=help_apply, help=help_apply, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_apply.set_defaults(func=bw_apply) parser_apply.add_argument( 'targets', metavar=_("TARGET"), nargs='+', type=str, help=HELP_get_target_nodes, ) parser_apply.add_argument( "-D", "--no-diff", action='store_false', dest='show_diff', help=_("hide diff for incorrect items when NOT using --interactive"), ) parser_apply.add_argument( "-f", "--force", action='store_true', default=False, dest='force', help=_("ignore existing hard node locks"), ) parser_apply.add_argument( "-i", "--interactive", action='store_true', default=False, dest='interactive', help=_("ask before applying each item"), ) parser_apply.add_argument( "-o", "--only", default=[], dest='autoonly', help=_("""skip all items not matching any SELECTOR: file:/my_path # this 
specific item tag:my_tag # items with this tag bundle:my_bundle # items in this bundle dependencies of selected items will NOT be skipped """), metavar=_("SELECTOR"), nargs='+', type=str, ) bw_apply_p_default = int(environ.get("BW_NODE_WORKERS", "4")) parser_apply.add_argument( "-p", "--parallel-nodes", default=bw_apply_p_default, dest='node_workers', help=_("number of nodes to apply to simultaneously " "(defaults to {})").format(bw_apply_p_default), type=int, ) bw_apply_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4")) parser_apply.add_argument( "-P", "--parallel-items", default=bw_apply_p_items_default, dest='item_workers', help=_("number of items to apply simultaneously on each node " "(defaults to {})").format(bw_apply_p_items_default), type=int, ) parser_apply.add_argument( "-s", "--skip", default=[], dest='autoskip', help=_("""skip items matching any SELECTOR: file:/my_path # this specific item tag:my_tag # items with this tag bundle:my_bundle # items in this bundle """), metavar=_("SELECTOR"), nargs='+', type=str, ) parser_apply.add_argument( "-S", "--no-summary", action='store_false', dest='summary', help=_("don't show stats summary"), ) parser_apply.add_argument( "-r", "--resume-file", default=None, dest='resume_file', help=_( "path to a file that a list of completed nodes will be added to; " "if the file already exists, any nodes therein will be skipped" ), metavar=_("PATH"), type=str, ) # bw debug help_debug = _("Start an interactive Python shell for this repository") parser_debug = subparsers.add_parser("debug", description=help_debug, help=help_debug) parser_debug.set_defaults(func=bw_debug) parser_debug.add_argument( "-c", "--command", default=None, dest='command', metavar=_("COMMAND"), required=False, type=str, help=_("command to execute in lieu of REPL"), ) parser_debug.add_argument( "-n", "--node", default=None, dest='node', metavar=_("NODE"), required=False, type=str, help=_("name of node to inspect"), ) # bw diff help_diff = _("Show differences between nodes") parser_diff = subparsers.add_parser( "diff", description=help_diff, help=help_diff, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_diff.set_defaults(func=bw_diff) parser_diff.add_argument( "-b", "--branch", default=None, dest='branch', metavar=_("REV"), required=False, type=str, help=_("compare with this git rev instead (requires clean working dir)"), ) parser_diff.add_argument( "-c", "--cmd-change", default=None, dest='cmd_change', metavar=_("CMD_CHANGE"), required=False, type=str, help=_("command to execute between taking metadata snapshots (e.g., change Git branch)"), ) parser_diff.add_argument( "-r", "--cmd-reset", default=None, dest='cmd_reset', metavar=_("CMD_RESET"), required=False, type=str, help=_("command to execute when finished (e.g., switch back to original Git branch)"), ) parser_diff.add_argument( "-p", "--prompt", action='store_true', default=False, dest='prompt', help=_("interactively ask for user to make changes"), ) parser_diff.add_argument( "-i", "--item", default=None, dest='item', metavar=_("ITEM"), required=False, type=str, help=_("compare this specific item between nodes"), ) parser_diff.add_argument( "-m", "--metadata", action='store_true', default=False, dest='metadata', help=_("compare metadata instead of configuration"), ) parser_diff.add_argument( 'targets', metavar=_("TARGET"), nargs='+', type=str, help=HELP_get_target_nodes, ) # bw groups help_groups = _("Lists groups in this repository") parser_groups = subparsers.add_parser("groups", 
description=help_groups, help=help_groups) parser_groups.set_defaults(func=bw_groups) parser_groups.add_argument( "-a", "--attrs", dest='attrs', metavar=_("ATTR"), nargs='+', type=str, help=_("show table with the given attributes for each group " "(e.g. 'all', 'members', 'os', ...)"), ) parser_groups.add_argument( "-i", "--inline", action='store_true', dest='inline', help=_("keep lists on a single line (for grep)"), ) parser_groups.add_argument( 'groups', default=None, metavar=_("GROUP"), nargs='*', type=str, help=_("show the given groups (and their subgroups, unless --attrs is used)"), ) # bw hash help_hash = _("Shows a SHA1 hash that summarizes the entire configuration for this repo, node, group, or item.") parser_hash = subparsers.add_parser("hash", description=help_hash, help=help_hash) parser_hash.set_defaults(func=bw_hash) parser_hash.add_argument( "-d", "--dict", action='store_true', default=False, dest='dict', help=_("instead show the data this hash is derived from"), ) parser_hash.add_argument( "-g", "--group", action='store_true', default=False, dest='group_membership', help=_("hash group membership instead of configuration"), ) parser_hash.add_argument( "-m", "--metadata", action='store_true', default=False, dest='metadata', help=_("hash metadata instead of configuration (not available for items)"), ) parser_hash.add_argument( 'node_or_group', metavar=_("NODE|GROUP"), type=str, nargs='?', help=_("show config hash for this node or group"), ) parser_hash.add_argument( 'item', metavar=_("ITEM"), type=str, nargs='?', help=_("show config hash for this item on the given node"), ) # bw items help_items = _("List and preview items for a specific node") parser_items = subparsers.add_parser("items", description=help_items, help=help_items) parser_items.set_defaults(func=bw_items) parser_items.add_argument( 'node', metavar=_("NODE"), type=str, help=_("list items for this node"), ) parser_items.add_argument( 'item', metavar=_("ITEM"), nargs='?', type=str, help=_("show configuration for this item"), ) parser_items.add_argument( 'attr', metavar=_("ATTRIBUTE"), nargs='?', type=str, help=_("show only this item attribute"), ) parser_items.add_argument( "-f", "--preview", "--file-preview", # TODO 4.0 remove action='store_true', dest='preview', help=_("print preview of given ITEM"), ) parser_items.add_argument( "-w", "--write-file-previews", default=None, dest='file_preview_path', metavar=_("DIRECTORY"), required=False, type=str, help=_("create DIRECTORY and fill it with rendered file previews"), ) parser_items.add_argument( "--attrs", action='store_true', dest='show_attrs', help=_("show internal item attributes"), ) parser_items.add_argument( "--repr", action='store_true', dest='show_repr', help=_("show more verbose representation of each item"), ) parser_items.add_argument( "--state", action='store_true', dest='show_sdict', help=_("show actual item status on node instead of should-be configuration"), ) # bw lock help_lock = _("Manage locks on nodes used to prevent collisions between BundleWrap users") parser_lock = subparsers.add_parser( "lock", description=help_lock, help=help_lock, ) parser_lock_subparsers = parser_lock.add_subparsers() # bw lock add help_lock_add = _("Add a new lock to one or more nodes") parser_lock_add = parser_lock_subparsers.add_parser( "add", description=help_lock_add, help=help_lock_add, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_lock_add.set_defaults(func=bw_lock_add) parser_lock_add.add_argument( 'targets', metavar=_("TARGET"), 
nargs='+', type=str, help=HELP_get_target_nodes, ) parser_lock_add.add_argument( "-c", "--comment", default="", dest='comment', help=_("brief description of the purpose of the lock"), type=str, ) bw_lock_add_e_default = environ.get("BW_SOFTLOCK_EXPIRY", "8h") parser_lock_add.add_argument( "-e", "--expires-in", default=bw_lock_add_e_default, dest='expiry', help=_("how long before the lock is ignored and removed automatically " "(defaults to \"{}\")").format(bw_lock_add_e_default), type=str, ) parser_lock_add.add_argument( "-i", "--items", default=["*"], dest='items', help=_("""lock only items matching any SELECTOR: file:/my_path # this specific item tag:my_tag # items with this tag bundle:my_bundle # items in this bundle """), metavar=_("SELECTOR"), nargs='+', type=str, ) bw_lock_add_p_default = int(environ.get("BW_NODE_WORKERS", "4")) parser_lock_add.add_argument( "-p", "--parallel-nodes", default=bw_lock_add_p_default, dest='node_workers', help=_("number of nodes to lock simultaneously " "(defaults to {})").format(bw_lock_add_p_default), type=int, ) # bw lock remove help_lock_remove = _("Remove a lock from a node") parser_lock_remove = parser_lock_subparsers.add_parser( "remove", description=help_lock_remove, help=help_lock_remove, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_lock_remove.set_defaults(func=bw_lock_remove) parser_lock_remove.add_argument( 'targets', metavar=_("TARGET"), nargs='+', type=str, help=HELP_get_target_nodes, ) parser_lock_remove.add_argument( 'lock_id', metavar=_("LOCK_ID"), type=str, help=_("ID of the lock to remove (obtained with `bw lock show`)"), ) bw_lock_remove_p_default = int(environ.get("BW_NODE_WORKERS", "4")) parser_lock_remove.add_argument( "-p", "--parallel-nodes", default=bw_lock_remove_p_default, dest='node_workers', help=_("number of nodes to remove lock from simultaneously " "(defaults to {})").format(bw_lock_remove_p_default), type=int, ) # bw lock show help_lock_show = _("Show details of locks present on a node") parser_lock_show = parser_lock_subparsers.add_parser( "show", description=help_lock_show, help=help_lock_show, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_lock_show.set_defaults(func=bw_lock_show) parser_lock_show.add_argument( 'targets', metavar=_("TARGETS"), nargs='+', type=str, help=HELP_get_target_nodes, ) parser_lock_show.add_argument( "-i", "--items", default=None, dest='items', help=_("""check locks against items matching any SELECTOR: file:/my_path # this specific item tag:my_tag # items with this tag bundle:my_bundle # items in this bundle will exit with code 47 if any matching items are locked """), metavar=_("SELECTOR"), nargs='+', type=str, ) bw_lock_show_p_default = int(environ.get("BW_NODE_WORKERS", "4")) parser_lock_show.add_argument( "-p", "--parallel-nodes", default=bw_lock_show_p_default, dest='node_workers', help=_("number of nodes to retrieve locks from simultaneously " "(defaults to {})").format(bw_lock_show_p_default), type=int, ) # bw metadata help_metadata = ("View a JSON representation of a node's metadata (defaults blue, reactors green, groups yellow, node red, uncolored if mixed-source) or a table of selected metadata keys from multiple nodes") parser_metadata = subparsers.add_parser( "metadata", description=help_metadata, help=help_metadata, formatter_class=RawTextHelpFormatter, ) parser_metadata.set_defaults(func=bw_metadata) parser_metadata.add_argument( 'targets', metavar=_("TARGET"), nargs='+', type=str, help=HELP_get_target_nodes, ) 
parser_metadata.add_argument( "-k", "--keys", default=[], dest='keys', metavar=_("KEY"), nargs='*', type=str, help=_("show only partial metadata from the given key paths (e.g. `bw metadata mynode -k users/jdoe` to show `mynode.metadata['users']['jdoe']`)"), ) parser_metadata.add_argument( "-b", "--blame", action='store_true', dest='blame', help=_("show where each piece of metadata comes from"), ) parser_metadata.add_argument( "-D", "--hide-defaults", action='store_true', dest='hide_defaults', help=_("hide values set by defaults in metadata.py"), ) parser_metadata.add_argument( "-G", "--hide-groups", action='store_true', dest='hide_groups', help=_("hide values set in groups.py"), ) parser_metadata.add_argument( "-N", "--hide-node", action='store_true', dest='hide_node', help=_("hide values set in nodes.py"), ) parser_metadata.add_argument( "-R", "--hide-reactors", action='store_true', dest='hide_reactors', help=_("hide values set by reactors in metadata.py"), ) # bw nodes help_nodes = _("List nodes in this repository") parser_nodes = subparsers.add_parser( "nodes", description=help_nodes, help=help_nodes, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_nodes.set_defaults(func=bw_nodes) parser_nodes.add_argument( "-i", "--inline", action='store_true', dest='inline', help=_("keep lists on a single line (for grep)"), ) parser_nodes.add_argument( 'targets', default=None, metavar=_("TARGET"), nargs='*', type=str, help=HELP_get_target_nodes, ) parser_nodes.add_argument( "-a", "--attrs", default=None, dest='attrs', metavar=_("ATTR"), nargs='+', type=str, help=_("show table with the given attributes for each node " "(e.g. 'all', 'groups', 'bundles', 'hostname', 'os', ...)"), ) # bw plot help_plot = _("Generates DOT output that can be piped into `dot -Tsvg -ooutput.svg`. " "The resulting output.svg can be viewed using most browsers.") parser_plot = subparsers.add_parser("plot", description=help_plot, help=help_plot) parser_plot_subparsers = parser_plot.add_subparsers() # bw plot group help_plot_group = _("Plot subgroups and node members for the given group " "or the entire repository") parser_plot_subparsers_group = parser_plot_subparsers.add_parser( "group", description=help_plot_group, help=help_plot_group, ) parser_plot_subparsers_group.set_defaults(func=bw_plot_group) parser_plot_subparsers_group.add_argument( 'group', default=None, metavar=_("GROUP"), nargs='?', type=str, help=_("group to plot"), ) parser_plot_subparsers_group.add_argument( "-N", "--no-nodes", action='store_false', dest='show_nodes', help=_("do not include nodes in output"), ) # bw plot node help_plot_node = _( "Plot items and their dependencies for the given node. 
" "Color guide: " "needs in red, " "after in blue, " "concurrency blockers in purple, " "before in yellow, " "needed_by in orange, " "auto in green, " "triggers in pink" ) parser_plot_subparsers_node = parser_plot_subparsers.add_parser( "node", description=help_plot_node, help=help_plot_node, ) parser_plot_subparsers_node.set_defaults(func=bw_plot_node) parser_plot_subparsers_node.add_argument( 'node', metavar=_("NODE"), type=str, help=_("node to plot"), ) parser_plot_subparsers_node.add_argument( "--no-cluster", action='store_false', dest='cluster', help=_("do not cluster items by bundle"), ) parser_plot_subparsers_node.add_argument( "--no-depends-auto", action='store_false', dest='depends_auto', help=_("do not show auto-generated and trigger dependencies"), ) parser_plot_subparsers_node.add_argument( "--no-depends-conc", action='store_false', dest='depends_concurrency', help=_("do not show concurrency blocker dependencies"), ) parser_plot_subparsers_node.add_argument( "--no-depends-regular", action='store_false', dest='depends_regular', help=_("do not show after/needs dependencies"), ) parser_plot_subparsers_node.add_argument( "--no-depends-reverse", action='store_false', dest='depends_reverse', help=_("do not show before/needed_by dependencies"), ) # bw plot groups-for-node help_plot_node_groups = _("Show where a specific node gets its groups from") parser_plot_subparsers_node_groups = parser_plot_subparsers.add_parser( "groups-for-node", description=help_plot_node_groups, help=help_plot_node_groups, ) parser_plot_subparsers_node_groups.set_defaults(func=bw_plot_node_groups) parser_plot_subparsers_node_groups.add_argument( 'node', metavar=_("NODE"), type=str, help=_("node to plot"), ) # bw plot reactors help_plot_node_reactors = _( "Show metadata reactor information flow for a node. " "Boxes are reactors, ovals are metadata paths provided or needed by reactors. " "Accesses to other nodes' metadata are truncated by default and shown in red. " "Numbers behind reactor names indicate how often the reactor result " "changed vs. how often the reactor was run (0/1 is perfect efficiency)." ) parser_plot_subparsers_node_reactors = parser_plot_subparsers.add_parser( "reactors", description=help_plot_node_reactors, help=help_plot_node_reactors, ) parser_plot_subparsers_node_reactors.set_defaults(func=bw_plot_reactors) parser_plot_subparsers_node_reactors.add_argument( 'node', metavar=_("NODE"), type=str, help=_("node to plot"), ) parser_plot_subparsers_node_reactors.add_argument( "-k", "--keys", default=[], dest='keys', metavar=_("KEY"), nargs='*', type=str, help=_( "request only partial metadata from the given key paths " "(e.g. 
`bw plot reactors mynode -k users/jdoe` "
            "to show `mynode.metadata['users']['jdoe']`)"
        ),
    )
    parser_plot_subparsers_node_reactors.add_argument(
        "-r", "--recursive",
        action='store_true',
        dest='recursive',
        help=_("do not truncate plot when crossing to other nodes (result might be huge)"),
    )

    # bw pw
    help_pw = _("Generate passwords and encrypt/decrypt secrets")
    parser_pw = subparsers.add_parser(
        "pw",
        description=help_pw,
        help=help_pw,
    )
    parser_pw.set_defaults(func=bw_pw)
    parser_pw.add_argument(
        'string',
        metavar=_("STRING"),
        type=str,
    )
    parser_pw.add_argument(
        "-b", "--bytes",
        action='store_true',
        dest='bytes',
        help=_("derive random bytes as base64 from STRING (`repo.vault.random_bytes_as_base64_for()`)"),
    )
    parser_pw.add_argument(
        "-d", "--decrypt",
        action='store_true',
        dest='decrypt',
        help=_("decrypt secret given as STRING (`repo.vault.decrypt()`)"),
    )
    parser_pw.add_argument(
        "-e", "--encrypt",
        action='store_true',
        dest='encrypt',
        help=_("encrypt secret in STRING (`repo.vault.encrypt()`)"),
    )
    parser_pw.add_argument(
        "-f", "--file",
        dest='file',
        metavar=_("TARGET_PATH"),
        type=str,
        help=_("treat STRING as source filename for -d and -e, write result to TARGET_PATH (relative to data/)"),
    )
    parser_pw.add_argument(
        "-H", "--human",
        action='store_true',
        dest='human',
        help=_("derive human-friendly password from STRING (`repo.vault.human_password_for()`)"),
    )
    parser_pw.add_argument(
        "-k", "--key",
        dest='key',
        metavar=_("NAME"),
        type=str,
        help=_(
            "which key from .secrets.cfg to use "
            "(defaults to 'encrypt' for -d and -e, 'generate' otherwise; "
            "overrides key name embedded in STRING)"
        ),
    )
    parser_pw.add_argument(
        "-l", "--length",
        default=32,
        dest='length',
        metavar=_("INT"),
        type=int,
        help=_("length for --password and --bytes (defaults to 32)"),
    )
    parser_pw.add_argument(
        "-p", "--password",
        action='store_true',
        dest='password',
        help=_("derive password from STRING (`repo.vault.password_for()`)"),
    )

    # bw repo
    help_repo = _("Various subcommands to manipulate your repository")
    parser_repo = subparsers.add_parser("repo", description=help_repo, help=help_repo)
    parser_repo_subparsers = parser_repo.add_subparsers()

    # bw repo bundle
    parser_repo_subparsers_bundle = parser_repo_subparsers.add_parser("bundle")
    parser_repo_subparsers_bundle_subparsers = parser_repo_subparsers_bundle.add_subparsers()

    # bw repo bundle create
    parser_repo_subparsers_bundle_create = \
        parser_repo_subparsers_bundle_subparsers.add_parser("create")
    parser_repo_subparsers_bundle_create.set_defaults(func=bw_repo_bundle_create)
    parser_repo_subparsers_bundle_create.add_argument(
        'bundle',
        metavar=_("BUNDLE"),
        type=str,
        help=_("name of bundle to create"),
    )

    # bw repo create
    parser_repo_subparsers_create = parser_repo_subparsers.add_parser("create")
    parser_repo_subparsers_create.set_defaults(func=bw_repo_create)

    # bw run
    help_run = _("Run a one-off command on a number of nodes")
    parser_run = subparsers.add_parser(
        "run",
        description=help_run,
        help=help_run,
        formatter_class=RawTextHelpFormatter,  # for HELP_get_target_nodes
    )
    parser_run.set_defaults(func=bw_run)
    parser_run.add_argument(
        'targets',
        metavar=_("TARGET"),
        nargs='+',
        type=str,
        help=HELP_get_target_nodes,
    )
    parser_run.add_argument(
        'command',
        metavar=_("COMMAND"),
        type=str,
        help=_("command to run"),
    )
    parser_run.add_argument(
        "--stderr-table",
        action='store_true',
        dest='stderr_table',
        help=_("include command stderr in stats table"),
    )
    parser_run.add_argument(
        "--stdout-table",
        action='store_true',
        dest='stdout_table',
        help=_("include command stdout in stats table"),
    )
    bw_run_p_default
= int(environ.get("BW_NODE_WORKERS", "1")) parser_run.add_argument( "-p", "--parallel-nodes", default=bw_run_p_default, dest='node_workers', help=_("number of nodes to run command on simultaneously " "(defaults to {})").format(bw_run_p_default), type=int, ) parser_run.add_argument( "-r", "--resume-file", default=None, dest='resume_file', help=_( "path to a file that a list of completed nodes will be added to; " "if the file already exists, any nodes therein will be skipped" ), metavar=_("PATH"), type=str, ) parser_run.add_argument( "-S", "--no-summary", action='store_false', dest='summary', help=_("don't show stats summary"), ) # bw stats help_stats = _("Show some statistics about your repository") parser_stats = subparsers.add_parser("stats", description=help_stats, help=help_stats) parser_stats.set_defaults(func=bw_stats) # bw test help_test = _("Test your repository for consistency " "(you can use this with a CI tool like Jenkins). " "If *any* options other than -i are given, *only* the " "tests selected by those options will be run. Otherwise, a " "default selection of tests will be run (that selection may " "change in future releases). Currently, the default is -IJKMp " "if specific nodes are given and -HIJKMSp if testing the " "entire repo.") parser_test = subparsers.add_parser( "test", description=help_test, help=help_test, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_test.set_defaults(func=bw_test) parser_test.add_argument( 'targets', default=None, metavar=_("TARGET"), nargs='*', type=str, help=HELP_get_target_nodes + _("\n(defaults to all)"), ) parser_test.add_argument( "-d", "--config-determinism", default=0, dest='determinism_config', help=_("verify determinism of configuration by running `bw hash` N times " "and checking for consistent results (with N > 1)"), metavar="N", type=int, ) parser_test.add_argument( "-e", "--empty-groups", action='store_true', dest='empty_groups', help=_("check for empty groups"), ) parser_test.add_argument( "-H", "--hooks-repo", action='store_true', dest='hooks_repo', help=_("run repo-level test hooks"), ) parser_test.add_argument( "-i", "--ignore-missing-faults", action='store_true', dest='ignore_missing_faults', help=_("do not fail when encountering a missing Fault"), ) parser_test.add_argument( "-I", "--items", action='store_true', dest='items', help=_("run item-level tests (like rendering templates)"), ) parser_test.add_argument( "-J", "--hooks-node", action='store_true', dest='hooks_node', help=_("run node-level test hooks"), ) parser_test.add_argument( "-K", "--metadata-keys", action='store_true', dest='metadata_keys', help=_("validate metadata keys"), ) parser_test.add_argument( "-m", "--metadata-determinism", default=0, dest='determinism_metadata', help=_("verify determinism of metadata by running `bw hash -m` N times " "and checking for consistent results (with N > 1)"), metavar="N", type=int, ) parser_test.add_argument( "-M", "--metadata-conflicts", action='store_true', dest='metadata_conflicts', help=_("check for conflicting metadata keys in group metadata, reactors, and defaults"), ) parser_test.add_argument( "-o", "--orphaned-bundles", action='store_true', dest='orphaned_bundles', help=_("check for bundles not assigned to any node"), ) parser_test.add_argument( "-p", "--reactor-provides", action='store_true', dest='reactor_provides', help=_("check for reactors returning keys other than what they declared with @metadata_reactor.provides()"), ) parser_test.add_argument( "-q", "--quiet", action='store_true', 
dest='quiet', help=_("don't show successful tests"), ) parser_test.add_argument( "-S", "--subgroup-loops", action='store_true', dest='subgroup_loops', help=_("check for loops in subgroup hierarchies"), ) # bw verify help_verify = _("Inspect the health or 'correctness' of a node without changing it") parser_verify = subparsers.add_parser( "verify", description=help_verify, help=help_verify, formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes ) parser_verify.set_defaults(func=bw_verify) parser_verify.add_argument( 'targets', metavar=_("TARGET"), nargs='+', type=str, help=HELP_get_target_nodes, ) parser_verify.add_argument( "-a", "--show-all", action='store_true', dest='show_all', help=_("show correct and skipped items as well as incorrect ones"), ) parser_verify.add_argument( "-D", "--no-diff", action='store_false', dest='show_diff', help=_("hide diff for incorrect items"), ) parser_verify.add_argument( "-o", "--only", default=[], dest='autoonly', help=_("""skip all items not matching any SELECTOR: file:/my_path # this specific item tag:my_tag # items with this tag bundle:my_bundle # items in this bundle """), metavar=_("SELECTOR"), nargs='+', type=str, ) bw_verify_p_default = int(environ.get("BW_NODE_WORKERS", "4")) parser_verify.add_argument( "-p", "--parallel-nodes", default=bw_verify_p_default, dest='node_workers', help=_("number of nodes to verify simultaneously " "(defaults to {})").format(bw_verify_p_default), type=int, ) bw_verify_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4")) parser_verify.add_argument( "-P", "--parallel-items", default=bw_verify_p_items_default, dest='item_workers', help=_("number of items to verify simultaneously on each node " "(defaults to {})").format(bw_verify_p_items_default), type=int, ) parser_verify.add_argument( "-s", "--skip", default=[], dest='autoskip', help=_("""skip items matching any SELECTOR: file:/my_path # this specific item tag:my_tag # items with this tag bundle:my_bundle # items in this bundle """), metavar=_("SELECTOR"), nargs='+', type=str, ) parser_verify.add_argument( "-S", "--no-summary", action='store_false', dest='summary', help=_("don't show stats summary"), ) # bw zen parser_zen = subparsers.add_parser("zen") parser_zen.set_defaults(func=bw_zen) return parser bundlewrap-4.13.6/bundlewrap/cmdline/plot.py000066400000000000000000000026741417502274300210740ustar00rootroot00000000000000from ..deps import prepare_dependencies from ..utils.plot import ( graph_for_items, plot_group, plot_node_groups, plot_reactors, ) from ..utils.cmdline import get_group, get_node from ..utils.ui import io def bw_plot_group(repo, args): group = get_group(repo, args['group']) if args['group'] else None if args['show_nodes']: nodes = group.nodes if group else repo.nodes else: nodes = [] if group: groups = [group] groups.extend(group.subgroups) else: groups = repo.groups for line in plot_group(groups, nodes, args['show_nodes']): io.stdout(line) def bw_plot_node(repo, args): node = get_node(repo, args['node']) for line in graph_for_items( node.name, prepare_dependencies(node), cluster=args['cluster'], concurrency=args['depends_concurrency'], regular=args['depends_regular'], reverse=args['depends_reverse'], auto=args['depends_auto'], ): io.stdout(line) def bw_plot_node_groups(repo, args): node = get_node(repo, args['node']) for line in plot_node_groups(node): io.stdout(line) def bw_plot_reactors(repo, args): node = get_node(repo, args['node']) key_paths = sorted([ tuple(path.strip().split("/")) for path in args['keys'] if path ]) or [()] 
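    # key paths arrive as slash-separated strings ("users/jdoe" -> ("users", "jdoe"));
    # with no -k arguments this falls back to [()], i.e. the entire metadata tree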
    for line in plot_reactors(repo, node, key_paths, recursive=args['recursive']):
        io.stdout(line)
bundlewrap-4.13.6/bundlewrap/cmdline/pw.py000066400000000000000000000043701417502274300205370ustar00rootroot00000000000000from os.path import join
from sys import exit

from ..utils.text import mark_for_translation as _, red
from ..utils.ui import io


OPERATIONS = (
    'bytes',
    'decrypt',
    'encrypt',
    'human',
    'password',
)


def get_operation(args):
    opcount = 0
    selected_op = None
    for op in OPERATIONS:
        if args[op]:
            selected_op = op
            opcount += 1
    if opcount > 1:
        io.stdout(_("{x} More than one operation selected").format(x=red("!!!")))
        exit(1)
    elif opcount == 0:
        return 'password'
    return selected_op


def bw_pw(repo, args):
    if args['length'] < 1:
        io.stdout(_("{x} length must be > 0").format(x=red("!!!")))
        exit(1)

    op = get_operation(args)
    if op == 'bytes':
        io.stdout(repo.vault.random_bytes_as_base64_for(
            args['string'],
            key=args['key'] or 'generate',
            length=args['length'],
        ).value)
    elif op == 'decrypt':
        if args['file']:
            content = repo.vault.decrypt_file(
                args['string'],
                key=args['key'],
            ).value
            with open(join(repo.data_dir, args['file']), 'wb') as f:
                f.write(content.encode('utf-8'))
        else:
            try:
                key, cryptotext = args['string'].split("$", 1)
            except ValueError:
                cryptotext = args['string']
                key = args['key'] or 'encrypt'
            io.stdout(repo.vault.decrypt(
                cryptotext,
                key=key,
            ).value)
    elif op == 'encrypt':
        if args['file']:
            repo.vault.encrypt_file(
                args['string'],
                args['file'],
                key=args['key'] or 'encrypt',
            )
        else:
            io.stdout(repo.vault.encrypt(
                args['string'],
                key=args['key'] or 'encrypt',
            ))
    elif op == 'human':
        io.stdout(repo.vault.human_password_for(
            args['string'],
            key=args['key'] or 'generate',
        ).value)
    elif op == 'password':
        io.stdout(repo.vault.password_for(
            args['string'],
            key=args['key'] or 'generate',
            length=args['length'],
        ).value)
bundlewrap-4.13.6/bundlewrap/cmdline/repo.py000066400000000000000000000002541417502274300210530ustar00rootroot00000000000000from ..repo import Repository


def bw_repo_bundle_create(repo, args):
    repo.create_bundle(args['bundle'])


def bw_repo_create(path, args):
    Repository.create(path)
bundlewrap-4.13.6/bundlewrap/cmdline/run.py000066400000000000000000000122161417502274300207130ustar00rootroot00000000000000from datetime import datetime
from itertools import zip_longest
from sys import exit

from ..concurrency import WorkerPool
from ..exceptions import SkipNode
from ..utils import SkipList
from ..utils.cmdline import get_target_nodes
from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import (
    blue,
    bold,
    error_summary,
    format_duration,
    green,
    mark_for_translation as _,
    red,
    yellow,
)
from ..utils.ui import io


def run_on_node(node, command, skip_list):
    if node.dummy:
        io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("»")))
        return None
    if node.name in skip_list:
        io.stdout(_("{x} {node} skipped by --resume-file").format(node=bold(node.name), x=yellow("»")))
        return None

    try:
        node.repo.hooks.node_run_start(
            node.repo,
            node,
            command,
        )
    except SkipNode as exc:
        io.stdout(_("{x} {node} skipped by hook ({reason})").format(
            node=bold(node.name),
            reason=str(exc) or _("no reason given"),
            x=yellow("»"),
        ))
        return None

    with io.job(_("{} running command...").format(bold(node.name))):
        result = node.run(
            command,
            may_fail=True,
            log_output=True,
        )

    node.repo.hooks.node_run_end(
        node.repo,
        node,
        command,
        duration=result.duration,
        return_code=result.return_code,
        stdout=result.stdout,
        stderr=result.stderr,
    )

    return result


def stats_summary(results, include_stdout, include_stderr):
    rows = [[
        bold(_("node")),
        bold(_("return code")),
        bold(_("time")),
    ], ROW_SEPARATOR]
    if include_stdout:
        rows[0].append(bold(_("stdout")))
    if include_stderr:
        rows[0].append(bold(_("stderr")))

    for node_name, result in sorted(results.items()):
        row = [node_name]
        if result is None:
            # node has been skipped
            continue
        elif result.return_code == 0:
            row.append(green(str(result.return_code)))
        else:
            row.append(red(str(result.return_code)))
        row.append(format_duration(result.duration, msec=True))
        rows.append(row)

        if include_stdout or include_stderr:
            stdout = result.stdout.decode('utf-8', errors='replace').strip().split("\n")
            stderr = result.stderr.decode('utf-8', errors='replace').strip().split("\n")
            if include_stdout:
                row.append(stdout[0])
            if include_stderr:
                row.append(stderr[0])
            for stdout_line, stderr_line in list(zip_longest(stdout, stderr, fillvalue=""))[1:]:
                continuation_row = ["", "", ""]
                if include_stdout:
                    continuation_row.append(stdout_line)
                if include_stderr:
                    continuation_row.append(stderr_line)
                rows.append(continuation_row)
            rows.append(ROW_SEPARATOR)

    if include_stdout or include_stderr:
        # remove last ROW_SEPARATOR
        rows = rows[:-1]

    if len(rows) > 2:  # table might be empty if all nodes have been skipped
        for line in render_table(rows, alignments={1: 'right', 2: 'right'}):
            io.stdout("{x} {line}".format(x=blue("i"), line=line))


def bw_run(repo, args):
    errors = []
    target_nodes = get_target_nodes(repo, args['targets'])
    pending_nodes = target_nodes.copy()
    io.progress_set_total(len(pending_nodes))

    repo.hooks.run_start(
        repo,
        args['targets'],
        target_nodes,
        args['command'],
    )
    start_time = datetime.now()
    results = {}

    skip_list = SkipList(args['resume_file'])

    def tasks_available():
        return bool(pending_nodes)

    def next_task():
        node = pending_nodes.pop()
        return {
            'target': run_on_node,
            'task_id': node.name,
            'args': (
                node,
                args['command'],
                skip_list,
            ),
        }

    def handle_result(task_id, return_value, duration):
        io.progress_advance()
        results[task_id] = return_value
        if return_value is None or return_value.return_code == 0:
            skip_list.add(task_id)

    def handle_exception(task_id, exception, traceback):
        io.progress_advance()
        msg = "{} {}".format(bold(task_id), exception)
        io.stderr(traceback)
        io.stderr(repr(exception))
        io.stderr("{} {}".format(red("!"), msg))
        errors.append(msg)

    worker_pool = WorkerPool(
        tasks_available,
        next_task,
        handle_result=handle_result,
        handle_exception=handle_exception,
        cleanup=skip_list.dump,
        pool_id="run",
        workers=args['node_workers'],
    )
    worker_pool.run()

    if args['summary']:
        stats_summary(results, args['stdout_table'], args['stderr_table'])
    error_summary(errors)

    repo.hooks.run_end(
        repo,
        args['targets'],
        target_nodes,
        args['command'],
        duration=datetime.now() - start_time,
    )

    exit(1 if errors else 0)
bundlewrap-4.13.6/bundlewrap/cmdline/stats.py000066400000000000000000000025641417502274300212500ustar00rootroot00000000000000from operator import itemgetter

from ..utils.table import ROW_SEPARATOR, render_table
from ..utils.text import bold, mark_for_translation as _
from ..utils.ui import page_lines


def bw_stats(repo, args):
    items = {}
    metadata_defaults = set()
    metadata_reactors = set()
    for node in repo.nodes:
        for metadata_default_name, metadata_default in node.metadata_defaults:
            metadata_defaults.add(metadata_default_name)
        for metadata_reactor_name, metadata_reactor in node.metadata_reactors:
            metadata_reactors.add(metadata_reactor_name)
        for item in node.items:
            items.setdefault(item.ITEM_TYPE_NAME, 0)
            items[item.ITEM_TYPE_NAME] += 1

    rows = [
        [
            bold(_("count")),
bold(_("type")), ], ROW_SEPARATOR, [str(len(repo.nodes)), _("nodes")], [str(len(repo.groups)), _("groups")], [str(len(repo.bundle_names)), _("bundles")], [str(len(metadata_defaults)), _("metadata defaults")], [str(len(metadata_reactors)), _("metadata reactors")], [str(sum([len(list(node.items)) for node in repo.nodes])), _("items")], ROW_SEPARATOR, ] for item_type, count in sorted(items.items(), key=itemgetter(1), reverse=True): rows.append([str(count), item_type]) page_lines(render_table(rows, alignments={0: 'right'})) bundlewrap-4.13.6/bundlewrap/cmdline/test.py000066400000000000000000000275701417502274300210770ustar00rootroot00000000000000from copy import copy from sys import exit from ..deps import ItemDependencyLoop from ..exceptions import FaultUnavailable from ..itemqueue import ItemTestQueue from ..metadata import check_for_metadata_conflicts, metadata_to_json from ..repo import Repository from ..utils.cmdline import count_items, get_target_nodes from ..utils.dicts import diff_dict, diff_text from ..utils.text import bold, green, mark_for_translation as _, prefix_lines, red, yellow from ..utils.ui import io, QUIT_EVENT def test_items(nodes, ignore_missing_faults, quiet): io.progress_set_total(count_items(nodes)) for node in nodes: if QUIT_EVENT.is_set(): break if not node.items: io.stdout(_("{x} {node} has no items").format(node=bold(node.name), x=yellow("!"))) continue item_queue = ItemTestQueue(node) while not QUIT_EVENT.is_set(): try: item = item_queue.pop() except KeyError: # no items left break try: item._test() except FaultUnavailable: if ignore_missing_faults: io.progress_advance() io.stderr(_("{x} {node} {bundle} {item} ({msg})").format( bundle=bold(item.bundle.name), item=item.id, msg=yellow(_("Fault unavailable")), node=bold(node.name), x=yellow("»"), )) else: io.stderr(_("{x} {node} {bundle} {item} missing Fault:").format( bundle=bold(item.bundle.name), item=item.id, node=bold(node.name), x=red("!"), )) raise except Exception: io.stderr(_("{x} {node} {bundle} {item}").format( bundle=bold(item.bundle.name), item=item.id, node=bold(node.name), x=red("!"), )) raise else: if item.id.count(":") < 2: # don't count canned actions io.progress_advance() if not quiet: io.stdout("{x} {node} {bundle} {item}".format( bundle=bold(item.bundle.name), item=item.id, node=bold(node.name), x=green("✓"), )) if item_queue.items_with_deps and not QUIT_EVENT.is_set(): raise ItemDependencyLoop(item_queue.items_with_deps) io.progress_set_total(0) def test_subgroup_loops(repo, quiet): checked_groups = [] for group in repo.groups: if QUIT_EVENT.is_set(): break if group in checked_groups: continue with io.job(_("{group} checking for subgroup loops").format(group=bold(group.name))): checked_groups.extend(group.subgroups) # the subgroups property has the check built in if not quiet: io.stdout(_("{x} {group} has no subgroup loops").format( x=green("✓"), group=bold(group.name), )) def test_metadata_conflicts(node, quiet): with io.job(_("{node} checking for metadata conflicts").format(node=bold(node.name))): check_for_metadata_conflicts(node) if not quiet: io.stdout(_("{x} {node} has no metadata conflicts").format( x=green("✓"), node=bold(node.name), )) def test_orphaned_bundles(repo): orphaned_bundles = set(repo.bundle_names) for node in repo.nodes: if QUIT_EVENT.is_set(): break for bundle in node.bundles: if QUIT_EVENT.is_set(): break orphaned_bundles.discard(bundle.name) for bundle in sorted(orphaned_bundles): io.stderr(_("{x} {bundle} is an unused bundle").format( bundle=bold(bundle), x=red("✘"), )) 
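    # any bundle never discarded above is assigned to no node at all,
    # so a non-empty set fails the test run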
if orphaned_bundles: exit(1) def test_empty_groups(repo): empty_groups = set() for group in repo.groups: if QUIT_EVENT.is_set(): break if not group.nodes: empty_groups.add(group) for group in sorted(empty_groups): io.stderr(_("{x} {group} is an empty group").format( group=bold(group), x=red("✘"), )) if empty_groups: exit(1) def test_determinism(repo, nodes, iterations_config, iterations_metadata, quiet): """ Generate configuration a couple of times for every node and see if anything changes between iterations """ first_run_nodes = {} io.progress_set_total(len(nodes) * (iterations_config + iterations_metadata)) iter_config_todo = iterations_config iter_metadata_todo = iterations_metadata while iter_config_todo > 0 or iter_metadata_todo > 0: if QUIT_EVENT.is_set(): break iteration_repo = Repository(repo.path) iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes] for node in iteration_nodes: if QUIT_EVENT.is_set(): break first_run_nodes.setdefault(node.name, node) if iter_config_todo > 0: with io.job(_("{node} generating configuration ({i}/{n})").format( i=iterations_config - iter_config_todo, n=iterations_config, node=bold(node.name), )): result = node.hash() if first_run_nodes[node.name].hash() != result: io.stderr(_( "{x} Configuration for node {node} changed when generated repeatedly" ).format(node=node.name, x=red("✘"))) heisenitems = set(node.items).symmetric_difference( set(first_run_nodes[node.name].items)) if heisenitems: io.stderr(_( "{x} These items appeared or disappeared on {node} between runs:\n" "\t{items}" ).format( x=red("✘"), node=node.name, items="\n\t".join(sorted({item.id for item in heisenitems})), )) exit(1) for item in node.items: previous_item = first_run_nodes[node.name].get_item(item.id) if item.ITEM_TYPE_NAME == "action": continue # actions don't hash :'( if item.hash() != previous_item.hash(): current_cdict = item.display_on_create(item.cdict().copy()) previous_cdict = previous_item.display_on_create(previous_item.cdict().copy()) output = _("{x} {node} {item} changed:\n").format( x=red("✘"), node=bold(node.name), item=item.id, ) diff = diff_dict(current_cdict, previous_cdict) output += prefix_lines(diff, red("│ ")) output += red("╵") io.stderr(output) exit(1) io.progress_advance() if iter_metadata_todo > 0: with io.job(_("{node} generating metadata ({i}/{n})").format( i=iterations_metadata - iter_metadata_todo, n=iterations_metadata, node=bold(node.name), )): result = node.metadata_hash() if first_run_nodes[node.name].metadata_hash() != result: io.stderr(_( "{x} Metadata for node {node} changed when generated repeatedly" ).format(node=bold(node.name), x=red("✘"))) previous_json = metadata_to_json(first_run_nodes[node.name].metadata) current_json = metadata_to_json(node.metadata) io.stderr(diff_text(previous_json, current_json)) exit(1) io.progress_advance() if iter_config_todo > 0: iter_config_todo -= 1 if iter_metadata_todo > 0: iter_metadata_todo -= 1 io.progress_set_total(0) if not quiet: if iterations_config > 0: io.stdout(_("{x} Configuration remained the same after being generated {n} times").format( n=iterations_config, x=green("✓"), )) if iterations_metadata > 0: io.stdout(_("{x} Metadata remained the same after being generated {n} times").format( n=iterations_metadata, x=green("✓"), )) def test_reactor_provides(repo, nodes, quiet): repo._verify_reactor_provides = True for node in nodes: if QUIT_EVENT.is_set(): break node.metadata.get(()) else: if not quiet: io.stdout(_("{x} No reactors violated their declared keys").format( 
x=green("✓"), )) def bw_test(repo, args): options_selected = ( args['determinism_config'] > 1 or args['determinism_metadata'] > 1 or args['hooks_node'] or args['hooks_repo'] or args['items'] or args['metadata_conflicts'] or args['orphaned_bundles'] or args['reactor_provides'] or args['empty_groups'] or args['subgroup_loops'] ) if args['targets']: nodes = get_target_nodes(repo, args['targets']) if not options_selected: args['hooks_node'] = True args['items'] = True args['metadata_conflicts'] = True args['metadata_keys'] = True args['reactor_provides'] = True else: nodes = copy(list(repo.nodes)) if not options_selected: args['hooks_node'] = True args['hooks_repo'] = True args['items'] = True args['metadata_conflicts'] = True args['metadata_keys'] = True args['reactor_provides'] = True args['subgroup_loops'] = True if args['reactor_provides'] and not QUIT_EVENT.is_set(): test_reactor_provides(repo, nodes, args['quiet']) if args['subgroup_loops'] and not QUIT_EVENT.is_set(): test_subgroup_loops(repo, args['quiet']) if args['empty_groups'] and not QUIT_EVENT.is_set(): test_empty_groups(repo) if args['orphaned_bundles'] and not QUIT_EVENT.is_set(): test_orphaned_bundles(repo) if args['metadata_conflicts'] and not QUIT_EVENT.is_set(): io.progress_set_total(len(nodes)) for node in nodes: if QUIT_EVENT.is_set(): break test_metadata_conflicts(node, args['quiet']) io.progress_advance() io.progress_set_total(0) if args['items']: test_items(nodes, args['ignore_missing_faults'], args['quiet']) if ( (args['determinism_config'] > 1 or args['determinism_metadata'] > 1) and not QUIT_EVENT.is_set() ): test_determinism( repo, nodes, args['determinism_config'], args['determinism_metadata'], args['quiet'], ) if args['hooks_node'] and not QUIT_EVENT.is_set(): io.progress_set_total(len(nodes)) for node in nodes: if QUIT_EVENT.is_set(): break repo.hooks.test_node(repo, node) io.progress_advance() io.progress_set_total(0) if args['hooks_repo'] and not QUIT_EVENT.is_set(): repo.hooks.test(repo) bundlewrap-4.13.6/bundlewrap/cmdline/verify.py000066400000000000000000000105441417502274300214150ustar00rootroot00000000000000from datetime import datetime from sys import exit from ..concurrency import WorkerPool from ..utils.cmdline import count_items, get_target_nodes from ..utils.table import ROW_SEPARATOR, render_table from ..utils.text import ( blue, bold, cyan, cyan_unless_zero, error_summary, format_duration, green, green_unless_zero, mark_for_translation as _, red, red_unless_zero, ) from ..utils.ui import io def stats_summary(node_stats, total_duration): for node in node_stats.keys(): node_stats[node]['total'] = sum([ node_stats[node]['good'], node_stats[node]['bad'], node_stats[node]['unknown'], ]) try: node_stats[node]['health'] = \ (node_stats[node]['good'] / float(node_stats[node]['total'])) * 100.0 except ZeroDivisionError: node_stats[node]['health'] = 0 totals = { 'items': 0, 'good': 0, 'bad': 0, 'unknown': 0, } node_ranking = [] for node_name, stats in node_stats.items(): totals['items'] += stats['total'] totals['good'] += stats['good'] totals['bad'] += stats['bad'] totals['unknown'] += stats['unknown'] node_ranking.append(( stats['health'], node_name, stats['total'], stats['good'], stats['bad'], stats['unknown'], stats['duration'], )) node_ranking = sorted(node_ranking, reverse=True) try: totals['health'] = (totals['good'] / float(totals['items'])) * 100.0 except ZeroDivisionError: totals['health'] = 0 rows = [[ bold(_("node")), _("items"), green(_("good")), red(_("bad")), cyan(_("unknown")), _("health"), 
_("duration"), ], ROW_SEPARATOR] for health, node_name, items, good, bad, unknown, duration in node_ranking: rows.append([ node_name, str(items), green_unless_zero(good), red_unless_zero(bad), cyan_unless_zero(unknown), "{0:.1f}%".format(health), format_duration(duration), ]) if len(node_ranking) > 1: rows.append(ROW_SEPARATOR) rows.append([ bold(_("total ({} nodes)").format(len(node_stats.keys()))), str(totals['items']), green_unless_zero(totals['good']), red_unless_zero(totals['bad']), cyan_unless_zero(totals['unknown']), "{0:.1f}%".format(totals['health']), format_duration(total_duration), ]) alignments = { 1: 'right', 2: 'right', 3: 'right', 4: 'right', 5: 'right', 6: 'right', 7: 'right', } for line in render_table(rows, alignments=alignments): io.stdout("{x} {line}".format(x=blue("i"), line=line)) def bw_verify(repo, args): errors = [] node_stats = {} pending_nodes = get_target_nodes(repo, args['targets']) start_time = datetime.now() io.progress_set_total(count_items(pending_nodes)) def tasks_available(): return bool(pending_nodes) def next_task(): node = pending_nodes.pop() return { 'target': node.verify, 'task_id': node.name, 'kwargs': { 'autoonly_selector': args['autoonly'], 'autoskip_selector': args['autoskip'], 'show_all': args['show_all'], 'show_diff': args['show_diff'], 'workers': args['item_workers'], }, } def handle_result(task_id, return_value, duration): node_stats[task_id] = return_value def handle_exception(task_id, exception, traceback): msg = "{}: {}".format( task_id, exception, ) io.stderr(traceback) io.stderr(repr(exception)) io.stderr(msg) errors.append(msg) worker_pool = WorkerPool( tasks_available, next_task, handle_result=handle_result, handle_exception=handle_exception, pool_id="verify", workers=args['node_workers'], ) worker_pool.run() if args['summary'] and node_stats: stats_summary(node_stats, datetime.now() - start_time) error_summary(errors) exit(1 if errors else 0) bundlewrap-4.13.6/bundlewrap/cmdline/zen.py000066400000000000000000000023651417502274300207070ustar00rootroot00000000000000from ..utils.text import mark_for_translation as _ from ..utils.ui import io ZEN = _(""" , @@ @@@@ @@@@@ @@@@@ @@@@@ @@@@@ @@@@@ @@@@@ '@@@@@@, .@@@@@@+ +@@@@@@. @@@@@@, `@@@@@@@ +@@@@@@, `@@@@@@# @@@@@@@@+ :@@@@@@' `@@@@@@@ ;@@@@@@: @@@@@@@@@@@` #@@@@@@. :@@@@@@' @@@@@@@` @@@@@ ;@@@@@@; .@@@@@@# #@@@@@@` ,@@@@@@+ @@@@@ `@@@@@@#'@@@@@@: .@@@@@@+ +@@@@@@. @@@@@ +@@@@@@@@@ +@@@@@@, `@@@@@@# @@@@@ ,@@@@@@+ `@@@@@@@@@` ;@@@@@@: @@@@@ @@@@@@@` :@@@@@@'@@@@@@' @@@@@@@` @@@@@ ;@@@@@@#@@@@@@` `@@@@@@@@@@@@@+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@# +@@@@@@@@. @@@@@@@@@@@@@@@@@@@@@@@@@@@, .@@@# The Zen of BundleWrap ───────────────────── BundleWrap is a tool, not a solution. BundleWrap will not write your configuration for you. BundleWrap is Python all the way down. BundleWrap will adapt rather than grow. BundleWrap is the single point of truth. """) def bw_zen(repo, args): io.stdout(ZEN) bundlewrap-4.13.6/bundlewrap/concurrency.py000066400000000000000000000142531417502274300210310ustar00rootroot00000000000000from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED from datetime import datetime from random import randint from sys import exit from traceback import format_tb from .utils.text import mark_for_translation as _ from .utils.ui import io, QUIT_EVENT JOIN_TIMEOUT = 5 # seconds class WorkerPool: """ Manages a bunch of worker threads. 
""" def __init__( self, tasks_available, next_task, handle_result=None, handle_exception=None, cleanup=None, pool_id=None, workers=4, ): if workers < 1: raise ValueError(_("at least one worker is required")) self.tasks_available = tasks_available self.next_task = next_task self.handle_result = handle_result self.handle_exception = handle_exception self.cleanup = cleanup self.number_of_workers = workers self.idle_workers = set(range(self.number_of_workers)) self.pool_id = "unnamed_pool_{}".format(randint(1, 99999)) if pool_id is None else pool_id self.pending_futures = {} def _get_result(self): """ Blocks until a result from a worker is received. """ io.debug(_("worker pool {pool} waiting for next task to complete").format( pool=self.pool_id, )) completed, pending = wait( self.pending_futures.keys(), return_when=FIRST_COMPLETED, ) future = completed.pop() start_time = self.pending_futures[future]['start_time'] task_id = self.pending_futures[future]['task_id'] worker_id = self.pending_futures[future]['worker_id'] del self.pending_futures[future] self.idle_workers.add(worker_id) exception = future.exception() if exception: io.debug(_( "exception raised while executing task {task} on worker #{worker} " "of worker pool {pool}" ).format( pool=self.pool_id, task=task_id, worker=worker_id, )) exception.__task_id = task_id raise exception else: io.debug(_( "worker pool {pool} delivering result of {task} on worker #{worker}" ).format( pool=self.pool_id, task=task_id, worker=worker_id, )) return (task_id, future.result(), datetime.now() - start_time) def start_task(self, target=None, task_id=None, args=None, kwargs=None): """ target any callable (includes bound methods) task_id something to remember this worker by args list of positional arguments passed to target kwargs dictionary of keyword arguments passed to target """ if args is None: args = [] else: args = list(args) if kwargs is None: kwargs = {} task_id = "unnamed_task_{}".format(randint(1, 99999)) if task_id is None else task_id worker_id = self.idle_workers.pop() io.debug(_("worker pool {pool} is starting task {task} on worker #{worker}").format( pool=self.pool_id, task=task_id, worker=worker_id, )) self.pending_futures[self.executor.submit(target, *args, **kwargs)] = { 'start_time': datetime.now(), 'task_id': task_id, 'worker_id': worker_id, } def run(self): io.debug(_("spinning up worker pool {pool}").format(pool=self.pool_id)) processed_results = [] exit_code = None self.executor = ThreadPoolExecutor(max_workers=self.number_of_workers) try: while ( (self.tasks_available() and not QUIT_EVENT.is_set()) or self.workers_are_running ): while ( self.tasks_available() and self.workers_are_available and not QUIT_EVENT.is_set() ): task = self.next_task() if task is not None: self.start_task(**task) if self.workers_are_running: try: result = self._get_result() except SystemExit as exc: if exit_code is None: # Don't overwrite exit code if it has already been set. # This may be a worker exiting with 0 only because # a previous worker raised SystemExit with 1. # We must preserve that original exit code. 
exit_code = exc.code # just make sure QUIT_EVENT is set and continue # waiting for pending results QUIT_EVENT.set() except Exception as exc: traceback = "".join(format_tb(exc.__traceback__)) if self.handle_exception is None: raise exc else: processed_results.append( self.handle_exception(exc.__task_id, exc, traceback) ) else: if self.handle_result is not None: processed_results.append(self.handle_result(*result)) if QUIT_EVENT.is_set(): # we have reaped all our workers, let's stop this thread # before it does anything else exit(0 if exit_code is None else exit_code) return processed_results finally: io.debug(_("shutting down worker pool {pool}").format(pool=self.pool_id)) if self.cleanup: self.cleanup() self.executor.shutdown() io.debug(_("worker pool {pool} has been shut down").format(pool=self.pool_id)) @property def workers_are_available(self): return bool(self.idle_workers) @property def workers_are_running(self): return bool(self.pending_futures) bundlewrap-4.13.6/bundlewrap/deps.py000066400000000000000000000527461417502274300174430ustar00rootroot00000000000000from contextlib import suppress from .exceptions import BundleError, ItemDependencyError, NoSuchItem from .items import ALLOWED_ITEM_AUTO_ATTRIBUTES, Item from .items.actions import Action from .utils.plot import explain_item_dependency_loop from .utils.text import bold, mark_for_translation as _ from .utils.ui import io class ItemDependencyLoop(ItemDependencyError): """ Raised when there is a loop in item dependencies. """ def __init__(self, items): self.items = items def __repr__(self): return "".format(len(self.items)) def __str__(self): return "\n".join(explain_item_dependency_loop(self.items)) class TagFillerItem(Item): BUNDLE_ATTRIBUTE_NAME = "__tagfiller__" ITEM_TYPE_NAME = "empty_tag" def sdict(self): return {} def resolve_selector(selector, items, originating_item_id=None, originating_tag=None): """ Given an item selector (e.g. 'bundle:foo' or 'file:/bar'), return all items matching that selector from the given list of items. """ if selector.startswith("!"): negate = lambda b: not b selector = selector[1:] else: negate = lambda b: b try: selector_type, selector_name = selector.split(":", 1) except ValueError: raise ValueError(_("invalid item selector: {}").format(selector)) if selector_type == "bundle": return filter( lambda item: negate(item.bundle.name == selector_name) and item.id != originating_item_id, items, ) elif selector_type == "tag": if not selector_name: # "tag:" return filter( lambda item: negate(bool(item.tags)) and originating_tag not in item.tags, items, ) else: return filter( lambda item: negate(selector_name in item.tags) and item.id != originating_item_id, items, ) elif not selector_name: # "file:" return filter( lambda item: negate(item.ITEM_TYPE_NAME == selector_type) and item.id != originating_item_id, items, ) else: # single item if negate(False): return filter(lambda item: item.id != selector, items) else: return [find_item(selector, items)] def find_item(item_id, items): """ Returns the first item with the given ID within the given list of items. """ try: item = list(filter(lambda item: item.id == item_id, items))[0] except IndexError: raise NoSuchItem(_("item not found: {}").format(item_id)) return item def _flatten_dependencies(items): """ This will cause all dependencies - direct AND inherited - to be listed in item._flattened_deps. 
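
    Illustration: if file:/a needs pkg:b and pkg:b needs pkg:c, then
    file:/a._flattened_deps ends up as {'pkg:b', 'pkg:c'}.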
""" for item in items: if not hasattr(item, '_flattened_deps'): _flatten_deps_for_item(item, items) def _flatten_deps_for_item(item, items): """ Recursively retrieves and returns a list of all inherited dependencies of the given item. This can handle loops, but will ignore them. """ item._flattened_deps = {item.id for item in item._deps} item._flattened_deps_needs = {item.id for item in item._deps_needs | item._deps_needed_by} for dep_item in item._deps: # Don't recurse if we have already resolved nested # dependencies for this item. Also serves as a guard # against infinite recursion when there are loops. if not hasattr(dep_item, '_flattened_deps'): _flatten_deps_for_item(dep_item, items) item._flattened_deps.update(dep_item._flattened_deps) item._flattened_deps_needs.update(dep_item._flattened_deps_needs) def _add_incoming_needs(items): """ For each item, records all items that need that item in ._incoming_needs. """ for item in items: item._incoming_needs = set() for depending_item in items: if item == depending_item: continue if item.id in depending_item._flattened_deps_needs: item._incoming_needs.add(depending_item) def _prepare_auto_attrs(items): for item in items: auto_attrs = item.get_auto_attrs(items) # remove next line in 5.0 when killing get_auto_deps auto_attrs['needs'] = set(auto_attrs.get('needs', set())) | set(item.get_auto_deps(items)) for key, value in auto_attrs.items(): if key not in ALLOWED_ITEM_AUTO_ATTRIBUTES: raise ValueError(_("get_auto_attrs() on {item} returned illegal key {key}").format( item=item.id, key=repr(key), )) setattr(item, key, set(getattr(item, key)) | set(value)) def _prepare_deps(items): for item in items: item._deps = set() # holds all item ids blocking execution of that item for dep_type, deps in ( ('after', set(item.after)), ('needs', set(item.needs)), ): setattr(item, '_deps_' + dep_type, set()) for dep in deps: try: resolved_deps = tuple(resolve_selector(dep, items, originating_item_id=item.id)) except NoSuchItem: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a dependency ({dep_type}) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=dep, dep_type=dep_type, )) else: item._deps.update(resolved_deps) getattr(item, '_deps_' + dep_type).update(resolved_deps) def _inject_canned_actions(items): """ Looks for canned actions like "svc_upstart:mysql:reload" in items, created actions for them and add those to the list of items. """ actions = set() for item in items: for canned_action_name, canned_action_attrs in item.get_canned_actions().items(): canned_action_id = f"{item.id}:{canned_action_name}" canned_action_attrs.update({'triggered': True}) action = Action( item.bundle, canned_action_id, canned_action_attrs, skip_name_validation=True, ) actions.add(action) items.update(actions) def _inject_tag_filler_items(items, bundles): """ Creates TagFillerItems to ensure each tag has at least one item. This is important so even if there are no user-defined items with a tag, that tag can still be used to chain dependencies. [item:A] --needs--> [tag:B] --needs--> [item:C] Users will assume that item:A will implicitly depend on item:C, but if tag:B doesn't resolve to any items, that connection won't be made. 
""" for bundle in bundles: for tag, attrs in bundle.bundle_attrs.get('tags', {}).items(): if not tuple(resolve_selector(f"tag:{tag}", items)): items.add(TagFillerItem(bundle, tag, {'tags': {tag}})) def _inject_concurrency_blockers(items, node_os, node_os_version): """ Looks for items with BLOCK_CONCURRENT set and inserts daisy-chain dependencies to force a sequential apply. """ # find every item type that cannot be applied in parallel item_types = set() for item in items: item._deps_concurrency = set() if item.block_concurrent(node_os, node_os_version): item_types.add(item.__class__) # Now that we have collected all relevant types, # we must group them together when they overlap. E.g.: # # Type1.block_concurrent(...) == ["type1", "type2"] # Type2.block_concurrent(...) == ["type2", "type3"] # Type4.block_concurrent(...) == ["type4"] # # becomes # # ["type1", "type2", "type3"] # ["type4"] # # because the first two types overlap in blocking type2. This is # necessary because existing dependencies from type3 to type1 need # to be taken into account when generating the daisy-chains # connecting the three types. If we processed blockers for Type1 and # Type2 independently, we might end up with two very different # chains for Type2, which may cause circular dependencies. chain_groups = [] for item_type in item_types: block_concurrent = {item_type.ITEM_TYPE_NAME} block_concurrent.update(item_type.block_concurrent(node_os, node_os_version)) found = False for blocked_types in chain_groups: for blocked_type in block_concurrent: if blocked_type in blocked_types: blocked_types.update(block_concurrent) found = True break if found: break if not found: chain_groups.append(block_concurrent) # daisy-chain all items of the chain group while respecting existing # dependencies between them for blocked_types in chain_groups: blocked_types = set(blocked_types) type_items = set(filter( lambda item: item.ITEM_TYPE_NAME in blocked_types, items, )) processed_items = set() for item in type_items: # disregard deps to items of other types item.__deps = set(filter( lambda dep: dep.split(":", 1)[0] in blocked_types, item._flattened_deps, )) previous_item = None while len(processed_items) < len(type_items): # find the first item without same-type deps we haven't # processed yet try: item = list(filter( lambda item: not item.__deps and item not in processed_items, type_items, ))[0] except IndexError: # this can happen if the flattened deps of all items of # this type already contain a dependency on another # item of this type break if previous_item is not None: # unless we're at the first item # add dep to previous item -- unless it's already in there if previous_item not in item._deps: item._deps.add(previous_item) item._deps_concurrency.add(previous_item) item._flattened_deps.add(previous_item.id) previous_item = item processed_items.add(item) # Now remove all deps on the processed item. This frees up # items depending *only* on the processed item to be # eligible for the next iteration of this loop. for other_item in type_items: with suppress(KeyError): other_item.__deps.remove(item.id) def _inject_reverse_dependencies(items): """ Looks for 'before' and 'needed_by' deps and creates standard dependencies accordingly. 
""" for item in items: for dep_type in ('before', 'needed_by'): setattr(item, '_deps_' + dep_type, set()) for item in items: for dep_type in ('before', 'needed_by'): for depending_item_id in set(getattr(item, dep_type)): try: dependent_items = resolve_selector( depending_item_id, items, originating_item_id=item.id, ) except NoSuchItem: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a reverse dependency ({dep_type}) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=depending_item_id, dep_type=dep_type, )) for dependent_item in dependent_items: dependent_item._deps.add(item) getattr(dependent_item, '_deps_' + dep_type).add(item) def _inject_reverse_triggers(items): """ Looks for 'triggered_by' and 'precedes' attributes and turns them into standard triggers (defined on the opposing end). """ for item in items: for triggering_item_selector in item.triggered_by: try: triggering_items = resolve_selector( triggering_item_selector, items, originating_item_id=item.id, ) except NoSuchItem: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a reverse trigger (triggered_by) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=triggering_item_selector, )) for triggering_item in triggering_items: triggering_item.triggers.add(item.id) for preceded_item_selector in item.precedes: try: preceded_items = resolve_selector( preceded_item_selector, items, originating_item_id=item.id, ) except NoSuchItem: raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' has a reverse trigger (precedes) " "on '{dep}', which doesn't exist" ).format( item=item.id, bundle=item.bundle.name, dep=preceded_item_selector, )) for preceded_item in preceded_items: preceded_item.preceded_by.add(item.id) def _inject_trigger_dependencies(items): """ Injects dependencies from all triggered items to their triggering items. """ for item in items: item._deps_triggers = set() for item in items: for triggered_item_selector in item.triggers: try: triggered_items = resolve_selector( triggered_item_selector, items, originating_item_id=item.id, ) except KeyError: raise BundleError(_( "unable to find definition of '{item1}' triggered " "by '{item2}' in bundle '{bundle}'" ).format( bundle=item.bundle.name, item1=triggered_item_selector, item2=item.id, )) for triggered_item in triggered_items: if triggered_item.triggered: triggered_item._deps.add(item) triggered_item._deps_triggers.add(item) else: raise BundleError(_( "'{item1}' in bundle '{bundle1}' triggered " "by '{item2}' in bundle '{bundle2}', " "but missing 'triggered' attribute" ).format( item1=triggered_item.id, bundle1=triggered_item.bundle.name, item2=item.id, bundle2=item.bundle.name, )) def _inject_preceded_by_dependencies(items): """ Injects dependencies from all triggering items to their preceded_by items and attaches triggering items to preceding items. 
""" for item in items: if item.preceded_by and item.triggered: raise BundleError(_( "triggered item '{item}' in bundle '{bundle}' must not use " "'preceded_by' (use chained triggers instead)".format( bundle=item.bundle.name, item=item.id, ), )) for triggered_item_selector in item.preceded_by: try: triggered_items = resolve_selector( triggered_item_selector, items, originating_item_id=item.id, ) except KeyError: raise BundleError(_( "unable to find definition of '{item1}' preceding " "'{item2}' in bundle '{bundle}'" ).format( bundle=item.bundle.name, item1=triggered_item_selector, item2=item.id, )) for triggered_item in triggered_items: if triggered_item.triggered: triggered_item._precedes_items.add(item) item._deps.add(triggered_item) else: raise BundleError(_( "'{item1}' in bundle '{bundle1}' precedes " "'{item2}' in bundle '{bundle2}', " "but missing 'triggered' attribute" ).format( item1=triggered_item.id, bundle1=triggered_item.bundle.name, item2=item.id, bundle2=item.bundle.name, )) def _inject_tag_attrs(items, bundles): """ Applies the tag-level attributes from bundle.py to all matching items. """ for bundle in bundles: for tag, attrs in bundle.bundle_attrs.get('tags', {}).items(): for item in resolve_selector(f"tag:{tag}", items, originating_tag=tag): for attr in ( "after", "before", "needs", "needed_by", "precedes", "preceded_by", "triggers", "triggered_by", ): getattr(item, attr).update(attrs.get(attr, set())) def _add_inherited_tags(items, bundles): """ This will apply tags to items based on the tags in bundle.py. tags = { "foo": { "tags": {"bar"}, # will cause all items with tag:foo # to also have tag:bar }, } """ tags_added = True while tags_added: tags_added = False for bundle in bundles: for tag, attrs in bundle.bundle_attrs.get('tags', {}).items(): inherited_tags = attrs.get('tags', set()) if not inherited_tags: # just an optimization to avoid needlessly calling resolve_selector() continue for item in resolve_selector(f"tag:{tag}", items): len_before = len(item.tags) item.tags.update(inherited_tags) if len_before < len(item.tags): tags_added = True @io.job_wrapper(_("{} processing dependencies").format(bold("{0.name}"))) def prepare_dependencies(node): """ Performs all dependency preprocessing on a list of items. """ for item in node.items: item._check_loopback_dependency() items = set(node.items) # might be a tuple from cached_property _inject_canned_actions(items) _inject_tag_filler_items(items, node.bundles) _add_inherited_tags(items, node.bundles) _inject_tag_attrs(items, node.bundles) _prepare_auto_attrs(items) _prepare_deps(items) _inject_reverse_triggers(items) _inject_reverse_dependencies(items) _inject_trigger_dependencies(items) _inject_preceded_by_dependencies(items) _flatten_dependencies(items) _add_incoming_needs(items) _inject_concurrency_blockers(items, node.os, node.os_version) return items def remove_dep_from_items(items, dep): """ Removes the given item id (dep) from the temporary list of dependencies of all items in the given list. """ for item in items: with suppress(KeyError): item._deps.remove(dep) return items def remove_item_dependents(items, dep_item): """ Removes the items depending on the given item from the list of items. 
""" removed_items = set() for item in items: if dep_item in item._deps_needs | item._deps_needed_by: removed_items.add(item) with suppress(KeyError): item._deps.remove(dep_item) for item in removed_items: items.remove(item) if removed_items: io.debug( "skipped these items because they depend on {item}, which was " "skipped previously: {skipped}".format( item=dep_item.id, skipped=", ".join([item.id for item in removed_items]), ) ) all_recursively_removed_items = set() for removed_item in removed_items: if removed_item.cascade_skip: items, recursively_removed_items = \ remove_item_dependents(items, removed_item) all_recursively_removed_items.update(recursively_removed_items) else: items = remove_dep_from_items(items, removed_item) return (items, removed_items | all_recursively_removed_items) def split_items_without_deps(items): """ Takes a list of items and extracts the ones that don't have any dependencies. The extracted deps are returned as a list. """ remaining_items = set() removed_items = set() for item in items: if item._deps: remaining_items.add(item) else: removed_items.add(item) return (remaining_items, removed_items) bundlewrap-4.13.6/bundlewrap/exceptions.py000066400000000000000000000046051417502274300206600ustar00rootroot00000000000000class DontCache(Exception): """ Used in the cached_property decorator to temporily prevent caching the returned result """ def __init__(self, obj): self.obj = obj class FaultUnavailable(Exception): """ Raised when a Fault object cannot be resolved. """ pass class GracefulApplyException(Exception): """ Raised when a problem has been encountered in `bw apply`, but a more verbose error has already been printed. """ pass class ItemSkipped(Exception): """ Raised when an item is skipped during `bw verify`. """ pass class NoSuchBundle(Exception): """ Raised when a bundle of unknown name is requested. """ pass class NoSuchGroup(Exception): """ Raised when a group of unknown name is requested. """ pass class NoSuchItem(Exception): """ Raised when an item of unknown name is requested. """ pass class NoSuchNode(Exception): """ Raised when a node of unknown name is requested. """ pass class RemoteException(Exception): """ Raised when a shell command on a node fails. """ pass class RepositoryError(Exception): """ Indicates that somethings is wrong with the current repository. """ pass class BundleError(RepositoryError): """ Indicates an error in a bundle. """ pass class ItemDependencyError(RepositoryError): """ Indicates a problem with item dependencies (e.g. loops). """ pass class NoSuchRepository(RepositoryError): """ Raised when trying to get a Repository object from a directory that is not in fact a repository. """ pass class MetadataPersistentKeyError(RepositoryError): """ Raised when metadata reactors keep raising KeyErrors indefinitely. """ pass class MissingRepoDependency(RepositoryError): """ Raised when a dependency from requirements.txt is missing. """ pass class SkipNode(Exception): """ Can be raised by hooks to skip a node. """ pass class TemplateError(RepositoryError): """ Raised when an error occurs while rendering a template. """ pass class UsageException(Exception): """ Raised when command line options don't make sense. """ pass class NodeLockedException(Exception): """ Raised when a node is already locked during an 'apply' run. 
""" pass bundlewrap-4.13.6/bundlewrap/group.py000066400000000000000000000247021417502274300176330ustar00rootroot00000000000000from os import mkdir from os.path import exists, join import re from tomlkit import dumps as toml_dump, parse as toml_parse from .exceptions import NoSuchGroup, NoSuchNode, RepositoryError from .utils import cached_property, error_context, Fault, get_file_contents, names from .utils.dicts import ( dict_to_toml, hash_statedict, set_key_at_path, validate_dict, COLLECTION_OF_STRINGS, TUPLE_OF_INTS, ) from .utils.text import mark_for_translation as _, toml_clean, validate_name GROUP_ATTR_DEFAULTS = { 'cmd_wrapper_inner': "export LANG=C; {}", 'cmd_wrapper_outer': "sudo -u {1} sh -c {0}", 'lock_dir': "/var/lib/bundlewrap", 'dummy': False, 'kubectl_context': None, 'locking_node': None, 'os': 'linux', # Setting os_version to 0 by default will probably yield less # surprises than setting it to max_int. Users will probably # start at a certain version and then gradually update their # systems, adding conditions like this: # # if node.os_version >= (2,): # new_behavior() # else: # old_behavior() # # If we set os_version to max_int, nodes without an explicit # os_version would automatically adopt the new_behavior() as # soon as it appears in the repo - which is probably not what # people want. 'os_version': (0,), 'password': None, # On some nodes, we maybe have pip2 and pip3 installed, but there's # no way of knowing which one the user wants. Or maybe there's only # one of them, but there's no symlink to pip, only pip3. 'pip_command': 'pip', 'use_shadow_passwords': True, 'username': None, } GROUP_ATTR_TYPES = { 'bundles': COLLECTION_OF_STRINGS, 'cmd_wrapper_inner': str, 'cmd_wrapper_outer': str, 'lock_dir': str, 'dummy': bool, 'file_path': str, 'kubectl_context': (str, type(None)), 'locking_node': (str, type(None)), 'member_patterns': COLLECTION_OF_STRINGS, 'members': COLLECTION_OF_STRINGS, 'metadata': dict, 'os': str, 'os_version': TUPLE_OF_INTS, 'password': (Fault, str, type(None)), 'pip_command': str, 'subgroups': COLLECTION_OF_STRINGS, 'subgroup_patterns': COLLECTION_OF_STRINGS, 'supergroups': COLLECTION_OF_STRINGS, 'use_shadow_passwords': bool, 'username': (Fault, str, type(None)), } def _build_error_chain(loop_node, last_node, nodes_in_between): """ Used to illustrate subgroup loop paths in error messages. loop_node: name of node that loops back to itself last_node: name of last node pointing back to loop_node, causing the loop nodes_in_between: names of nodes traversed during loop detection, does include loop_node if not a direct loop, but not last_node """ error_chain = [] for visited in nodes_in_between: if (loop_node in error_chain) != (loop_node == visited): error_chain.append(visited) error_chain.append(last_node) error_chain.append(loop_node) return error_chain class Group: """ A group of nodes. 
""" def __init__(self, group_name, attributes=None): if attributes is None: attributes = {} if not validate_name(group_name): raise RepositoryError(_("'{}' is not a valid group name.").format(group_name)) with error_context(group_name=group_name): validate_dict(attributes, GROUP_ATTR_TYPES) self._attributes = attributes self._immediate_subgroup_patterns = { re.compile(pattern) for pattern in set(attributes.get('subgroup_patterns', set())) } self._member_patterns = { re.compile(pattern) for pattern in set(attributes.get('member_patterns', set())) } self.name = group_name self.file_path = attributes.get('file_path') for attr in GROUP_ATTR_DEFAULTS: # defaults are applied in node.py setattr(self, attr, attributes.get(attr)) def __lt__(self, other): return self.name < other.name def __repr__(self): return "".format(self.name) def __str__(self): return self.name @cached_property def cdict(self): group_dict = {} for node in self.nodes: group_dict[node.name] = node.hash() return group_dict def group_membership_hash(self): return hash_statedict(sorted(names(self.nodes))) def hash(self): return hash_statedict(self.cdict) def metadata_hash(self): group_dict = {} for node in self.nodes: group_dict[node.name] = node.metadata_hash() return hash_statedict(group_dict) @cached_property def nodes(self): for node in self.repo.nodes: if node.in_group(self.name): yield node @cached_property def _nodes_from_members(self): for node_name in self._attributes.get('members', set()): try: yield self.repo.get_node(node_name) except NoSuchNode: raise RepositoryError(_( "Group '{group}' has '{node}' listed as a member in groups.py, " "but no such node could be found." ).format( group=self.name, node=node_name, )) @property def _subgroup_names_from_patterns(self): for pattern in self._immediate_subgroup_patterns: for group in self.repo.groups: if pattern.search(group.name) is not None and group != self: yield group.name @cached_property def _supergroups_from_attribute(self): for supergroup_name in self._attributes.get('supergroups', set()): try: supergroup = self.repo.get_group(supergroup_name) except NoSuchGroup: raise RepositoryError(_( "Group '{group}' has '{supergroup}' listed as a supergroup in groups.py, " "but no such group could be found." ).format( group=self.name, supergroup=supergroup_name, )) if self.name in ( list(supergroup._attributes.get('subgroups', set())) + list(supergroup._subgroup_names_from_patterns) ): raise RepositoryError(_( "Group '{group}' has '{supergroup}' listed as a supergroup in groups.py, " "but it is already listed as a subgroup on that group (redundant)." ).format( group=self.name, supergroup=supergroup_name, )) yield supergroup def _check_subgroup_names(self, visited_names): """ Recursively finds subgroups and checks for loops. """ for name in self._immediate_subgroup_names: if name not in visited_names: try: group = self.repo.get_group(name) except NoSuchGroup: raise RepositoryError(_( "Group '{group}' has '{subgroup}' listed as a subgroup in groups.py, " "but no such group could be found." ).format( group=self.name, subgroup=name, )) for group_name in group._check_subgroup_names( visited_names + [self.name], ): yield group_name else: error_chain = _build_error_chain( name, self.name, visited_names, ) raise RepositoryError(_( "Group '{group}' can't be a subgroup of itself. 
" "({chain})" ).format( group=name, chain=" -> ".join(error_chain), )) if self.name not in visited_names: yield self.name @cached_property def parent_groups(self): for group in self.repo.groups: if self in group.subgroups: yield group @cached_property def immediate_parent_groups(self): for group in self.repo.groups: if self in group.immediate_subgroups: yield group @cached_property def subgroups(self): """ Iterator over all subgroups as group objects. """ for group_name in set(self._check_subgroup_names([self.name])): yield self.repo.get_group(group_name) @cached_property def toml(self): if not self.file_path or not self.file_path.endswith(".toml"): raise ValueError(_("group {} not in TOML format").format(self.name)) return toml_parse(get_file_contents(self.file_path)) def toml_save(self): try: toml_doc = self.toml except ValueError: attributes = self._attributes.copy() del attributes['file_path'] toml_doc = dict_to_toml(attributes) self.file_path = join(self.repo.path, "groups", self.name + ".toml") if not exists(join(self.repo.path, "groups")): mkdir(join(self.repo.path, "groups")) with open(self.file_path, 'w') as f: f.write(toml_clean(toml_dump(toml_doc))) def toml_set(self, path, value): if not isinstance(path, tuple): path = path.split("/") set_key_at_path(self.toml, path, value) @cached_property def immediate_subgroups(self): """ Iterator over all immediate subgroups as group objects. """ for group_name in self._immediate_subgroup_names: try: yield self.repo.get_group(group_name) except NoSuchGroup: raise RepositoryError(_( "Group '{group}' has '{subgroup}' listed as a subgroup in groups.py, " "but no such group could be found." ).format( group=self.name, subgroup=group_name, )) @cached_property def _immediate_subgroup_names(self): return set( list(self._attributes.get('subgroups', set())) + list(self._subgroup_names_from_patterns) + [group.name for group in self.repo.groups if self in group._supergroups_from_attribute] ) bundlewrap-4.13.6/bundlewrap/itemqueue.py000066400000000000000000000072501417502274300205010ustar00rootroot00000000000000from .deps import ( find_item, prepare_dependencies, remove_item_dependents, remove_dep_from_items, split_items_without_deps, ) from .exceptions import NoSuchItem from .utils.text import mark_for_translation as _ from .utils.ui import io class BaseQueue: def __init__(self, node): self.items_with_deps = prepare_dependencies(node) self.items_without_deps = set() self._split() self.pending_items = set() def _split(self): self.items_with_deps, self.items_without_deps = \ split_items_without_deps(self.all_items) @property def all_items(self): return self.items_with_deps | self.items_without_deps class ItemQueue(BaseQueue): def item_failed(self, item): """ Called when an item could not be fixed. Yields all items that have been skipped as a result by cascading. """ for skipped_item in self.item_skipped(item): yield skipped_item def item_fixed(self, item): """ Called when an item has successfully been fixed. """ self.item_ok(item) self._fire_triggers_for_item(item) def item_ok(self, item): """ Called when an item didn't need to be fixed. """ self.pending_items.remove(item) # if an item is applied successfully, all dependencies on it can # be removed from the remaining items self.items_with_deps = remove_dep_from_items( self.items_with_deps, item, ) self._split() def item_skipped(self, item): """ Called when an item has been skipped. Yields all items that have been skipped as a result by cascading. 
""" self.pending_items.remove(item) if item.cascade_skip: # TODO 5.0 always do this when removing cascade_skip # if an item fails or is skipped, all items that depend on # it shall be removed from the queue self.items_with_deps, skipped_items = remove_item_dependents( self.items_with_deps, item, ) for skipped_item in skipped_items: yield skipped_item else: self.items_with_deps = remove_dep_from_items( self.items_with_deps, item, ) self._split() def pop(self): """ Gets the next item available for processing and moves it into self.pending_items. Will raise KeyError if no item is available. """ if not self.items_without_deps: raise KeyError item = self.items_without_deps.pop() self.pending_items.add(item) return item def _fire_triggers_for_item(self, item): for triggered_item_id in item.triggers: try: triggered_item = find_item( triggered_item_id, self.all_items, ) triggered_item.has_been_triggered = True except NoSuchItem: io.debug(_( "{item} tried to trigger {triggered_item}, " "but it wasn't available. It must have been skipped previously." ).format( item=item.id, triggered_item=triggered_item_id, )) class ItemTestQueue(BaseQueue): """ A simpler variation of ItemQueue that is used by `bw test` to check for circular dependencies. """ def pop(self): item = self.items_without_deps.pop() self.items_with_deps = remove_dep_from_items(self.items_with_deps, item) self._split() return item bundlewrap-4.13.6/bundlewrap/items/000077500000000000000000000000001417502274300172415ustar00rootroot00000000000000bundlewrap-4.13.6/bundlewrap/items/__init__.py000066400000000000000000000767731417502274300213760ustar00rootroot00000000000000""" Note that modules in this package have to use absolute imports because Repository.item_classes loads them as files. """ from copy import copy from datetime import datetime from inspect import cleandoc from os.path import join from textwrap import TextWrapper from bundlewrap.exceptions import ( BundleError, FaultUnavailable, ItemDependencyError, ItemSkipped, ) from bundlewrap.utils import cached_property, Fault from bundlewrap.utils.dicts import dict_to_text, diff_dict, hash_statedict, validate_statedict from bundlewrap.utils.text import force_text, mark_for_translation as _ from bundlewrap.utils.text import blue, bold, green, italic, red, wrap_question from bundlewrap.utils.ui import io from bundlewrap.operations import run_local ALLOWED_ITEM_AUTO_ATTRIBUTES = { 'after', 'before', 'needed_by', 'needs', } BUILTIN_ITEM_ATTRIBUTES = { 'after': set(), 'before': set(), 'cascade_skip': None, 'comment': None, 'needed_by': set(), 'needs': set(), 'preceded_by': set(), 'precedes': set(), 'error_on_missing_fault': False, 'skip': False, 'tags': set(), 'triggered': False, 'triggered_by': set(), 'triggers': set(), 'unless': "", 'when_creating': {}, } wrapper = TextWrapper( break_long_words=False, break_on_hyphens=False, expand_tabs=False, replace_whitespace=False, ) def format_comment(comment): result = "\n\n" for line in wrapper.wrap(cleandoc(comment)): for inlineline in line.split("\n"): result += "{} {}\n".format(bold("#"), italic(inlineline)) return result def keys_to_fix(cdict, sdict): if cdict is None: return set() if sdict is None: return set(cdict.keys()) differing_keys = set() for key, value in cdict.items(): if value != sdict[key]: differing_keys.add(key) return differing_keys class ItemStatus: """ Holds information on a particular Item such as whether it needs fixing and what's broken. 
""" def __init__(self, cdict, sdict): self.cdict = cdict self.sdict = sdict self.keys_to_fix = [] self.must_be_deleted = (self.sdict is not None and self.cdict is None) self.must_be_created = (self.cdict is not None and self.sdict is None) if not self.must_be_deleted and not self.must_be_created: self.keys_to_fix = keys_to_fix(cdict, sdict) def __repr__(self): return "".format(self.correct) @property def correct(self): return not self.must_be_deleted and not self.must_be_created and not bool(self.keys_to_fix) def make_normalize(attribute_default): """ This is to ensure you can pass filter() results and such in place of lists and have them converted to the proper type automatically. """ if type(attribute_default) in (dict, list, set, tuple): def normalize(attribute_value): if attribute_value is None: return attribute_value else: return type(attribute_default)(attribute_value) return normalize else: return copy class Item: """ A single piece of configuration (e.g. a file, a package, a service). """ BUNDLE_ATTRIBUTE_NAME = None ITEM_ATTRIBUTES = {} ITEM_TYPE_NAME = None REJECT_UNKNOWN_ATTRIBUTES = True REQUIRED_ATTRIBUTES = [] SKIP_REASON_CMDLINE = 1 SKIP_REASON_DEP_FAILED = 2 SKIP_REASON_FAULT_UNAVAILABLE = 3 SKIP_REASON_INTERACTIVE = 4 SKIP_REASON_INTERACTIVE_ONLY = 5 SKIP_REASON_NO_TRIGGER = 6 SKIP_REASON_SOFTLOCK = 7 SKIP_REASON_UNLESS = 8 SKIP_REASON_DEP_SKIPPED = 9 SKIP_REASON_ATTR = 10 SKIP_REASON_DESC = { SKIP_REASON_CMDLINE: _("cmdline"), SKIP_REASON_DEP_FAILED: _("dependency failed"), SKIP_REASON_FAULT_UNAVAILABLE: _("Fault unavailable"), SKIP_REASON_INTERACTIVE: _("declined interactively"), SKIP_REASON_INTERACTIVE_ONLY: _("interactive only"), SKIP_REASON_NO_TRIGGER: _("not triggered"), SKIP_REASON_SOFTLOCK: _("soft locked"), SKIP_REASON_UNLESS: _("unless"), SKIP_REASON_DEP_SKIPPED: _("dependency skipped"), SKIP_REASON_ATTR: _("attribute"), } STATUS_OK = 1 STATUS_FIXED = 2 STATUS_FAILED = 3 STATUS_SKIPPED = 4 STATUS_ACTION_SUCCEEDED = 5 WHEN_CREATING_ATTRIBUTES = {} @classmethod def block_concurrent(cls, node_os, node_os_version): """ Return a list of item types that cannot be applied in parallel with this item type. 
""" return [] def __init__( self, bundle, name, attributes, skip_validation=False, skip_name_validation=False, ): self.attributes = {} self.bundle = bundle self.has_been_triggered = False self.item_dir = join(bundle.bundle_dir, self.BUNDLE_ATTRIBUTE_NAME) self.item_data_dir = join(bundle.bundle_data_dir, self.BUNDLE_ATTRIBUTE_NAME) self.name = name self.node = bundle.node self.when_creating = {} self._command_results = [] self._faults_missing_for_attributes = set() self._precedes_items = set() if not skip_validation: if not skip_name_validation: self._validate_name(bundle, name) self.validate_name(bundle, name) self._validate_attribute_names(bundle, self.id, attributes) self._validate_required_attributes(bundle, self.id, attributes) self.validate_attributes(bundle, self.id, attributes) try: attributes = self.patch_attributes(attributes) except FaultUnavailable: self._faults_missing_for_attributes.add(_("unknown")) for attribute_name, attribute_default in BUILTIN_ITEM_ATTRIBUTES.items(): normalize = make_normalize(attribute_default) try: setattr(self, attribute_name, force_text(normalize(attributes.get( attribute_name, copy(attribute_default), )))) except FaultUnavailable: self._faults_missing_for_attributes.add(attribute_name) setattr(self, attribute_name, BUILTIN_ITEM_ATTRIBUTES[attribute_name]) for attribute_name, attribute_default in self.ITEM_ATTRIBUTES.items(): if attribute_name not in BUILTIN_ITEM_ATTRIBUTES: normalize = make_normalize(attribute_default) try: self.attributes[attribute_name] = force_text(normalize(attributes.get( attribute_name, copy(attribute_default), ))) except FaultUnavailable: self._faults_missing_for_attributes.add(attribute_name) for attribute_name, attribute_default in self.WHEN_CREATING_ATTRIBUTES.items(): normalize = make_normalize(attribute_default) try: self.when_creating[attribute_name] = force_text(normalize( attributes.get('when_creating', {}).get( attribute_name, copy(attribute_default), ) )) except FaultUnavailable: self._faults_missing_for_attributes.add('when_creating/' + attribute_name) if not self.REJECT_UNKNOWN_ATTRIBUTES: for key, value in attributes.items(): if key in BUILTIN_ITEM_ATTRIBUTES: continue if isinstance(value, Fault): try: value = value.value except FaultUnavailable: self._faults_missing_for_attributes.add(key) continue self.attributes.setdefault(key, value) if self.cascade_skip is None: self.cascade_skip = not (self.skip or self.triggered or self.unless) if self.id in self.triggers: raise BundleError(_( "item {item} in bundle '{bundle}' can't trigger itself" ).format( bundle=self.bundle.name, item=self.id, )) def __eq__(self, other): return self.id == other.id def __hash__(self): return hash(self.id) def __lt__(self, other): return self.id < other.id def __str__(self): return self.id def __repr__(self): return "".format(self.id) def _check_loopback_dependency(self): """ Alerts the user if they have an item depend on itself. 
""" if ( self.id in self.after or self.id in self.before or self.id in self.needs or self.id in self.needed_by ): raise ItemDependencyError(_( "'{item}' in bundle '{bundle}' on node '{node}' cannot depend on itself" ).format( item=self.id, bundle=self.bundle.name, node=self.node.name, )) @cached_property def cached_cdict(self): if self._faults_missing_for_attributes: self._raise_for_faults() cdict = self.cdict() try: validate_statedict(cdict) except ValueError as e: raise ValueError(_( "{item} from bundle '{bundle}' returned invalid cdict: {msg}" ).format( bundle=self.bundle.name, item=self.id, msg=repr(e), )) return cdict @cached_property def cached_sdict(self): status = self.sdict() try: validate_statedict(status) except ValueError as e: raise ValueError(_( "{item} from bundle '{bundle}' returned invalid status: {msg}" ).format( bundle=self.bundle.name, item=self.id, msg=repr(e), )) return status @cached_property def cached_status(self): return self.get_status() @cached_property def cached_unless_result(self): """ Returns True if 'unless' wants to skip this item. """ if self.unless and (self.ITEM_TYPE_NAME == 'action' or not self.cached_status.correct): unless_result = self.node.run(self.unless, may_fail=True) return unless_result.return_code == 0 else: return False def _triggers_preceding_items(self, interactive=False): """ Preceding items will execute this to figure out if they're triggered. """ if self.cached_unless_result: # 'unless' says we don't need to run return False if self.ITEM_TYPE_NAME == 'action': # so we have an action where 'unless' says it must be run # but the 'interactive' attribute might still override that if self.attributes['interactive'] and not interactive: return False else: return True return not self.cached_status.correct def _raise_for_faults(self): raise FaultUnavailable(_( "{item} on {node} is missing faults " "for these attributes: {attrs} " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( attrs=", ".join(sorted(self._faults_missing_for_attributes)), item=self.id, node=self.node.name, )) def _skip_with_soft_locks(self, mine, others): """ Returns True/False depending on whether the item should be skipped based on the given set of locks. 
""" for lock in mine: if self.covered_by_autoskip_selector(lock['items']): io.debug(_("{item} on {node} whitelisted by lock {lock}").format( item=self.id, lock=lock['id'], node=self.node.name, )) return False for lock in others: if self.covered_by_autoskip_selector(lock['items']): io.debug(_("{item} on {node} blacklisted by lock {lock}").format( item=self.id, lock=lock['id'], node=self.node.name, )) return True return False def _test(self): with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): if self._faults_missing_for_attributes: self._raise_for_faults() return self.test() @classmethod def _validate_attribute_names(cls, bundle, item_id, attributes): if not isinstance(attributes, dict): raise BundleError(_( "invalid item '{item}' in bundle '{bundle}': not a dict" ).format( item=item_id, bundle=bundle.name, )) if cls.REJECT_UNKNOWN_ATTRIBUTES: invalid_attributes = set(attributes.keys()).difference( set(cls.ITEM_ATTRIBUTES.keys()).union( set(BUILTIN_ITEM_ATTRIBUTES.keys()) ), ) if invalid_attributes: raise BundleError(_( "invalid attribute(s) for '{item}' in bundle '{bundle}': {attrs}" ).format( item=item_id, bundle=bundle.name, attrs=", ".join(invalid_attributes), )) invalid_attributes = set(attributes.get('when_creating', {}).keys()).difference( set(cls.WHEN_CREATING_ATTRIBUTES.keys()) ) if invalid_attributes: raise BundleError(_( "invalid when_creating attribute(s) for '{item}' in bundle '{bundle}': {attrs}" ).format( item=item_id, bundle=bundle.name, attrs=", ".join(invalid_attributes), )) @classmethod def _validate_name(cls, bundle, name): if not name: raise BundleError(_( "invalid name for {type} in bundle '{bundle}': must not be empty string" ).format( bundle=bundle.name, type=cls.ITEM_TYPE_NAME, )) if ":" in name: raise BundleError(_( "invalid name for {type} in bundle '{bundle}': {name} (must not contain colon)" ).format( bundle=bundle.name, name=name, type=cls.ITEM_TYPE_NAME, )) @classmethod def _validate_required_attributes(cls, bundle, item_id, attributes): missing = [] for attrname in cls.REQUIRED_ATTRIBUTES: if attrname not in attributes: missing.append(attrname) if missing: raise BundleError(_( "{item} in bundle '{bundle}' missing required attribute(s): {attrs}" ).format( item=item_id, bundle=bundle.name, attrs=", ".join(missing), )) def apply( self, autoskip_selector=(), autoonly_selector=(), my_soft_locks=(), other_peoples_soft_locks=(), interactive=False, interactive_default=True, show_diff=True, ): self.node.repo.hooks.item_apply_start( self.node.repo, self.node, self, ) status_code = None status_before = None status_after = None details = None start_time = datetime.now() for item in self._precedes_items: if item._triggers_preceding_items(interactive=interactive): io.debug(_( "preceding item {item} on {node} has been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) self.has_been_triggered = True break else: io.debug(_( "preceding item {item} on {node} has NOT been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) if self.skip: status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_ATTR elif self.triggered and not self.has_been_triggered: io.debug(_( "skipping {item} on {node} because it wasn't triggered" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_NO_TRIGGER elif not self.covered_by_autoonly_selector(autoonly_selector): io.debug(_( "autoonly does not 
match {item} on {node}" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_CMDLINE elif self.covered_by_autoskip_selector(autoskip_selector): io.debug(_( "autoskip matches {item} on {node}" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_CMDLINE elif self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks): status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_SOFTLOCK elif self.cached_unless_result: io.debug(_( "'unless' for {item} on {node} succeeded, not fixing" ).format(item=self.id, node=self.node.name)) status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_UNLESS elif self._faults_missing_for_attributes: if self.error_on_missing_fault: self._raise_for_faults() else: io.debug(_( "skipping {item} on {node} because it is missing faults " "for these attributes: {attrs} " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( attrs=", ".join(sorted(self._faults_missing_for_attributes)), item=self.id, node=self.node.name, )) status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_FAULT_UNAVAILABLE else: try: status_before = self.cached_status except FaultUnavailable: if self.error_on_missing_fault: self._raise_for_faults() else: io.debug(_( "skipping {item} on {node} because it is missing Faults " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( item=self.id, node=self.node.name, )) status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_FAULT_UNAVAILABLE else: if status_before.correct: status_code = self.STATUS_OK elif show_diff or interactive: if status_before.must_be_created: cdict = copy(status_before.cdict) cdict.update(self.when_creating) details = self.display_on_create(cdict) elif status_before.must_be_deleted: details = self.display_on_delete(copy(status_before.sdict)) else: details = self.display_dicts( copy(status_before.cdict), copy(status_before.sdict), # TODO remove sorted() in 5.0 to pass a set sorted(copy(status_before.keys_to_fix)), ) if status_code is None: # item not skipped or OK if not interactive: with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): self.fix(status_before) else: if status_before.must_be_created: question_text = dict_to_text(details, value_color=green) prompt = _("Create {}?").format(bold(self.id)) elif status_before.must_be_deleted: question_text = dict_to_text(details, value_color=red) prompt = _("Delete {}?").format(bold(self.id)) else: display_cdict, display_sdict, display_keys_to_fix = details question_text = diff_dict( display_sdict, display_cdict, skip_missing_in_target=True, ) prompt = _("Fix {}?").format(bold(self.id)) if self.comment: question_text += format_comment(self.comment) question = wrap_question( self.id, question_text, prompt, prefix="{x} {node} ".format( node=bold(self.node.name), x=blue("?"), ), ) answer = io.ask( question, interactive_default, epilogue="{x} {node}".format( node=bold(self.node.name), x=blue("?"), ), ) if answer: with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): self.fix(status_before) else: status_code = self.STATUS_SKIPPED details = self.SKIP_REASON_INTERACTIVE if status_code is None: # item not skipped or OK status_after = self.get_status(cached=False) status_code = self.STATUS_FIXED if status_after.correct else self.STATUS_FAILED 
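# At this point status_code is one of STATUS_OK, STATUS_FIXED, # STATUS_FAILED or STATUS_SKIPPED (with `details` carrying the skip # reason or the displayed diff, if any); report the outcome to any # registered hooks before returning.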
self.node.repo.hooks.item_apply_end( self.node.repo, self.node, self, duration=datetime.now() - start_time, status_code=status_code, status_before=status_before, status_after=status_after, ) return ( status_code, details, status_before.must_be_created if status_before else None, status_before.must_be_deleted if status_before else None, ) def run_local(self, command, **kwargs): result = run_local(command, **kwargs) self._command_results.append({ 'command': command, 'result': result, }) return result def run(self, command, **kwargs): result = self.node.run(command, **kwargs) self._command_results.append({ 'command': command, 'result': result, }) return result def cdict(self): """ Return a statedict that describes the target state of this item as configured in the repo. Returning `None` instead means that the item should not exist. MAY be overridden by subclasses. """ return self.attributes def covered_by_autoskip_selector(self, autoskip_selector): """ True if this item should be skipped based on the given selector (e.g. ("tag:foo", "bundle:bar")). """ components = [c.strip() for c in autoskip_selector] if ( "*" in components or self.id in components or "bundle:{}".format(self.bundle.name) in components or "{}:".format(self.ITEM_TYPE_NAME) in components ): return True for tag in self.tags: if "tag:{}".format(tag) in components: return True return False def covered_by_autoonly_selector(self, autoonly_selector, check_deps=True): """ True if this item should NOT be skipped based on the given selector (e.g. ("tag:foo", "bundle:bar")). """ if not autoonly_selector: return True components = [c.strip() for c in autoonly_selector] if ( self.id in components or "bundle:{}".format(self.bundle.name) in components or "{}:".format(self.ITEM_TYPE_NAME) in components ): return True for tag in self.tags: if "tag:{}".format(tag) in components: return True if check_deps: for depending_item in self._incoming_needs: if ( depending_item.id in components or "bundle:{}".format(depending_item.bundle.name) in components or "{}:".format(depending_item.ITEM_TYPE_NAME) in components ): return True for tag in depending_item.tags: if "tag:{}".format(tag) in components: return True return False def fix(self, status): """ This is supposed to actually implement stuff on the target node. MUST be overridden by subclasses. """ raise NotImplementedError() def get_auto_attrs(self, items): """ Return a dict with any number of attributes. The respective sets will be merged with the user-supplied values. For example: return { 'needs': { 'file:/foo', }, } Note that only attributes from ALLOWED_ITEM_AUTO_ATTRIBUTES are allowed. """ return {} def get_auto_deps(self, items): """ DEPRECATED remove in 5.0, use get_auto_attrs instead Return a list of item IDs this item should have dependencies on. Be very careful when using this. There are few circumstances where this is really necessary. Only use this if you really need to examine the actual list of items in order to figure out your dependencies. MAY be overridden by subclasses. """ return [] def get_canned_actions(self): """ Return a dictionary of action definitions (mapping action names to dicts of action attributes, as in bundles). MAY be overridden by subclasses. """ return {} def get_status(self, cached=True): """ Returns an ItemStatus instance describing the current status of the item on the actual node. 
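The check is key-wise against cdict(): for a hypothetical item whose cdict() is {'mode': '0644'} while sdict() reports {'mode': '0755'}, the resulting ItemStatus has correct == False and 'mode' in keys_to_fix.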
""" with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): if not cached: del self._cache['cached_sdict'] return ItemStatus(self.cached_cdict, self.cached_sdict) def hash(self): return hash_statedict(self.cached_cdict) @property def id(self): if self.ITEM_TYPE_NAME == 'action' and ":" in self.name: # canned actions don't have an "action:" prefix return self.name return "{}:{}".format(self.ITEM_TYPE_NAME, self.name) def verify( self, autoskip_selector=(), autoonly_selector=(), ): if not self.covered_by_autoonly_selector(autoonly_selector, check_deps=False): io.debug(_( "autoonly does not match {item} on {node}" ).format(item=self.id, node=self.node.name)) raise ItemSkipped if self.covered_by_autoskip_selector(autoskip_selector): io.debug(_( "autoskip matches {item} on {node}" ).format(item=self.id, node=self.node.name)) raise ItemSkipped if self.cached_status.must_be_created: display = self.display_on_create(copy(self.cached_status.cdict)) elif self.cached_status.must_be_deleted: display = self.display_on_delete(copy(self.cached_status.sdict)) else: display = self.display_dicts( copy(self.cached_status.cdict), copy(self.cached_status.sdict), # TODO remove sorted() in 5.0 to pass a set sorted(copy(self.cached_status.keys_to_fix)), ) return self.cached_unless_result, self.cached_status, display def display_on_create(self, cdict): """ Given a cdict as implemented above, modify it to better suit interactive presentation when an item is created. If there are any when_creating attributes, they will be added to the cdict before it is passed to this method. MAY be overridden by subclasses. """ return cdict # TODO rename to display_on_fix in 5.0 def display_dicts(self, cdict, sdict, keys): """ Given cdict and sdict as implemented above, modify them to better suit interactive presentation. The keys parameter is a list of keys whose values differ between cdict and sdict. MAY be overridden by subclasses. """ return (cdict, sdict, keys) def display_on_delete(self, sdict): """ Given an sdict as implemented above, modify it to better suit interactive presentation when an item is deleted. MAY be overridden by subclasses. """ return sdict def patch_attributes(self, attributes): """ Allows an item to preprocess the attributes it is initialized with. Returns the modified attributes dictionary. MAY be overridden by subclasses. """ return attributes def preview(self): """ Can return a preview of this item as a Unicode string. BundleWrap will NOT add a trailing newline. MAY be overridden by subclasses. """ raise NotImplementedError() def sdict(self): """ Return a statedict that describes the actual state of this item on the node. Returning `None` instead means that the item does not exist on the node. For the item to validate as correct, the values for all keys in self.cdict() have to match this statedict. MUST be overridden by subclasses. """ raise NotImplementedError() def test(self): """ Used by `bw repo test`. Should do as much as possible to detect what would become a runtime error during a `bw apply`. Files will attempt to render their templates for example. SHOULD be overridden by subclasses """ pass @classmethod def validate_attributes(cls, bundle, item_id, attributes): """ Raises BundleError if something is amiss with the user-specified attributes. SHOULD be overridden by subclasses. """ pass @classmethod def validate_name(cls, bundle, name): """ Raise BundleError if the given name is not valid (e.g. 
contains invalid characters for this kind of item. MAY be overridden by subclasses. """ pass bundlewrap-4.13.6/bundlewrap/items/actions.py000066400000000000000000000230771417502274300212640ustar00rootroot00000000000000from datetime import datetime from bundlewrap.exceptions import BundleError, ItemSkipped from bundlewrap.items import format_comment, Item from bundlewrap.utils import Fault from bundlewrap.utils.ui import io from bundlewrap.utils.text import mark_for_translation as _ from bundlewrap.utils.text import blue, bold, wrap_question class ActionFailure(Exception): """ Raised when an action fails to meet the expected rcode/output. """ def __init__(self, failed_expectations): self.failed_expectations = failed_expectations class Action(Item): """ A command that is run on a node. """ BUNDLE_ATTRIBUTE_NAME = 'actions' ITEM_ATTRIBUTES = { 'command': None, 'data_stdin': None, 'expected_stderr': None, 'expected_stdout': None, 'expected_return_code': {0}, 'interactive': None, } ITEM_TYPE_NAME = 'action' REQUIRED_ATTRIBUTES = ['command'] def _get_result( self, autoonly_selector="", autoskip_selector="", my_soft_locks=(), other_peoples_soft_locks=(), interactive=False, interactive_default=True, show_diff=True, ): if self.skip: return (self.STATUS_SKIPPED, self.SKIP_REASON_ATTR, None, None) if self._faults_missing_for_attributes: if self.error_on_missing_fault: self._raise_for_faults() else: io.debug(_( "skipping {item} on {node} because it is missing faults " "for these attributes: {attrs} " "(most of the time this means you're missing " "a required key in your .secrets.cfg)" ).format( attrs=", ".join(sorted(self._faults_missing_for_attributes)), item=self.id, node=self.node.name, )) return (self.STATUS_SKIPPED, self.SKIP_REASON_FAULT_UNAVAILABLE, None, None) if not self.covered_by_autoonly_selector(autoonly_selector): io.debug(_( "autoonly does not match {item} on {node}" ).format(item=self.id, node=self.node.name)) return (self.STATUS_SKIPPED, self.SKIP_REASON_CMDLINE, None, None) if self.covered_by_autoskip_selector(autoskip_selector): io.debug(_( "autoskip matches {item} on {node}" ).format(item=self.id, node=self.node.name)) return (self.STATUS_SKIPPED, self.SKIP_REASON_CMDLINE, None, None) if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks): return (self.STATUS_SKIPPED, self.SKIP_REASON_SOFTLOCK, None, None) if interactive is False and self.attributes['interactive'] is True: return (self.STATUS_SKIPPED, self.SKIP_REASON_INTERACTIVE_ONLY, None, None) for item in self._precedes_items: if item._triggers_preceding_items(interactive=interactive): io.debug(_( "preceding item {item} on {node} has been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) self.has_been_triggered = True break else: io.debug(_( "preceding item {item} on {node} has NOT been triggered by {other_item}" ).format(item=self.id, node=self.node.name, other_item=item.id)) if self.triggered and not self.has_been_triggered: io.debug(_("skipping {} because it wasn't triggered").format(self.id)) return (self.STATUS_SKIPPED, self.SKIP_REASON_NO_TRIGGER, None, None) if self.unless: with io.job(_("{node} {bundle} {item} checking 'unless' condition").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): unless_result = self.bundle.node.run( self.unless, may_fail=True, ) if unless_result.return_code == 0: io.debug(_("{node}:{bundle}:action:{name}: failed 'unless', not running").format( bundle=self.bundle.name, name=self.name,
node=self.bundle.node.name, )) return (self.STATUS_SKIPPED, self.SKIP_REASON_UNLESS, None, None) question_body = "" if self.attributes['data_stdin'] is not None: question_body += "<" + _("data") + "> | " question_body += self.attributes['command'] if self.comment: question_body += format_comment(self.comment) if ( interactive and self.attributes['interactive'] is not False and not io.ask( wrap_question( self.id, question_body, _("Run action {}?").format( bold(self.name), ), prefix="{x} {node} ".format( node=bold(self.node.name), x=blue("?"), ), ), interactive_default, epilogue="{x} {node}".format( node=bold(self.node.name), x=blue("?"), ), ) ): return (self.STATUS_SKIPPED, self.SKIP_REASON_INTERACTIVE, None, None) try: self.run() return (self.STATUS_ACTION_SUCCEEDED, None, None, None) except ActionFailure as exc: return (self.STATUS_FAILED, exc.failed_expectations, None, None) def apply(self, *args, **kwargs): return self.get_result(*args, **kwargs) def cdict(self): raise AttributeError(_("actions don't have cdicts")) def get_result(self, *args, **kwargs): self.node.repo.hooks.action_run_start( self.node.repo, self.node, self, ) start_time = datetime.now() result = self._get_result(*args, **kwargs) self.node.repo.hooks.action_run_end( self.node.repo, self.node, self, duration=datetime.now() - start_time, status=result[0], ) return result def run(self): if self.attributes['data_stdin'] is not None: data_stdin = self.attributes['data_stdin'] # Allow users to use either a string/unicode object or raw # bytes -- or Faults. if isinstance(data_stdin, Fault): data_stdin = data_stdin.value if type(data_stdin) is not bytes: data_stdin = data_stdin.encode('UTF-8') else: data_stdin = None with io.job(_("{node} {bundle} {item}").format( bundle=bold(self.bundle.name), item=self.id, node=bold(self.node.name), )): result = super().run( self.attributes['command'], data_stdin=data_stdin, may_fail=True, ) failed_expectations = ({}, {}, []) if self.attributes['expected_return_code'] is not None and \ result.return_code not in self.attributes['expected_return_code']: failed_expectations[0][_("return code")] = str(self.attributes['expected_return_code']) failed_expectations[1][_("return code")] = str(result.return_code) failed_expectations[2].append(_("return code")) if self.attributes['expected_stderr'] is not None and \ result.stderr_text != self.attributes['expected_stderr']: failed_expectations[0][_("stderr")] = self.attributes['expected_stderr'] failed_expectations[1][_("stderr")] = result.stderr_text failed_expectations[2].append(_("stderr")) if self.attributes['expected_stdout'] is not None and \ result.stdout_text != self.attributes['expected_stdout']: failed_expectations[0][_("stdout")] = self.attributes['expected_stdout'] failed_expectations[1][_("stdout")] = result.stdout_text failed_expectations[2].append(_("stdout")) if failed_expectations[2]: raise ActionFailure(failed_expectations) return result def patch_attributes(self, attributes): if isinstance(attributes.get('expected_return_code'), int): attributes['expected_return_code'] = {attributes['expected_return_code']} return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('interactive', None) not in (True, False, None): raise BundleError(_( "invalid interactive setting for action '{item}' in bundle '{bundle}'" ).format(item=item_id, bundle=bundle.name)) def verify(self, autoskip_selector=(), autoonly_selector=()): if not self.covered_by_autoonly_selector(autoonly_selector, check_deps=False): 
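# check_deps=False: unlike during apply, an item does not match here # just because something that depends on it was selected # (see covered_by_autoonly_selector).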
io.debug(_( "autoonly does not match {item} on {node}" ).format(item=self.id, node=self.node.name)) raise ItemSkipped if self.covered_by_autoskip_selector(autoskip_selector): io.debug(_( "autoskip matches {item} on {node}" ).format(item=self.id, node=self.node.name)) raise ItemSkipped if self.unless and self.cached_unless_result: return self.cached_unless_result, None, None else: raise NotImplementedError bundlewrap-4.13.6/bundlewrap/items/directories.py000066400000000000000000000243331417502274300221340ustar00rootroot00000000000000from collections import defaultdict from os.path import normpath from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.remote import PathInfo from bundlewrap.utils.text import mark_for_translation as _ from bundlewrap.utils.text import is_subdirectory from bundlewrap.utils.ui import io UNMANAGED_PATH_DESC = _("unmanaged subpaths") def validator_mode(item_id, value): if value is None: return value = str(value) if not value.isdigit(): raise BundleError( _("mode for {item} should be written as digits, got: '{value}'" "").format(item=item_id, value=value) ) for digit in value: if int(digit) > 7 or int(digit) < 0: raise BundleError(_( "invalid mode for {item}: '{value}'" ).format(item=item_id, value=value)) if len(value) not in (3, 4): raise BundleError(_( "mode for {item} should be three or four digits long, was: '{value}'" ).format(item=item_id, value=value)) ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) ATTRIBUTE_VALIDATORS.update({ 'mode': validator_mode, }) class Directory(Item): """ A directory. """ BUNDLE_ATTRIBUTE_NAME = "directories" ITEM_ATTRIBUTES = { 'group': "root", 'mode': "0755", 'owner': "root", 'purge': False, } ITEM_TYPE_NAME = "directory" def __repr__(self): return "<Directory path:{}>".format( quote(self.name), ) def cdict(self): cdict = { 'paths_to_purge': set(), 'type': 'directory', } for optional_attr in ('group', 'mode', 'owner'): if self.attributes[optional_attr] is not None: cdict[optional_attr] = self.attributes[optional_attr] return cdict def display_on_create(self, cdict): del cdict['paths_to_purge'] del cdict['type'] return cdict def display_dicts(self, cdict, sdict, keys): try: keys.remove('paths_to_purge') except ValueError: pass else: keys.append(UNMANAGED_PATH_DESC) cdict[UNMANAGED_PATH_DESC] = sorted(cdict['paths_to_purge']) sdict[UNMANAGED_PATH_DESC] = sorted(sdict['paths_to_purge']) del cdict['paths_to_purge'] del sdict['paths_to_purge'] return (cdict, sdict, keys) def fix(self, status): if status.must_be_created or 'type' in status.keys_to_fix: # fixing the type fixes everything self._fix_type(status) return for path in status.sdict.get('paths_to_purge', set()): self.run("rm -rf -- {}".format(quote(path))) for fix_type in ('mode', 'owner', 'group'): if fix_type in status.keys_to_fix: if fix_type == 'group' and 'owner' in status.keys_to_fix: # owner and group are fixed with a single chown continue getattr(self, "_fix_" + fix_type)(status) def _fix_mode(self, status): if self.node.os in self.node.OS_FAMILY_BSD: chmod_command = "chmod {} {}" else: chmod_command = "chmod {} -- {}" self.run(chmod_command.format( self.attributes['mode'], quote(self.name), )) if self.node.os not in self.node.OS_FAMILY_BSD: # The bits S_ISUID and S_ISGID are special. POSIX says, # if they are NOT set, the implementation of "chmod" may or # may not clear them.
This means that "chmod 0755 foodir" # does not necessarily clear the S_ISUID and/or S_ISGID bit, # while a "chmod 6755 foodir" will always set them. # # GNU coreutils have decided to actually behave this way. # You can't clear a S_ISUID or S_ISGID bit by issuing "chmod # 0755 foodir". You must explicitly do a "chmod u-s foodir" # or "chmod g-s foodir". # # This does not apply to regular files, nor to the sticky # bit (S_ISVTX). Also, FreeBSD, NetBSD, and OpenBSD do clear # these bits on "chmod 0755 foodir". # We only want to run these extra commands if we have found # one of the two special bits to be set. if status.sdict is not None and int(status.sdict['mode'], 8) & 0o6000: if not int(self.attributes['mode'], 8) & 0o4000: self.run("chmod u-s {}".format(quote(self.name))) if not int(self.attributes['mode'], 8) & 0o2000: self.run("chmod g-s {}".format(quote(self.name))) def _fix_owner(self, status): group = self.attributes['group'] or "" if group: group = ":" + quote(group) if self.node.os in self.node.OS_FAMILY_BSD: command = "chown {}{} {}" else: command = "chown {}{} -- {}" self.run(command.format( quote(self.attributes['owner'] or ""), group, quote(self.name), )) _fix_group = _fix_owner def _fix_type(self, status): self.run("rm -rf -- {}".format(quote(self.name))) self.run("mkdir -p -- {}".format(quote(self.name))) if self.attributes['mode']: self._fix_mode(status) if self.attributes['owner'] or self.attributes['group']: self._fix_owner(status) def _get_paths_to_purge(self): result = self.run("find {} -maxdepth 1 -print0".format(quote(self.name))) for line in result.stdout.split(b"\0"): line = line.decode('utf-8') for item_type in ('directory', 'file', 'symlink'): for item in self.node.items: if ( item.id == "{}:{}".format(item_type, line) or item.id.startswith("{}:{}/".format(item_type, line)) ): break else: continue break else: # this file or directory is not managed io.debug(( "found unmanaged path below {dirpath} on {node}, " "marking for removal: {path}" ).format( dirpath=self.name, node=self.node.name, path=line, )) yield line def get_auto_attrs(self, items): deps = set() for item in items: if item == self: continue if (( item.ITEM_TYPE_NAME == "file" and is_subdirectory(item.name, self.name) ) or ( item.ITEM_TYPE_NAME in ("file", "symlink") and item.name == self.name )): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.add(item.id) elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.add(item.id) elif item.ITEM_TYPE_NAME in ("directory", "symlink"): if is_subdirectory(item.name, self.name): deps.add(item.id) return {'needs': deps} def sdict(self): path_info = PathInfo(self.node, self.name) if not path_info.exists: return None else: paths_to_purge = set() if self.attributes['purge']: 
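# collect all direct children of this directory that no file, # directory or symlink item claims (see _get_paths_to_purge above)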
paths_to_purge = set(self._get_paths_to_purge()) return { 'type': 'directory' if path_info.is_directory else path_info.stat['type'], 'mode': path_info.mode, 'owner': path_info.owner, 'group': path_info.group, 'paths_to_purge': paths_to_purge, } def patch_attributes(self, attributes): if 'mode' in attributes and attributes['mode'] is not None: attributes['mode'] = str(attributes['mode']).zfill(4) if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: # BSD doesn't have a root group, so we have to use a # different default value here attributes['group'] = 'wheel' return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): for key, value in attributes.items(): ATTRIBUTE_VALIDATORS[key](item_id, value) @classmethod def validate_name(cls, bundle, name): if normpath(name) != name: raise BundleError(_( "'{path}' is an invalid directory path, " "should be '{normpath}' (bundle '{bundle}')" ).format( bundle=bundle.name, normpath=normpath(name), path=name, )) bundlewrap-4.13.6/bundlewrap/items/files.py000066400000000000000000000605771417502274300207340ustar00rootroot00000000000000from atexit import register as at_exit from base64 import b64decode from collections import defaultdict from contextlib import contextmanager, suppress from datetime import datetime from hashlib import md5 from os import getenv, getpid, makedirs, mkdir, rmdir from os.path import basename, dirname, exists, isfile, join, normpath from shlex import quote from shutil import rmtree from subprocess import check_output, CalledProcessError, STDOUT from sys import exc_info from tempfile import gettempdir from time import sleep from traceback import format_exception from jinja2 import Environment, FileSystemLoader from mako.lookup import TemplateLookup from mako.template import Template from bundlewrap.exceptions import BundleError, FaultUnavailable, TemplateError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.items.directories import validator_mode from bundlewrap.utils import cached_property, download, hash_local_file, sha1, tempfile from bundlewrap.utils.remote import PathInfo from bundlewrap.utils.text import bold, force_text, mark_for_translation as _ from bundlewrap.utils.text import is_subdirectory from bundlewrap.utils.ui import io DIFF_MAX_FILE_SIZE = 1024 * 1024 * 5 # bytes def content_processor_base64(item): # .encode() is required for pypy3 only return b64decode(item._template_content.encode()) def content_processor_jinja2(item): loader = FileSystemLoader(searchpath=[item.item_data_dir, item.item_dir]) env = Environment(loader=loader) template = env.from_string(item._template_content) io.debug(f"{item.node.name}:{item.id}: rendering with Jinja2...") start = datetime.now() try: content = template.render( item=item, bundle=item.bundle, node=item.node, repo=item.node.repo, **item.attributes['context'] ) except FaultUnavailable: raise except Exception as e: io.stderr("".join(format_exception(*exc_info()))) raise TemplateError(_( "Error while rendering template for {node}:{bundle}:{item}: {error}" ).format( bundle=item.bundle.name, error=e, item=item.id, node=item.node.name, )) duration = datetime.now() - start io.debug("{node}:{bundle}:{item}: rendered in {time:.09f} s".format( bundle=item.bundle.name, item=item.id, node=item.node.name, time=duration.total_seconds(), )) return content.encode(item.attributes['encoding']) def content_processor_mako(item): template = Template( item._template_content.encode('utf-8'), input_encoding='utf-8', 
lookup=TemplateLookup(directories=[item.item_data_dir, item.item_dir]), output_encoding=item.attributes['encoding'], ) io.debug(f"{item.node.name}:{item.id}: rendering with Mako...") start = datetime.now() try: content = template.render( item=item, bundle=item.bundle, node=item.node, repo=item.node.repo, **item.attributes['context'] ) except FaultUnavailable: raise except Exception as e: io.stderr("".join(format_exception(*exc_info()))) if isinstance(e, NameError) and str(e) == "Undefined": # Mako isn't very verbose here. Try to give a more useful # error message - even though we can't pinpoint the exact # location of the error. :/ e = _("Undefined variable (look for '${...}')") elif isinstance(e, KeyError): e = _("KeyError: {}").format(str(e)) raise TemplateError(_( "Error while rendering template for {node}:{bundle}:{item}: {error}" ).format( bundle=item.bundle.name, error=e, item=item.id, node=item.node.name, )) duration = datetime.now() - start io.debug(f"{item.node.name}:{item.id}: rendered in {duration.total_seconds():.09f} s") return content def content_processor_text(item): return item._template_content.encode(item.attributes['encoding']) CONTENT_PROCESSORS = { 'any': lambda item: b"", 'base64': content_processor_base64, 'binary': None, 'jinja2': content_processor_jinja2, 'mako': content_processor_mako, 'text': content_processor_text, 'download': None, } def download_file(item): file_name_hashed = md5(item.attributes['source'].encode('UTF-8')).hexdigest() cache_path = getenv("BW_FILE_DOWNLOAD_CACHE") if cache_path: remove_dir = None file_path = join(cache_path, file_name_hashed) lock_dir = join(cache_path, "{}.bw_lock".format(file_name_hashed)) makedirs(cache_path, exist_ok=True) else: remove_dir = join(gettempdir(), "bw-file-download-cache-{}".format(getpid())) file_path = join(remove_dir, file_name_hashed) lock_dir = join(remove_dir, "{}.bw_lock".format(file_name_hashed)) makedirs(remove_dir, exist_ok=True) io.debug(f"{item.node.name}:{item.id}: download lock dir is {lock_dir}") # Since we only download the file once per process, there's no point # in displaying the node name here. The file may be used on multiple # nodes. with io.job(_("{} {} waiting for download".format(bold(item.node.name), bold(item.id)))): while True: try: mkdir(lock_dir) io.debug(f"{item.node.name}:{item.id}: have download lock") break except FileExistsError: io.debug(f"{item.node.name}:{item.id}: waiting for download lock") sleep(1) try: if not isfile(file_path): io.debug( f"{item.node.name}:{item.id}: " f"starting download from {item.attributes['source']}" ) with io.job(_("{} {} downloading file".format(bold(item.node.name), bold(item.id)))): download(item.attributes['source'], file_path) io.debug( f"{item.node.name}:{item.id}: " f"finished download from {item.attributes['source']}" ) # Always do hash verification, if requested.
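# (The check below compares hash_local_file() output -- per the error # message, a sha1sum -- against the user-supplied 'content_hash' # attribute.)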
if item.attributes['content_hash']: with io.job(_("{} {} checking file integrity".format( bold(item.node.name), bold(item.id), ))): local_hash = hash_local_file(file_path) io.debug(f"{item.node.name}:{item.id}: content hash is {local_hash}") if local_hash != item.attributes['content_hash']: raise BundleError(_( "could not download correct file from {} - sha1sum mismatch " "(expected {}, got {})" ).format( item.attributes['source'], item.attributes['content_hash'], local_hash )) io.debug(f"{item.node.name}:{item.id}: content hash matches") finally: rmdir(lock_dir) io.debug(f"{item.node.name}:{item.id}: released download lock") return file_path, remove_dir def get_remote_file_contents(node, path): """ Returns the contents of the given path as bytes. """ with tempfile() as tmp_file: node.download(path, tmp_file) with open(tmp_file, 'rb') as f: content = f.read() return content def validator_content_type(item_id, value): if value not in CONTENT_PROCESSORS: raise BundleError(_( "invalid content_type for {item}: '{value}'" ).format(item=item_id, value=value)) ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) ATTRIBUTE_VALIDATORS.update({ 'content_type': validator_content_type, 'mode': validator_mode, }) class File(Item): """ A file. """ BUNDLE_ATTRIBUTE_NAME = "files" ITEM_ATTRIBUTES = { 'content': None, 'content_type': 'text', 'content_hash': None, 'context': None, 'delete': False, 'encoding': "utf-8", 'group': "root", 'mode': "0644", 'owner': "root", 'source': None, 'verify_with': None, 'test_with': None, } ITEM_TYPE_NAME = "file" def __repr__(self): return "<File path:{}>".format(quote(self.name)) @property def _template_content(self): if self.attributes['source'] is not None: filename = join(self.item_data_dir, self.attributes['source']) if not exists(filename): filename = join(self.item_dir, self.attributes['source']) with open(filename, 'rb') as f: return force_text(f.read()) else: return force_text(self.attributes['content']) @cached_property def content(self): return CONTENT_PROCESSORS[self.attributes['content_type']](self) @cached_property def content_hash(self): if self.attributes['content_type'] in ('binary', 'download'): return hash_local_file(self.template) else: return sha1(self.content) @cached_property def template(self): if self.attributes['content_type'] == 'download': file_path, remove_dir = download_file(self) if remove_dir: io.debug(_("registering {} for deletion on exit").format(remove_dir)) at_exit(rmtree, remove_dir, ignore_errors=True) return file_path data_template = join(self.item_data_dir, self.attributes['source']) if exists(data_template): return data_template return join(self.item_dir, self.attributes['source']) def cdict(self): if self.attributes['delete']: return None cdict = {'type': 'file'} if self.attributes['content_type'] != 'any': if self.attributes['content_type'] == 'download' and self.attributes['content_hash']: cdict['content_hash'] = self.attributes['content_hash'] else: cdict['content_hash'] = self.content_hash for optional_attr in ('group', 'mode', 'owner'): if self.attributes[optional_attr] is not None: cdict[optional_attr] = self.attributes[optional_attr] return cdict def fix(self, status): if status.must_be_created or status.must_be_deleted or 'type' in status.keys_to_fix: self._fix_type(status) else: for fix_type in ('content_hash', 'mode', 'owner', 'group'): if fix_type in status.keys_to_fix: if fix_type == 'group' and \ 'owner' in status.keys_to_fix: # owner and group are fixed with a single chown continue if fix_type in ('mode', 'owner',
'group') and \ 'content' in status.keys_to_fix: # fixing content implies setting mode and owner/group continue getattr(self, "_fix_" + fix_type)(status) def _fix_content_hash(self, status): with self._write_local_file() as local_path: self.node.upload( local_path, self.name, mode=self.attributes['mode'], owner=self.attributes['owner'] or "", group=self.attributes['group'] or "", may_fail=True, ) def _fix_mode(self, status): if self.node.os in self.node.OS_FAMILY_BSD: command = "chmod {} {}" mode = self.attributes['mode'] else: command = "chmod {} -- {}" # GNU chmod refuses to set some modes (e.g., "chmod 0755 dir" # when "dir" is a directory which currently has mode "2755") # unless there's an additional leading zero. mode = '0' + self.attributes['mode'].zfill(4) self.run(command.format(mode, quote(self.name))) def _fix_owner(self, status): group = self.attributes['group'] or "" if group: group = ":" + quote(group) if self.node.os in self.node.OS_FAMILY_BSD: command = "chown {}{} {}" else: command = "chown {}{} -- {}" self.run(command.format( quote(self.attributes['owner'] or ""), group, quote(self.name), )) _fix_group = _fix_owner def _fix_type(self, status): if status.sdict: self.run("rm -rf -- {}".format(quote(self.name))) if not status.must_be_deleted: self.run("mkdir -p -- {}".format(quote(dirname(self.name)))) self._fix_content_hash(status) def get_auto_deps(self, items): deps = [] for item in items: if item.ITEM_TYPE_NAME == 'file' and is_subdirectory(item.name, self.name): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) elif item.ITEM_TYPE_NAME == 'user' and item.name == self.attributes['owner']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME == 'group' and item.name == self.attributes['group']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME in ('directory', 'symlink'): if is_subdirectory(item.name, self.name): deps.append(item.id) return deps def sdict(self): path_info = PathInfo(self.node, self.name) if not path_info.exists: return None else: return { 'type': 'file' if path_info.is_file else path_info.stat['type'], 'content_hash': path_info.sha1 if path_info.is_file else None, 'mode': path_info.mode, 'owner': path_info.owner, 'group': path_info.group, 'size': path_info.size, } def display_on_create(self, cdict): if ( self.attributes['content_type'] not in ('any', 'base64', 'binary', 'download') and len(self.content) < DIFF_MAX_FILE_SIZE ): del cdict['content_hash'] cdict['content'] = force_text(self.content) if self.attributes['content_type'] == 'download': cdict['source'] = self.attributes['source'] del cdict['type'] return cdict def display_dicts(self, cdict, sdict, keys): if ( 'content_hash' in keys and self.attributes['content_type'] not in ('base64', 'binary', 'download') and sdict['size'] < DIFF_MAX_FILE_SIZE and len(self.content) < DIFF_MAX_FILE_SIZE and PathInfo(self.node,
self.name).is_text_file ): keys.remove('content_hash') keys.append('content') del cdict['content_hash'] del sdict['content_hash'] cdict['content'] = self.content.decode(self.attributes['encoding']) sdict['content'] = get_remote_file_contents( self.node, self.name, ).decode('utf-8', 'backslashreplace') if 'type' in keys: with suppress(ValueError): keys.remove('content_hash') if self.attributes['content_type'] == 'download': cdict['source'] = self.attributes['source'] sdict['source'] = '' if sdict: del sdict['size'] if self.attributes['content_type'] == 'any': with suppress(KeyError): del sdict['content_hash'] return (cdict, sdict, keys) def display_on_delete(self, sdict): del sdict['content_hash'] path_info = PathInfo(self.node, self.name) if ( sdict['size'] < DIFF_MAX_FILE_SIZE and path_info.is_text_file ): sdict['content'] = get_remote_file_contents(self.node, self.name) if path_info.is_file: sdict['size'] = f"{sdict['size']} bytes" return sdict def patch_attributes(self, attributes): if ( 'content' not in attributes and 'source' not in attributes and attributes.get('content_type', 'text') != 'any' and attributes.get('delete', False) is False ): attributes['source'] = basename(self.name) if 'context' not in attributes: attributes['context'] = {} if 'mode' in attributes and attributes['mode'] is not None: attributes['mode'] = str(attributes['mode']).zfill(4) if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: # BSD doesn't have a root group, so we have to use a # different default value here attributes['group'] = 'wheel' return attributes def preview(self): if ( self.attributes['content_type'] in ('any', 'base64', 'binary') or self.attributes['delete'] is True ): raise ValueError return self.content.decode(self.attributes['encoding']) def test(self): if self.attributes['source'] and not exists(self.template): raise BundleError(_( "{item} from bundle '{bundle}' refers to missing " "file '{path}' in its 'source' attribute" ).format( bundle=self.bundle.name, item=self.id, path=self.template, )) if not self.attributes['delete'] and not self.attributes['content_type'] == 'any': with self._write_local_file() as local_path: if self.attributes['test_with']: cmd = self.attributes['test_with'].format(quote(local_path)) exitcode, stdout = self._run_validator(cmd) if exitcode == 0: io.debug(f"{self.id} passed local validation") elif exitcode in (126, 127, 255): io.debug(f"{self.id} failed local validation with code {exitcode}, ignoring") else: raise BundleError(_( "{i} failed local validation using: {c}\n\n{out}" ).format( c=cmd, i=self.id, out=stdout, )) def _run_validator(self, cmd): io.debug(f"calling local validator for {self.node.name}:{self.id}: {cmd}") try: p = check_output(cmd, shell=True, stderr=STDOUT) except CalledProcessError as e: return e.returncode, e.output.decode() else: return 0, p.decode() @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) if 'content' in attributes and 'source' in attributes: raise BundleError(_( "{item} from bundle '{bundle}' cannot have both 'content' and 'source'" ).format(item=item_id, bundle=bundle.name)) if 'content' in attributes and attributes.get('content_type') == 'binary': raise BundleError(_( "{item} from bundle '{bundle}' cannot have 
binary inline content " "(use content_type 'base64' instead)" ).format(item=item_id, bundle=bundle.name)) if 'content_hash' in attributes and attributes.get('content_type') != 'download': raise BundleError(_( "{item} from bundle '{bundle}' specified 'content_hash', but is " "not of type 'download'" ).format(item=item_id, bundle=bundle.name)) if attributes.get('content_type') == 'download': if 'source' not in attributes: raise BundleError(_( "{item} from bundle '{bundle}' is of type 'download', but missing " "required attribute 'source'" ).format(item=item_id, bundle=bundle.name)) elif '://' not in attributes['source']: raise BundleError(_( "{item} from bundle '{bundle}' is of type 'download', but {source} " "does not look like a URL" ).format(item=item_id, bundle=bundle.name, source=attributes['source'])) if 'encoding' in attributes and attributes.get('content_type') in ( 'any', 'base64', 'binary', 'download', ): raise BundleError(_( "content_type of {item} from bundle '{bundle}' cannot provide different encoding " "(remove the 'encoding' attribute)" ).format(item=item_id, bundle=bundle.name)) if ( attributes.get('content_type', None) == "any" and ( 'content' in attributes or 'encoding' in attributes or 'source' in attributes ) ): raise BundleError(_( "{item} from bundle '{bundle}' with content_type 'any' " "must not define 'content', 'encoding' and/or 'source'" ).format(item=item_id, bundle=bundle.name)) for key, value in attributes.items(): ATTRIBUTE_VALIDATORS[key](item_id, value) @classmethod def validate_name(cls, bundle, name): if normpath(name) == "/": raise BundleError(_("'/' cannot be a file")) if normpath(name) != name: raise BundleError(_( "'{path}' is an invalid file path, should be '{normpath}' (bundle '{bundle}')" ).format( bundle=bundle.name, normpath=normpath(name), path=name, )) @contextmanager def _write_local_file(self): """ Makes the file contents available at the returned temporary path and performs local verification if necessary or requested. The calling method is responsible for cleaning up the file at the returned path (only if not a binary). 
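A sketch of the intended use, mirroring _fix_content_hash() above: with self._write_local_file() as local_path: self.node.upload(local_path, self.name)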
""" with tempfile() as tmp_file: if self.attributes['content_type'] in ('binary', 'download'): local_path = self.template else: local_path = tmp_file with open(local_path, 'wb') as f: f.write(self.content) if self.attributes['verify_with']: cmd = self.attributes['verify_with'].format(quote(local_path)) exitcode, stdout = self._run_validator(cmd) if exitcode == 0: io.debug(f"{self.id} passed local validation") else: raise BundleError(_( "{i} failed local validation using: {c}\n\n{out}" ).format( c=cmd, i=self.id, out=stdout, )) yield local_path bundlewrap-4.13.6/bundlewrap/items/git_deploy.py000066400000000000000000000266041417502274300217620ustar00rootroot00000000000000from atexit import register as at_exit from hashlib import md5 from os import environ, getenv, getpid, makedirs, mkdir, remove, rmdir, setpgrp from os.path import isfile, join from shlex import quote from shutil import rmtree from subprocess import PIPE, Popen from tempfile import gettempdir, NamedTemporaryFile from time import sleep from bundlewrap.exceptions import BundleError, RepositoryError from bundlewrap.items import Item from bundlewrap.operations import RunResult from bundlewrap.utils import cached_property from bundlewrap.utils.text import is_subdirectory, mark_for_translation as _, randstr from bundlewrap.utils.ui import io REPO_MAP_FILENAME = "git_deploy_repos" REMOTE_STATE_FILENAME = ".bundlewrap_git_deploy" def is_ref(rev): """ Braindead check to see if our rev is a branch or tag name. False negatives are OK since this is only used for optimization. """ for char in rev: if char not in "0123456789abcdef": return True return False def get_local_repo_path(bw_repo_path, repo_name): """ From the given BundleWrap repo, get the filesystem path to the git repo associated with the given internal repo name. """ repo_map_path = join(bw_repo_path, REPO_MAP_FILENAME) if not isfile(repo_map_path): io.stderr(_("missing repo map for git_deploy at {}").format(repo_map_path)) io.stderr(_("you must create this file with the following format:")) io.stderr(_(" : " "")) io.stderr(_("since the path is local, you should also add the " "{} file to your gitignore").format(REPO_MAP_FILENAME)) raise RepositoryError(_("missing repo map for git_deploy")) with open(join(bw_repo_path, REPO_MAP_FILENAME)) as f: repo_map = f.readlines() for line in repo_map: if not line.strip() or line.startswith("#"): continue try: repo, path = line.split(":", 1) except Exception: raise RepositoryError(_("unable to parse line from {path}: '{line}'").format( line=line, path=repo_map_path, )) if repo_name == repo: return path.strip() raise RepositoryError(_("no path found for repo '{repo}' in {path}").format( path=repo_map_path, repo=repo_name, )) class GitDeploy(Item): """ Facilitates deployment of a given rev from a local git repo to a node. 
""" BUNDLE_ATTRIBUTE_NAME = "git_deploy" ITEM_ATTRIBUTES = { 'repo': None, 'rev': None, 'use_xattrs': False, } ITEM_TYPE_NAME = "git_deploy" REQUIRED_ATTRIBUTES = ['repo', 'rev'] def __repr__(self): return "".format( self.name, self.attributes['repo'], self.attributes['rev'], ) @cached_property def _expanded_rev(self): git_cmdline = ["rev-parse", self.attributes['rev']] return self.run_git( git_cmdline, self._repo_dir, ) @cached_property def _repo_dir(self): if "://" in self.attributes['repo']: repo_dir, remove_dir = self.clone_to_dir( self.attributes['repo'], self.attributes['rev'], ) if remove_dir is not None: io.debug(_("registering {} for deletion on exit").format(remove_dir)) at_exit(rmtree, remove_dir, ignore_errors=True) else: repo_dir = get_local_repo_path(self.node.repo.path, self.attributes['repo']) return repo_dir def cdict(self): return {'rev': self._expanded_rev} def get_auto_deps(self, items): deps = set() for item in items: if item == self: continue if (( item.ITEM_TYPE_NAME == "file" and is_subdirectory(item.name, self.name) ) or ( item.ITEM_TYPE_NAME in ("file", "symlink") and item.name == self.name )): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) if ( item.ITEM_TYPE_NAME == "directory" and item.name == self.name ): if item.attributes['purge']: raise BundleError(_( "cannot git_deploy into purged directory {}" ).format(item.name)) else: deps.add(item.id) return deps def fix(self, status): archive_local = NamedTemporaryFile(delete=False) try: archive_local.close() self.run_git( ["archive", "-o", archive_local.name, self._expanded_rev], self._repo_dir, ) temp_filename = ".bundlewrap_tmp_git_deploy_" + randstr() try: self.node.upload( archive_local.name, temp_filename, ) self.run("find {} -mindepth 1 -delete".format(quote(self.name))) self.run("tar -xf {} -C {}".format(temp_filename, quote(self.name))) if self.attributes['use_xattrs']: self.run("attr -q -s bw_git_deploy_rev -V {} {}".format( self._expanded_rev, quote(self.name), )) else: self.run("echo {} > {}".format( self._expanded_rev, quote(join(self.name, REMOTE_STATE_FILENAME)), )) self.run("chmod 400 {}".format( quote(join(self.name, REMOTE_STATE_FILENAME)), )) finally: self.run("rm -f {}".format(temp_filename)) finally: remove(archive_local.name) def sdict(self): if self.attributes['use_xattrs']: status_result = self.run( "attr -q -g bw_git_deploy_rev {}".format(quote(self.name)), may_fail=True, ) else: status_result = self.run( "cat {}".format(quote(join(self.name, REMOTE_STATE_FILENAME))), may_fail=True, ) if status_result.return_code != 0: return None else: return {'rev': status_result.stdout.decode('utf-8').strip()} def run_git(self, cmdline, repo_dir): """ Runs the given git command line in the given directory. Returns stdout of the command. 
""" git_env = environ.copy() git_env['GIT_TERMINAL_PROMPT'] = '0' cmdline = ["git"] + cmdline io.debug(_("running '{}' in {}").format( " ".join(cmdline), repo_dir, )) git_process = Popen( cmdline, cwd=repo_dir, env=git_env, preexec_fn=setpgrp, stderr=PIPE, stdout=PIPE, ) stdout, stderr = git_process.communicate() result = RunResult() result.stdout = stdout result.stderr = stderr result.return_code = git_process.returncode self._command_results.append({ 'command': " ".join(cmdline), 'result': result, }) if result.return_code != 0: raise RuntimeError(_( "`git {command}` failed in {dir} for {item} from bundle {bundle}:\n" "{stdout}\n{stderr}" ).format( command=" ".join(cmdline[1:]), dir=repo_dir, item=self.id, bundle=self.bundle.name, stdout=result.stdout.decode('utf-8'), stderr=result.stderr.decode('utf-8'), )) return stdout.decode('utf-8').strip() def clone_to_dir(self, remote_url, rev): """ Clones the given URL to a temporary directory, using a shallow clone if the given revision is definitely not a commit hash. Clones to the base directory $BW_GIT_DEPLOY_CACHE if set. Returns the path to the repo directory and to another directory to be deleted when the process exits (may be None). """ repo_dir_hashed = md5(remote_url.encode('UTF-8')).hexdigest() cache_dir_env = getenv("BW_GIT_DEPLOY_CACHE") if cache_dir_env: # Do not remove this, because it was not created by us. remove_dir = None repo_dir = join(cache_dir_env, repo_dir_hashed) lock_dir = join(cache_dir_env, repo_dir_hashed + ".bw_lock") else: remove_dir = join(gettempdir(), "bw-git-cache-{}".format(getpid())) repo_dir = join(remove_dir, repo_dir_hashed) lock_dir = join(remove_dir, repo_dir_hashed + ".bw_lock") makedirs(repo_dir, exist_ok=True) io.debug(_("{pid}: lock_dir {lock_dir}").format(lock_dir=lock_dir, pid=getpid())) io.debug(_("{pid}: remove_dir {remove_dir}").format(remove_dir=remove_dir, pid=getpid())) io.debug(_("{pid}: repo_dir {repo_dir}").format(repo_dir=repo_dir, pid=getpid())) if is_ref(rev) and not remote_url.startswith('http'): git_cmdline = ["clone", "--bare", "--depth", "1", "--no-single-branch", remote_url, "."] else: git_cmdline = ["clone", "--bare", remote_url, "."] # Use a lock directory to cooperate with other running instances # of bw (in cases where $BW_GIT_DEPLOY_CACHE is used). while True: try: mkdir(lock_dir) io.debug(_("{pid}: Have lock on {lock_dir}").format( lock_dir=lock_dir, pid=getpid(), )) break except FileExistsError: io.debug(_("{pid}: Waiting for lock on {lock_dir} ...").format( lock_dir=lock_dir, pid=getpid(), )) sleep(1) try: # We now have a lock, but another process may have cloned # the repo in the meantime. (It is vital to use a git command # here which does not traverse to parent directories.) 
try: self.run_git( ["rev-parse", "--resolve-git-dir", "."], repo_dir, ) io.debug(_("{pid}: Repo already existed in {repo_dir}").format( repo_dir=repo_dir, pid=getpid(), )) except RuntimeError: self.run_git(git_cmdline, repo_dir) io.debug(_("{pid}: Cloned repo to {repo_dir}").format( repo_dir=repo_dir, pid=getpid(), )) finally: rmdir(lock_dir) io.debug(_("{pid}: Released lock on {lock_dir}").format( lock_dir=lock_dir, pid=getpid(), )) return repo_dir, remove_dir # FIXME get_auto_deps for dir and ensure dir does not use purge bundlewrap-4.13.6/bundlewrap/items/groups.py000066400000000000000000000077611417502274300211430ustar00rootroot00000000000000from bundlewrap.exceptions import BundleError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.items.users import _USERNAME_VALID_CHARACTERS from bundlewrap.utils.text import mark_for_translation as _ def _parse_group_line(line): """ Parses a line from /etc/group and returns the information as a dictionary. """ result = dict(zip( ('groupname', 'password', 'gid', 'members'), line.strip().split(":"), )) result['gid'] = result['gid'] del result['password'] # nothing useful here return result class Group(Item): """ A group. """ BUNDLE_ATTRIBUTE_NAME = "groups" ITEM_ATTRIBUTES = { 'delete': False, 'gid': None, } ITEM_TYPE_NAME = "group" REQUIRED_ATTRIBUTES = [] @classmethod def block_concurrent(cls, node_os, node_os_version): # https://github.com/bundlewrap/bundlewrap/issues/367 if node_os in ('freebsd', 'openbsd'): return [cls.ITEM_TYPE_NAME] else: return [] def __repr__(self): return "<Group name:{}>".format(self.name) def cdict(self): if self.attributes['delete']: return None cdict = {} if self.attributes.get('gid') is not None: cdict['gid'] = self.attributes['gid'] return cdict def fix(self, status): if self.node.os == 'freebsd': command = "pw " else: command = "" if status.must_be_deleted: command += f"groupdel {self.name}" else: command += "groupadd " if status.must_be_created else "groupmod " if self.attributes['gid'] is not None: command += "-g {} ".format(self.attributes['gid']) if self.node.os == 'freebsd': # FreeBSD expects <name> to be the first argument to # `pw groupadd/mod`, however we can also pass it using -n # instead. Then it is positionally independent.
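# The assembled command then looks like e.g. # "pw groupmod -g 1001 -n www" (values hypothetical).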
command += "-n " command += f"{self.name}" self.run(command, may_fail=True) def sdict(self): # verify content of /etc/group grep_result = self.run( "grep -e '^{}:' /etc/group".format(self.name), may_fail=True, ) if grep_result.return_code != 0: return None else: return _parse_group_line(grep_result.stdout_text) def patch_attributes(self, attributes): if isinstance(attributes.get('gid'), int): attributes['gid'] = str(attributes['gid']) return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) @classmethod def validate_name(cls, bundle, name): for char in name: if char not in _USERNAME_VALID_CHARACTERS: raise BundleError(_( "Invalid character in group name '{name}': {char} (bundle '{bundle}')" ).format( char=char, bundle=bundle.name, name=name, )) if name.endswith("_") or name.endswith("-"): raise BundleError(_( "Group name '{name}' must not end in dash or underscore (bundle '{bundle}')" ).format( bundle=bundle.name, name=name, )) if len(name) > 30: raise BundleError(_( "Group name '{name}' is longer than 30 characters (bundle '{bundle}')" ).format( bundle=bundle.name, name=name, )) bundlewrap-4.13.6/bundlewrap/items/kubernetes.py000066400000000000000000000362011417502274300217640ustar00rootroot00000000000000from abc import ABCMeta import json from os.path import exists, join import re from bundlewrap.exceptions import BundleError from bundlewrap.metadata import metadata_to_json from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.items.files import content_processor_jinja2, content_processor_mako from bundlewrap.utils.dicts import merge_dict, reduce_dict from bundlewrap.utils.ui import io from bundlewrap.utils.text import force_text, mark_for_translation as _ import yaml def log_error(run_result): if run_result.return_code != 0: io.debug(run_result.stdout.decode('utf-8')) io.debug(run_result.stderr.decode('utf-8')) class KubernetesItem(Item, metaclass=ABCMeta): """ A generic Kubernetes item. 
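Item names generally take the form "namespace/name" (see NAME_REGEX on each subclass). The namespace property below splits on the first "/"; where a subclass' regex permits an empty namespace part, it resolves to None, i.e. a cluster-wide resource.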
""" ITEM_ATTRIBUTES = { 'delete': False, 'encoding': "utf-8", # required by content processors 'manifest': None, 'manifest_file': None, 'manifest_processor': None, 'context': None, } KIND = None NAME_REGEX = r"^[a-z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) def __init__(self, *args, **kwargs): super(KubernetesItem, self).__init__(*args, **kwargs) self.item_data_dir = join(self.bundle.bundle_data_dir, "manifests") self.item_dir = join(self.bundle.bundle_dir, "manifests") @property def _template_content(self): # required by content processors filename = join(self.item_data_dir, self.attributes['manifest_file']) if not exists(filename): filename = join(self.item_dir, self.attributes['manifest_file']) with open(filename, 'rb') as f: return force_text(f.read()) def cdict(self): if self.attributes['delete']: return None else: return {'manifest': json.dumps( self.nuke_k8s_status(json.loads(self.manifest)), indent=4, sort_keys=True, )} def fix(self, status): if status.must_be_deleted: result = self.run_local(self._kubectl + ["delete", self.KIND, self.resource_name]) log_error(result) else: result = self.run_local( self._kubectl + ["apply", "-f", "-"], data_stdin=self.manifest.encode('utf-8'), ) log_error(result) def get_auto_deps(self, items, _secrets=True): deps = [] for item in items: if ( item.ITEM_TYPE_NAME == 'k8s_namespace' and item.name == self.namespace ): if item.attributes['delete'] and not self.attributes['delete']: raise BundleError(_( "{item} (bundle '{bundle}' on {node}) " "cannot exist in namespace marked for deletion" ).format( item=self.id, bundle=self.bundle.name, node=self.node.name, )) deps.append(item.id) elif ( _secrets and item.ITEM_TYPE_NAME == 'k8s_secret' and item.namespace == self.namespace ): deps.append(item.id) return deps @property def _kubectl(self): cmdline = [ "kubectl", "--context={}".format(self.node.kubectl_context), ] if self.namespace: cmdline.append("--namespace={}".format(self.namespace)) return cmdline @property def _manifest_dict(self): if self.attributes['manifest_processor'] == 'jinja2': content_processor = content_processor_jinja2 elif self.attributes['manifest_processor'] == 'mako': content_processor = content_processor_mako else: content_processor = lambda item: item._template_content.encode('utf-8') if self.attributes['manifest'] is not None or self.attributes['manifest_file'] is None: user_manifest = self.attributes['manifest'] or {} elif ( self.attributes['manifest_file'].endswith(".yaml") or self.attributes['manifest_file'].endswith(".yml") ): user_manifest = yaml.load(content_processor(self), Loader=yaml.SafeLoader) elif self.attributes['manifest_file'].endswith(".json"): user_manifest = json.loads(content_processor(self)) merged_manifest = merge_dict( { 'kind': self.KIND, 'metadata': { 'name': self.name.split("/")[-1], }, }, user_manifest, ) if merged_manifest.get('apiVersion') is None: raise BundleError(_( "{item} from bundle '{bundle}' needs an apiVersion in its manifest" ).format(item=self.id, bundle=self.bundle.name)) return merged_manifest @property def manifest(self): return metadata_to_json(self._manifest_dict) @property def namespace(self): return self.name.split("/", 1)[0] or None def nuke_k8s_status(self, manifest): if 'status' in manifest: del manifest['status'] return manifest def patch_attributes(self, attributes): if 'context' not in attributes: attributes['context'] = {} return attributes def preview(self): if self.attributes['delete'] is True: raise ValueError return 
yaml.dump(json.loads(self.manifest), default_flow_style=False) @property def resource_name(self): return self._manifest_dict['metadata']['name'] def sdict(self): # Include apiVersion in object name to stop k8s from chosing an # apiVersion randomly. version_spec = [self.KIND] if '/' in self._manifest_dict['apiVersion']: group, version = self._manifest_dict['apiVersion'].split('/') version_spec.append(version) version_spec.append(group) else: version_spec.append(self._manifest_dict['apiVersion']) # Yes, it has to be something like: # kubectl ... get -o json Secret.v1./token version_spec.append('') request_name = '{}/{}'.format('.'.join(version_spec), self.resource_name) result = self.run_local(self._kubectl + ["get", "-o", "json", request_name]) if result.return_code == 0: full_json_response = json.loads(result.stdout.decode('utf-8')) if full_json_response.get("status", {}).get("phase") == "Terminating": # this resource is currently being deleted, consider it gone return None return {'manifest': json.dumps(reduce_dict( full_json_response, self.nuke_k8s_status(json.loads(self.manifest)), ), indent=4, sort_keys=True)} elif result.return_code == 1 and "NotFound" in result.stderr.decode('utf-8'): return None else: io.debug(result.stdout.decode('utf-8')) io.debug(result.stderr.decode('utf-8')) raise RuntimeError(_("error getting state of {}, check `bw --debug`".format(self.id))) @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) if attributes.get('manifest') and attributes.get('manifest_file'): raise BundleError(_( "{item} from bundle '{bundle}' cannot have both 'manifest' and 'manifest_file'" ).format(item=item_id, bundle=bundle.name)) if attributes.get('manifest_processor') not in (None, 'jinja2', 'mako'): raise BundleError(_( "{item} from bundle '{bundle}' has invalid manifest_processor " "(must be 'jinja2' or 'mako')" ).format(item=item_id, bundle=bundle.name)) @classmethod def validate_name(cls, bundle, name): if not cls.NAME_REGEX_COMPILED.match(name): raise BundleError(_( "name for {item_type}:{name} (bundle '{bundle}') " "on {node} doesn't match {regex}" ).format( item_type=cls.ITEM_TYPE_NAME, name=name, bundle=bundle.name, node=bundle.node.name, regex=cls.NAME_REGEX, )) class KubernetesRawItem(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_raw" ITEM_TYPE_NAME = "k8s_raw" NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-zA-Z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$" NAME_REGEX_COMPILED = re.compile(NAME_REGEX) def _check_bundle_collisions(self, items): super(KubernetesRawItem, self)._check_bundle_collisions(items) for item in items: if item == self or not isinstance(item, KubernetesItem): continue if item.KIND == self.KIND and item.resource_name == self.resource_name: raise BundleError(_( "duplicate definition of {item} (from bundle {bundle}) " "as {item2} (from bundle {bundle2}) on {node}" ).format( item=self.id, bundle=self.bundle.name, item2=item.id, bundle2=item.bundle.name, node=self.node.name, )) def get_auto_deps(self, items): deps = super(KubernetesRawItem, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME == 'k8s_crd' and item._manifest_dict.get('spec', {}).get('names', {}).get('kind') == self.KIND ): deps.append(item.id) return deps @property def KIND(self): return 
self.name.split("/", 2)[1]


class KubernetesClusterRole(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_clusterroles"
    KIND = "ClusterRole"
    ITEM_TYPE_NAME = "k8s_clusterrole"
    NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
    NAME_REGEX_COMPILED = re.compile(NAME_REGEX)

    @property
    def namespace(self):
        return None


class KubernetesClusterRoleBinding(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_clusterrolebindings"
    KIND = "ClusterRoleBinding"
    ITEM_TYPE_NAME = "k8s_clusterrolebinding"
    NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
    NAME_REGEX_COMPILED = re.compile(NAME_REGEX)

    def get_auto_deps(self, items):
        deps = super(KubernetesClusterRoleBinding, self).get_auto_deps(items)
        deps.append("k8s_clusterrole:")
        return deps

    @property
    def namespace(self):
        return None


class KubernetesConfigMap(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_configmaps"
    KIND = "ConfigMap"
    ITEM_TYPE_NAME = "k8s_configmap"


class KubernetesCronJob(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_cronjobs"
    KIND = "CronJob"
    ITEM_TYPE_NAME = "k8s_cronjob"


class KubernetesCustomResourceDefinition(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_crd"
    KIND = "CustomResourceDefinition"
    ITEM_TYPE_NAME = "k8s_crd"
    NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
    NAME_REGEX_COMPILED = re.compile(NAME_REGEX)

    def get_auto_deps(self, items):
        return []

    @property
    def namespace(self):
        return None


class KubernetesDaemonSet(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_daemonsets"
    KIND = "DaemonSet"
    ITEM_TYPE_NAME = "k8s_daemonset"

    def get_auto_deps(self, items):
        deps = super(KubernetesDaemonSet, self).get_auto_deps(items)
        for item in items:
            if (
                item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap')
                and item.namespace == self.namespace
            ):
                deps.append(item.id)
        return deps


class KubernetesDeployment(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_deployments"
    KIND = "Deployment"
    ITEM_TYPE_NAME = "k8s_deployment"

    def get_auto_deps(self, items):
        deps = super(KubernetesDeployment, self).get_auto_deps(items)
        for item in items:
            if (
                item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap')
                and item.namespace == self.namespace
            ):
                deps.append(item.id)
        return deps


class KubernetesIngress(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_ingresses"
    KIND = "Ingress"
    ITEM_TYPE_NAME = "k8s_ingress"

    def get_auto_deps(self, items):
        deps = super(KubernetesIngress, self).get_auto_deps(items)
        for item in items:
            if (
                item.ITEM_TYPE_NAME == 'k8s_service'
                and item.namespace == self.namespace
            ):
                deps.append(item.id)
        return deps


class KubernetesNamespace(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_namespaces"
    KIND = "Namespace"
    ITEM_TYPE_NAME = "k8s_namespace"
    NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
    NAME_REGEX_COMPILED = re.compile(NAME_REGEX)

    def get_auto_deps(self, items):
        return []


class KubernetesNetworkPolicy(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_networkpolicies"
    KIND = "NetworkPolicy"
    ITEM_TYPE_NAME = "k8s_networkpolicy"
    NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-z0-9-\.]{1,253}$"
    NAME_REGEX_COMPILED = re.compile(NAME_REGEX)


class KubernetesPersistentVolumeClaim(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_pvc"
    KIND = "PersistentVolumeClaim"
    ITEM_TYPE_NAME = "k8s_pvc"


class KubernetesRole(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_roles"
    KIND = "Role"
    ITEM_TYPE_NAME = "k8s_role"


class KubernetesRoleBinding(KubernetesItem):
    BUNDLE_ATTRIBUTE_NAME = "k8s_rolebindings"
    KIND = "RoleBinding"
    ITEM_TYPE_NAME = "k8s_rolebinding"

    def get_auto_deps(self, items):
        deps = super(KubernetesRoleBinding, self).get_auto_deps(items)
        deps.append("k8s_role:")
        return deps


class KubernetesSecret(KubernetesItem):
BUNDLE_ATTRIBUTE_NAME = "k8s_secrets" KIND = "Secret" ITEM_TYPE_NAME = "k8s_secret" def get_auto_deps(self, items): return super(KubernetesSecret, self).get_auto_deps(items, _secrets=False) class KubernetesService(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_services" KIND = "Service" ITEM_TYPE_NAME = "k8s_service" class KubernetesServiceAccount(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_serviceaccounts" KIND = "ServiceAccount" ITEM_TYPE_NAME = "k8s_serviceaccount" class KubernetesStatefulSet(KubernetesItem): BUNDLE_ATTRIBUTE_NAME = "k8s_statefulsets" KIND = "StatefulSet" ITEM_TYPE_NAME = "k8s_statefulset" def get_auto_deps(self, items): deps = super(KubernetesStatefulSet, self).get_auto_deps(items) for item in items: if ( item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap') and item.namespace == self.namespace ): deps.append(item.id) return deps bundlewrap-4.13.6/bundlewrap/items/pkg.py000066400000000000000000000044041417502274300203760ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from contextlib import suppress from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ class Pkg(Item, metaclass=ABCMeta): """ A generic package. """ ITEM_ATTRIBUTES = { 'installed': True, } _pkg_install_cache = {} @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def __repr__(self): return "<{} name:{} installed:{}>".format( self.ITEM_TYPE_NAME, self.name, self.attributes['installed'], ) def fix(self, status): with suppress(KeyError): self._pkg_install_cache.get(self.node.name, set()).remove(self.id) if self.attributes['installed'] is False: self.pkg_remove() else: self.pkg_install() @abstractmethod def pkg_all_installed(self): raise NotImplementedError @abstractmethod def pkg_install(self): raise NotImplementedError @abstractmethod def pkg_installed(self): raise NotImplementedError def pkg_installed_cached(self): cache = self._pkg_install_cache.setdefault(self.node.name, set()) if not cache: cache.add(None) # make sure we don't run into this if again for pkgid in self.pkg_all_installed(): cache.add(pkgid) if self.pkg_in_cache(self.id, cache): return True return self.pkg_installed() @staticmethod def pkg_in_cache(pkgid, cache): """ pkg_apt needs to override this for multiarch support. """ return pkgid in cache @abstractmethod def pkg_remove(self): raise NotImplementedError def sdict(self): return { 'installed': self.pkg_installed_cached(), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/pkg_apk.py000066400000000000000000000015411417502274300212300ustar00rootroot00000000000000from shlex import quote from bundlewrap.items.pkg import Pkg class ApkPkg(Pkg): """ A package installed by apk. 
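    A hypothetical bundle usage sketch (the package name is made up):

        pkg_apk = {
            "curl": {
                'installed': True,  # the default; False removes the package
            },
        }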
""" BUNDLE_ATTRIBUTE_NAME = "pkg_apk" ITEM_TYPE_NAME = "pkg_apk" @property def quoted(self): return quote(self.name) def pkg_all_installed(self): pkgs = self.run("apk list --installed").stdout.decode("utf-8") for line in pkgs.splitlines(): pkg_name = line.split()[0] yield f"{self.ITEM_TYPE_NAME}:{pkg_name}" def pkg_install(self): self.run(f"apk add {self.quoted}", may_fail=True) def pkg_installed(self): result = self.run(f"apk info --installed {self.quoted}", may_fail=True) return result.return_code == 0 and self.quoted in result.stdout_text def pkg_remove(self): self.run(f"apk del {self.quoted}", may_fail=True) bundlewrap-4.13.6/bundlewrap/items/pkg_apt.py000066400000000000000000000045151417502274300212450ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items.pkg import Pkg from bundlewrap.utils.text import mark_for_translation as _ class AptPkg(Pkg): """ A package installed by apt. """ BUNDLE_ATTRIBUTE_NAME = "pkg_apt" ITEM_TYPE_NAME = "pkg_apt" WHEN_CREATING_ATTRIBUTES = { 'start_service': True, } def pkg_all_installed(self): result = self.run("dpkg -l | grep '^ii'") for line in result.stdout.decode('utf-8').strip().split("\n"): pkg_name = line[4:].split()[0].replace(":", "_") yield "{}:{}".format(self.ITEM_TYPE_NAME, pkg_name) def pkg_install(self): runlevel = "" if self.when_creating['start_service'] else "RUNLEVEL=1 " self.run( runlevel + "DEBIAN_FRONTEND=noninteractive " "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends " "install {}".format(quote(self.name.replace("_", ":"))), may_fail=True, ) def pkg_installed(self): result = self.run( "dpkg -s {} | grep '^Status: '".format(quote(self.name.replace("_", ":"))), may_fail=True, ) return result.return_code == 0 and " installed" in result.stdout_text @staticmethod def pkg_in_cache(pkgid, cache): pkgtype, pkgname = pkgid.split(":") if "_" in pkgname: return pkgid in cache else: for cached_pkgid in cache: if cached_pkgid is None: continue if cached_pkgid == pkgid or cached_pkgid.startswith(pkgid + ":"): return True return False def pkg_remove(self): self.run( "DEBIAN_FRONTEND=noninteractive " "apt-get -qy purge {}".format(quote(self.name.replace("_", ":"))) ) @classmethod def validate_attributes(cls, bundle, item_id, attributes): super(AptPkg, cls).validate_attributes(bundle, item_id, attributes) if not isinstance(attributes.get('when_creating', {}).get('start_service', True), bool): raise BundleError(_( "expected boolean for 'start_service' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/pkg_dnf.py000066400000000000000000000017341417502274300212300ustar00rootroot00000000000000from shlex import quote from bundlewrap.items.pkg import Pkg class DnfPkg(Pkg): """ A package installed by dnf. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_dnf" ITEM_TYPE_NAME = "pkg_dnf" @classmethod def block_concurrent(cls, node_os, node_os_version): return ["pkg_dnf", "pkg_yum"] def pkg_all_installed(self): result = self.run("dnf -d0 -e0 list installed") for line in result.stdout.decode('utf-8').strip().split("\n"): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) def pkg_install(self): self.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.run( "dnf -d0 -e0 list installed {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) bundlewrap-4.13.6/bundlewrap/items/pkg_freebsd.py000066400000000000000000000055001417502274300220660ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def parse_pkg_name(pkgname, line): # Contains the assumption that version may not contain '-', which is covered # according to the FreeBSD docs (Section 5.2.4, "PKGNAMEPREFIX and PKGNAMESUFFIX") installed_package, _sep, installed_version = line.rpartition('-') assert installed_package != "", _( "Unexpected FreeBSD package name: {line}").format(line=line) return installed_package == pkgname, installed_version def pkg_install(node, pkgname, version): # Setting version to None means "don't specify version". if version is None: full_name = pkgname else: full_name = pkgname + "-" + version return node.run("pkg install -y {}".format(full_name), may_fail=True) def pkg_installed(node, pkgname): result = node.run( "pkg info | cut -f 1 -d ' '", may_fail=True, ) for line in result.stdout.decode('utf-8').strip().splitlines(): found, installed_version = parse_pkg_name(pkgname, line) if found: return installed_version return False def pkg_remove(node, pkgname): return node.run("pkg delete -y -R {}".format(quote(pkgname)), may_fail=True) class FreeBSDPkg(Item): """ A package installed via pkg install/pkg delete. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_freebsd" ITEM_ATTRIBUTES = { 'installed': True, 'version': None, } ITEM_TYPE_NAME = "pkg_freebsd" def __repr__(self): return "".format( self.name, self.attributes['installed'], ) @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def cdict(self): cdict = self.attributes.copy() if cdict['version'] is None or not cdict['installed']: del cdict['version'] return cdict def fix(self, status): if self.attributes['installed'] is False: pkg_remove(self.node, self.name) else: pkg_install( self.node, self.name, self.attributes['version'] ) def sdict(self): version = pkg_installed(self.node, self.name) return { 'installed': bool(version), 'version': version if version else _("none"), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/pkg_openbsd.py000066400000000000000000000105641417502274300221140ustar00rootroot00000000000000from shlex import quote import re from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ PKGSPEC_REGEX = re.compile(r"^(.+)-(\d.*)$") def parse_pkg_name(pkgname, line): matches = PKGSPEC_REGEX.match(line) assert matches is not None, _("Unexpected OpenBSD package name: {line}").format(line=line) installed_package, installed_version_and_more = matches.groups() assert not installed_version_and_more.endswith("-"), \ _("Unexpected OpenBSD package name (ends in dash): {line}").format(line=line) if installed_package == pkgname: if "-" in installed_version_and_more: tokens = installed_version_and_more.split("-") installed_version = tokens[0] installed_flavor = "-".join(tokens[1:]) else: installed_version = installed_version_and_more installed_flavor = "" return True, installed_version, installed_flavor else: return False, None, None def pkg_install(node, pkgname, flavor, version): # Setting either flavor or version to None means "don't specify this # component". Setting flavor to the empty string means choosing the # "normal" flavor. 
# # flavor = "", version = None: "pkgname--" # flavor = "foo", version = None: "pkgname--foo" # flavor = None, version = None: "pkgname" (a) # flavor = "", version = "1.0": "pkgname-1.0" (b) # flavor = "foo", version = "1.0": "pkgname-1.0-foo" # flavor = None, version = "1.0": "pkgname-1.0" # flavor = None, version = "-foo": "pkgname--foo" (backwards compat) if flavor is None and version is None: # Case "(a)" full_name = pkgname elif flavor == "" and version is not None: # Case "(b)" full_name = "{}-{}".format(pkgname, version) else: version_part = "-" if version is None else "-{}".format(version) flavor_part = "" if flavor is None else "-{}".format(flavor) full_name = "{}{}{}".format(pkgname, version_part, flavor_part) return node.run("pkg_add -r -I {}".format(full_name), may_fail=True) def pkg_installed(node, pkgname): result = node.run( "pkg_info | cut -f 1 -d ' '", may_fail=True, ) for line in result.stdout.decode('utf-8').strip().splitlines(): found, installed_version, installed_flavor = parse_pkg_name(pkgname, line) if found: return installed_version, installed_flavor return False, None def pkg_remove(node, pkgname): return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname)), may_fail=True) class OpenBSDPkg(Item): """ A package installed by pkg_add/pkg_delete. """ BUNDLE_ATTRIBUTE_NAME = "pkg_openbsd" ITEM_ATTRIBUTES = { 'installed': True, 'flavor': "", 'version': None, } ITEM_TYPE_NAME = "pkg_openbsd" def __repr__(self): return "".format( self.name, self.attributes['installed'], ) @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def cdict(self): cdict = self.attributes.copy() if not cdict['installed']: del cdict['flavor'] if cdict['version'] is None or not cdict['installed']: del cdict['version'] return cdict def fix(self, status): if self.attributes['installed'] is False: pkg_remove(self.node, self.name) else: pkg_install( self.node, self.name, self.attributes['flavor'], self.attributes['version'] ) def sdict(self): version, flavor = pkg_installed(self.node, self.name) return { 'installed': bool(version), 'flavor': flavor if flavor is not None else _("none"), 'version': version if version else _("none"), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/pkg_opkg.py000066400000000000000000000015611417502274300214170ustar00rootroot00000000000000from shlex import quote from bundlewrap.items.pkg import Pkg class OpkgPkg(Pkg): """ A package installed by opkg. 
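    A hypothetical bundle usage sketch (the package name is made up):

        pkg_opkg = {
            "tcpdump": {
                'installed': True,
            },
        }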
""" BUNDLE_ATTRIBUTE_NAME = "pkg_opkg" ITEM_TYPE_NAME = "pkg_opkg" def pkg_all_installed(self): result = self.run("opkg list-installed") for line in result.stdout.decode('utf-8').strip().split("\n"): if line: yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0]) def pkg_install(self): self.run("opkg install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.run( "opkg status {} | grep ^Status: | grep installed".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.run("opkg remove {}".format(quote(self.name)), may_fail=True) bundlewrap-4.13.6/bundlewrap/items/pkg_pacman.py000066400000000000000000000031241417502274300217130ustar00rootroot00000000000000from os.path import basename, join from shlex import quote from bundlewrap.items.pkg import Pkg class PacmanPkg(Pkg): """ A package installed by pacman. """ BUNDLE_ATTRIBUTE_NAME = "pkg_pacman" ITEM_ATTRIBUTES = { 'installed': True, 'tarball': None, } ITEM_TYPE_NAME = "pkg_pacman" def cdict(self): # TODO/FIXME: this is bad because it ignores tarball # (However, that's not part of the node's state, so bw won't # "fix" it anyway, so ... I guess we can live with that.) return {'installed': self.attributes['installed']} def pkg_all_installed(self): pkgs = self.run("pacman -Qq").stdout.decode('utf-8') for line in pkgs.splitlines(): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()) def pkg_install(self): if self.attributes['tarball']: local_file = join(self.item_dir, self.attributes['tarball']) remote_file = "/tmp/{}".format(basename(local_file)) self.node.upload(local_file, remote_file) self.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True) self.run("rm -- {}".format(quote(remote_file))) else: self.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.run( "pacman -Q {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True) bundlewrap-4.13.6/bundlewrap/items/pkg_pamac.py000066400000000000000000000040161417502274300215360ustar00rootroot00000000000000from shlex import quote from bundlewrap.items.pkg import Pkg from bundlewrap.exceptions import BundleError from bundlewrap.utils.text import mark_for_translation as _ class PamacPkg(Pkg): """ A package installed by pamac/pacman. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_pamac" ITEM_ATTRIBUTES = { 'installed': True, } WHEN_CREATING_ATTRIBUTES = { 'aur': False, } ITEM_TYPE_NAME = "pkg_pamac" @classmethod def block_concurrent(cls, node_os, node_os_version): return ["pkg_pacman", "pkg_pamac"] def cdict(self): return {'installed': self.attributes['installed']} def pkg_all_installed(self): pkgs = self.run("pacman -Qq").stdout.decode('utf-8') for line in pkgs.splitlines(): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()) def pkg_install(self): if self.when_creating['aur']: self.run("pamac build --no-keep --no-confirm {}".format(quote(self.name)), may_fail=True) else: self.run("pamac install --no-upgrade --no-confirm {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.run( "pacman -Q {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.run("pamac remove --no-confirm --unneeded --orphans {}".format(quote(self.name)), may_fail=True) def get_auto_deps(self, items): deps = [] for item in items: if item == self: continue if item.ITEM_TYPE_NAME in ("pkg_pacman") and item.name == self.name: raise BundleError(_( "{item} is declared both by pkg_pacman (in bundle {bundle_pacman}) " "and pkg_pamac (in bundle {bundle_pamac})" ).format( item=item.name, bundle_pacman=item.bundle.name, bundle_pamac=self.bundle.name, )) return deps bundlewrap-4.13.6/bundlewrap/items/pkg_pip.py000066400000000000000000000100241417502274300212410ustar00rootroot00000000000000from os.path import join, split from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ class PipPkg(Item): """ A package installed by pip. """ BUNDLE_ATTRIBUTE_NAME = "pkg_pip" ITEM_ATTRIBUTES = { 'installed': True, 'version': None, } ITEM_TYPE_NAME = "pkg_pip" @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def __repr__(self): return "".format( self.name, self.attributes['installed'], ) def cdict(self): cdict = {'installed': self.attributes['installed']} if self.attributes.get('version') is not None: cdict['version'] = self.attributes['version'] return cdict def get_auto_deps(self, items): for item in items: if item == self: continue if ( item.ITEM_TYPE_NAME == self.ITEM_TYPE_NAME and item.name.lower() == self.name.lower() ): raise BundleError(_( "{item1} (from bundle '{bundle1}') has name collision with " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) return [] def fix(self, status): if self.attributes['installed'] is False: self._pkg_remove(self.name) else: self._pkg_install(self.name, version=self.attributes['version']) def sdict(self): install_status = self._pkg_installed(self.name) return { 'installed': bool(install_status), 'version': None if install_status is False else install_status, } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) if 'version' in attributes and attributes.get('installed', True) is False: raise BundleError(_( "cannot set version for uninstalled package on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) @classmethod def validate_name(cls, bundle, name): if "_" in split(name)[1]: raise BundleError( 
f"Underscores are not allowed in pkg_pip names " f"because pip will convert them to dashes anyway. " f"Just use dashes. (pkg_pip:{name} in bundle {bundle.name})" ) def _pkg_install(self, pkgname, version=None): if version: pkgname = "{}=={}".format(pkgname, version) pip_path, pkgname = self._split_path(pkgname) return self.run("{} install -U {}".format(quote(pip_path), quote(pkgname)), may_fail=True) def _pkg_installed(self, pkgname): pip_path, pkgname = self._split_path(pkgname) result = self.run( "{} freeze | grep -i '^{}=='".format(quote(pip_path), pkgname), may_fail=True, ) if result.return_code != 0: return False else: return result.stdout_text.split("=")[-1].strip() def _pkg_remove(self, pkgname): pip_path, pkgname = self._split_path(pkgname) return self.run( "{} uninstall -y {}".format(quote(pip_path), quote(pkgname)), may_fail=True, ) def _split_path(self, pkgname): virtualenv, pkgname = split(pkgname) pip_path = join(virtualenv, "bin", "pip") if virtualenv else self.node.pip_command return pip_path, pkgname bundlewrap-4.13.6/bundlewrap/items/pkg_snap.py000066400000000000000000000014711417502274300214200ustar00rootroot00000000000000from shlex import quote from bundlewrap.items.pkg import Pkg class SnapPkg(Pkg): """ A package installed by snap. """ BUNDLE_ATTRIBUTE_NAME = "pkg_snap" ITEM_TYPE_NAME = "pkg_snap" def pkg_all_installed(self): result = self.run("snap list") for line in result.stdout.decode('utf-8').strip().split("\n"): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(" ")[0]) def pkg_install(self): self.run("snap install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.run( "snap list {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.run("snap remove {}".format(quote(self.name)), may_fail=True) bundlewrap-4.13.6/bundlewrap/items/pkg_yum.py000066400000000000000000000017341417502274300212730ustar00rootroot00000000000000from shlex import quote from bundlewrap.items.pkg import Pkg class YumPkg(Pkg): """ A package installed by yum. 
""" BUNDLE_ATTRIBUTE_NAME = "pkg_yum" ITEM_TYPE_NAME = "pkg_yum" @classmethod def block_concurrent(cls, node_os, node_os_version): return ["pkg_dnf", "pkg_yum"] def pkg_all_installed(self): result = self.run("yum -d0 -e0 list installed") for line in result.stdout.decode('utf-8').strip().split("\n"): yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) def pkg_install(self): self.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) def pkg_installed(self): result = self.run( "yum -d0 -e0 list installed {}".format(quote(self.name)), may_fail=True, ) return result.return_code == 0 def pkg_remove(self): self.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) bundlewrap-4.13.6/bundlewrap/items/pkg_zypper.py000066400000000000000000000036731417502274300220160ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ ZYPPER_OPTS = "--non-interactive " + \ "--non-interactive-include-reboot-patches " + \ "--quiet" def pkg_install(node, pkgname): return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True) def pkg_installed(node, pkgname): result = node.run( "zypper search --match-exact --installed-only " "--type package {}".format(quote(pkgname)), may_fail=True, ) if result.return_code != 0: return False else: return True def pkg_remove(node, pkgname): return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True) class ZypperPkg(Item): """ A package installed by zypper. """ BUNDLE_ATTRIBUTE_NAME = "pkg_zypper" ITEM_ATTRIBUTES = { 'installed': True, } ITEM_TYPE_NAME = "pkg_zypper" @classmethod def block_concurrent(cls, node_os, node_os_version): return [cls.ITEM_TYPE_NAME] def __repr__(self): return "".format( self.name, self.attributes['installed'], ) def fix(self, status): if self.attributes['installed'] is False: pkg_remove(self.node, self.name) else: pkg_install(self.node, self.name) def sdict(self): return { 'installed': pkg_installed(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('installed', True), bool): raise BundleError(_( "expected boolean for 'installed' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/postgres_dbs.py000066400000000000000000000062651417502274300223220ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import force_text, mark_for_translation as _ def create_db(node, name, owner, when_creating): template = None cmd = "createdb -wO {} ".format(owner) if when_creating.get('collation') is not None: cmd += "--lc-collate={} ".format(when_creating['collation']) template = "template0" if when_creating.get('ctype') is not None: cmd += "--lc-ctype={} ".format(when_creating['ctype']) template = "template0" if when_creating.get('encoding') is not None: cmd += "--encoding={} ".format(when_creating['encoding']) template = "template0" if template is not None: cmd += "--template={} ".format(template) cmd += name return node.run(cmd, user="postgres") def drop_db(node, name): return node.run("dropdb -w {}".format(quote(name)), user="postgres") def get_databases(node): output = node.run("psql -Anqt -F '|' -c '\\l' | grep '|'", user="postgres").stdout result = {} for line in 
force_text(output).strip().split("\n"): db, owner = line.strip().split("|", 2)[:2] result[db] = { 'owner': owner, } return result def set_owner(node, name, owner): sql = f"ALTER DATABASE \\\"{name}\\\" OWNER TO \\\"{owner}\\\"" return node.run(f"psql -nqw -c \"{sql}\"", user="postgres") class PostgresDB(Item): """ A postgres database. """ BUNDLE_ATTRIBUTE_NAME = "postgres_dbs" ITEM_ATTRIBUTES = { 'delete': False, 'owner': "postgres", } ITEM_TYPE_NAME = "postgres_db" WHEN_CREATING_ATTRIBUTES = { 'collation': None, 'ctype': None, 'encoding': None, } def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None else: return {'owner': self.attributes['owner']} def fix(self, status): if status.must_be_deleted: drop_db(self.node, self.name) elif status.must_be_created: create_db(self.node, self.name, self.attributes['owner'], self.when_creating) elif 'owner' in status.keys_to_fix: set_owner(self.node, self.name, self.attributes['owner']) else: raise AssertionError("this shouldn't happen") def get_auto_deps(self, items): deps = [] for item in items: if item.ITEM_TYPE_NAME == "postgres_role" and item.name == self.attributes['owner']: deps.append(item.id) return deps def sdict(self): databases = get_databases(self.node) if self.name not in databases: return None else: return {'owner': databases[self.name]['owner']} @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('delete', True), bool): raise BundleError(_( "expected boolean for 'delete' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/postgres_roles.py000066400000000000000000000074051417502274300226730ustar00rootroot00000000000000from passlib.apps import postgres_context from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import force_text, mark_for_translation as _ AUTHID_COLUMNS = { "rolcanlogin": 'can_login', "rolsuper": 'superuser', "rolpassword": 'password_hash', } def delete_role(node, role): node.run("dropuser -w {}".format(role), user="postgres") def fix_role(node, role, attrs, create=False): password = " PASSWORD '{}'".format(attrs['password_hash']) sql = "{operation} ROLE \\\"{role}\\\" WITH LOGIN {superuser}SUPERUSER{password}".format( operation="CREATE" if create else "ALTER", password="" if attrs['password_hash'] is None else password, role=role, superuser="" if attrs['superuser'] is True else "NO", ) node.run(f"psql -nqw -c \"{sql}\"", user="postgres") def get_role(node, role): sql = f"SELECT rolcanlogin, rolsuper, rolpassword from pg_authid WHERE rolname='{role}'" result = node.run(f"psql -Anqwx -F '|' -c \"{sql}\"", user="postgres") role_attrs = {} for line in force_text(result.stdout).strip().split("\n"): try: key, value = line.split("|") except ValueError: pass else: role_attrs[AUTHID_COLUMNS[key]] = value for bool_attr in ('can_login', 'superuser'): if bool_attr in role_attrs: role_attrs[bool_attr] = role_attrs[bool_attr] == "t" return role_attrs if role_attrs else None class PostgresRole(Item): """ A postgres role. 
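    A hypothetical bundle usage sketch (role name and hash are made up):

        postgres_roles = {
            "app_user": {
                'superuser': False,
                'password_hash': "...",  # or 'password', hashed at apply time
            },
        }

    Unless 'delete' is set, exactly one of 'password' and
    'password_hash' must be given.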
""" BUNDLE_ATTRIBUTE_NAME = "postgres_roles" ITEM_ATTRIBUTES = { 'can_login': True, 'delete': False, 'password': None, 'password_hash': None, 'superuser': False, } ITEM_TYPE_NAME = "postgres_role" def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None cdict = self.attributes.copy() del cdict['delete'] del cdict['password'] return cdict def fix(self, status): if status.must_be_deleted: delete_role(self.node, self.name) elif status.must_be_created: fix_role(self.node, self.name, self.attributes, create=True) else: fix_role(self.node, self.name, self.attributes) def sdict(self): return get_role(self.node, self.name) def patch_attributes(self, attributes): if 'password' in attributes: attributes['password_hash'] = postgres_context.encrypt( force_text(attributes['password']), user=self.name, ) return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not attributes.get('delete', False): if attributes.get('password') is None and attributes.get('password_hash') is None: raise BundleError(_( "expected either 'password' or 'password_hash' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) if attributes.get('password') is not None and attributes.get('password_hash') is not None: raise BundleError(_( "can't define both 'password' and 'password_hash' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) if not isinstance(attributes.get('delete', True), bool): raise BundleError(_( "expected boolean for 'delete' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/routeros.py000066400000000000000000000133341417502274300215010ustar00rootroot00000000000000from contextlib import suppress from threading import Lock from bundlewrap.exceptions import BundleError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.operations import RunResult from bundlewrap.utils.text import mark_for_translation as _ from librouteros import connect # very basic connection management, connections are never closed CONNECTIONS = {} CONNECTION_LOCK = Lock() class RouterOS(Item): """ RouterOS configuration. 
""" BUNDLE_ATTRIBUTE_NAME = "routeros" ITEM_ATTRIBUTES = { 'delete': False, } ITEM_TYPE_NAME = "routeros" REJECT_UNKNOWN_ATTRIBUTES = False def __repr__(self): return f"" def cdict(self): if self.attributes['delete']: return None cdict = self.attributes.copy() if '_comment' in cdict: # work around 'comment' being a builtin attribute cdict['comment'] = cdict['_comment'] del cdict['_comment'] del cdict['delete'] return cdict def fix(self, status): if status.must_be_created: self._add(self.name.split("?", 1)[0], status.cdict) elif status.must_be_deleted: self._remove(self.name.split("?", 1)[0], status.sdict['.id']) else: for key in status.keys_to_fix: self._set( self.name.split("?", 1)[0], status.sdict.get('.id'), key, status.cdict[key], ) def sdict(self): result = self._get(self.name) if result: # API doesn't return comment at all if emtpy result.setdefault('comment', '') # undo automatic type conversion in librouteros for key, value in tuple(result.items()): if value is True: result[key] = "true" elif value is False: result[key] = "false" elif isinstance(value, int): result[key] = str(value) return result def display_on_create(self, cdict): for key in tuple(cdict.keys()): if cdict[key].count(",") > 2: cdict[key] = cdict[key].split(",") return cdict def display_dicts(self, cdict, sdict, keys): for key in keys: if cdict[key].count(",") > 2 or sdict[key].count(",") > 2: cdict[key] = cdict[key].split(",") sdict[key] = sdict[key].split(",") return (cdict, sdict, keys) def display_on_delete(self, sdict): with suppress(KeyError): del sdict[".id"] for key in tuple(sdict.keys()): if sdict[key].count(",") > 2: sdict[key] = sdict[key].split(",") return sdict def patch_attributes(self, attributes): for key in tuple(attributes.keys()): if key in BUILTIN_ITEM_ATTRIBUTES: continue value = attributes[key] if value is True: attributes[key] = "true" elif value is False: attributes[key] = "false" elif isinstance(value, set): attributes[key] = ",".join(sorted(value)) elif isinstance(value, (tuple, list)): attributes[key] = ",".join(value) elif isinstance(value, int): attributes[key] = str(value) return attributes @property def _connection(self): try: connection = CONNECTIONS[self.node] except KeyError: connection = connect( # str() to resolve Faults username=str(self.node.username), password=str(self.node.password or ""), host=self.node.hostname, ) CONNECTIONS[self.node] = connection return connection def _run(self, *args): with CONNECTION_LOCK: result = tuple(self._connection.rawCmd(*args)) run_result = RunResult() run_result.stdout = repr(result) run_result.stderr = "" self._command_results.append({ 'command': repr(args), 'result': run_result, }) return result def _add(self, command, kwargs): identifier = self.name.split("?", 1)[1] for identifier_component in identifier.split("&"): identifier_key, identifier_value = identifier_component.split("=", 1) kwargs[identifier_key] = identifier_value command += "/add" arguments = [f"={key}={value}" for key, value in kwargs.items()] self._run(command, *arguments) def _get(self, command): if "?" 
in command: command, query = command.split("?", 1) query = query.split("&") query = ["?=" + condition for condition in query] query.append("?#&") # AND all conditions result = self._run(command + "/print", *query) else: result = self._run(command + "/print") if not result: return None elif len(result) == 1: return result[0] else: raise BundleError(_( "{item} on {node} returned ambiguous data from API: {result}" ).format( item=self.id, node=self.node.name, result=repr(result), )) def _set(self, command, api_id, key, value): command += "/set" kvstr = f"={key}={value}" if api_id is None: self._run(command, kvstr) else: self._run(command, f"=.id={api_id}", kvstr) def _remove(self, command, api_id): self._run(command + "/remove", f"=.id={api_id}") bundlewrap-4.13.6/bundlewrap/items/svc_freebsd.py000066400000000000000000000066301417502274300221050ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("/usr/sbin/service {} start".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run("/usr/sbin/service {} status".format(quote(svcname)), may_fail=True) return "is running as" in result.stdout_text def svc_stop(node, svcname): return node.run("/usr/sbin/service {} stop".format(quote(svcname)), may_fail=True) def svc_enable(node, svcname): return node.run("/usr/sbin/service {} enable".format(quote(svcname)), may_fail=True) def svc_enabled(node, svcname): result = node.run("/usr/sbin/service {} enabled".format(svcname), may_fail=True,) return result.return_code == 0 def svc_disable(node, svcname): return node.run("/usr/sbin/service {} disable".format(quote(svcname)), may_fail=True) class SvcFreeBSD(Item): """ A service managed by FreeBSD. 
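    A hypothetical bundle usage sketch (the service name is made up):

        svc_freebsd = {
            "sshd": {
                'running': True,
                'enabled': True,
            },
        }

    The canned actions 'stop', 'stopstart' and 'restart' can be
    triggered as e.g. "svc_freebsd:sshd:restart".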
""" BUNDLE_ATTRIBUTE_NAME = "svc_freebsd" ITEM_ATTRIBUTES = { 'running': True, 'enabled': True } ITEM_TYPE_NAME = "svc_freebsd" @classmethod def block_concurrent(cls, node_os, node_os_version): # SH: This should apply here as well # https://github.com/bundlewrap/bundlewrap/issues/554 return [cls.ITEM_TYPE_NAME] def __repr__(self): return "".format( self.name, self.attributes['running'], self.attributes['enabled'], ) def fix(self, status): if 'enabled' in status.keys_to_fix: if self.attributes['enabled'] is False: svc_disable(self.node, self.name) else: svc_enable(self.node, self.name) if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'stop': { 'command': "/usr/sbin/service {0} stop".format(self.name), 'needed_by': {self.id}, }, 'stopstart': { 'command': "/usr/sbin/service {0} stop && /usr/sbin/service {0} start".format(self.name), 'needs': {self.id}, }, 'restart': { 'command': "/usr/sbin/service {} restart".format(self.name), 'needs': { # make sure we don't restart and stopstart simultaneously f"{self.id}:stopstart", # with only the dep on stopstart, we might still end # up reloading if the service itself is skipped # because the stopstart action has cascade_skip False self.id, }, }, } def sdict(self): return { 'enabled': svc_enabled(self.node, self.name), 'running': svc_running(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/svc_openbsd.py000066400000000000000000000064651417502274300221330ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("/etc/rc.d/{} start".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run("/etc/rc.d/{} check".format(quote(svcname)), may_fail=True) return "ok" in result.stdout_text def svc_stop(node, svcname): return node.run("/etc/rc.d/{} stop".format(quote(svcname)), may_fail=True) def svc_enable(node, svcname): return node.run("rcctl set {} status on".format(quote(svcname)), may_fail=True) def svc_enabled(node, svcname): result = node.run( "rcctl ls on | grep '^{}$'".format(svcname), may_fail=True, ) return result.return_code == 0 def svc_disable(node, svcname): return node.run("rcctl set {} status off".format(quote(svcname)), may_fail=True) class SvcOpenBSD(Item): """ A service managed by OpenBSD rc.d. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_openbsd" ITEM_ATTRIBUTES = { 'running': True, 'enabled': True } ITEM_TYPE_NAME = "svc_openbsd" @classmethod def block_concurrent(cls, node_os, node_os_version): # https://github.com/bundlewrap/bundlewrap/issues/554 return [cls.ITEM_TYPE_NAME] def __repr__(self): return "".format( self.name, self.attributes['running'], self.attributes['enabled'], ) def fix(self, status): if 'enabled' in status.keys_to_fix: if self.attributes['enabled'] is False: svc_disable(self.node, self.name) else: svc_enable(self.node, self.name) if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'stop': { 'command': "/etc/rc.d/{0} stop".format(self.name), 'needed_by': {self.id}, }, 'stopstart': { 'command': "/etc/rc.d/{0} stop && /etc/rc.d/{0} start".format(self.name), 'needs': {self.id}, }, 'restart': { 'command': "/etc/rc.d/{} restart".format(self.name), 'needs': { # make sure we don't restart and stopstart simultaneously f"{self.id}:stopstart", # with only the dep on stopstart, we might still end # up reloading if the service itself is skipped # because the stopstart action has cascade_skip False self.id, }, }, } def sdict(self): return { 'enabled': svc_enabled(self.node, self.name), 'running': svc_running(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/svc_openrc.py000066400000000000000000000064401417502274300217600ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run(f"rc-service {quote(svcname)} start", may_fail=True) def svc_running(node, svcname): result = node.run(f"rc-service {quote(svcname)} status", may_fail=True) return result.return_code == 0 and "started" in result.stdout_text def svc_stop(node, svcname): return node.run(f"rc-service {quote(svcname)} stop", may_fail=True) def svc_enable(node, svcname): return node.run(f"rc-update add {quote(svcname)}", may_fail=True) def svc_enabled(node, svcname): result = node.run( f"rc-update show default | grep -w {quote(svcname)}", may_fail=True ) return result.return_code == 0 and svcname in result.stdout_text def svc_disable(node, svcname): return node.run(f"rc-update del {quote(svcname)}", may_fail=True) class SvcOpenRC(Item): """ A service managed by OpenRC init scripts. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_openrc" ITEM_ATTRIBUTES = { "running": True, "enabled": True, } ITEM_TYPE_NAME = "svc_openrc" def __repr__(self): return "".format( self.name, self.attributes["enabled"], self.attributes["running"], ) def fix(self, status): if "enabled" in status.keys_to_fix: if self.attributes["enabled"]: svc_enable(self.node, self.name) else: svc_disable(self.node, self.name) if "running" in status.keys_to_fix: if self.attributes["running"]: svc_start(self.node, self.name) else: svc_stop(self.node, self.name) def get_canned_actions(self): return { "stop": { "command": f"rc-service {self.name} stop", "needed_by": {self.id}, }, "restart": { "command": f"rc-service {self.name} restart", "needs": {self.id}, }, "reload": { "command": f"rc-service {self.name} reload".format(self.name), "needs": { # make sure we don't reload and restart simultaneously f"{self.id}:restart", # with only the dep on restart, we might still end # up reloading if the service itself is skipped # because the restart action has cascade_skip False self.id, }, }, } def sdict(self): return { "enabled": svc_enabled(self.node, self.name), "running": svc_running(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): for attribute in ("enabled", "running"): if attributes.get(attribute, None) not in (True, False, None): raise BundleError( _( "expected boolean or None for '{attribute}' on {item} in bundle '{bundle}'" ).format( attribute=attribute, bundle=bundle.name, item=item_id, ) ) bundlewrap-4.13.6/bundlewrap/items/svc_systemd.py000066400000000000000000000106101417502274300221540ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import force_text, mark_for_translation as _ def svc_start(node, svcname): return node.run("systemctl start -- {}".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run( "systemctl status -- {}".format(quote(svcname)), may_fail=True, ) return result.return_code == 0 def svc_stop(node, svcname): return node.run("systemctl stop -- {}".format(quote(svcname)), may_fail=True) def svc_enable(node, svcname): return node.run("systemctl enable -- {}".format(quote(svcname)), may_fail=True) def svc_enabled(node, svcname): result = node.run( "systemctl is-enabled -- {}".format(quote(svcname)), may_fail=True, ) return ( result.return_code == 0 and force_text(result.stdout).strip() != "runtime-enabled" ) def svc_disable(node, svcname): return node.run("systemctl disable -- {}".format(quote(svcname)), may_fail=True) def svc_mask(node, svcname): return node.run("systemctl mask -- {}".format(quote(svcname)), may_fail=True) def svc_masked(node, svcname): result = node.run( "systemctl is-enabled -- {}".format(quote(svcname)), may_fail=True, ) return ( result.return_code == 1 and force_text(result.stdout).strip() == "masked" ) def svc_unmask(node, svcname): return node.run("systemctl unmask -- {}".format(quote(svcname)), may_fail=True) class SvcSystemd(Item): """ A service managed by systemd. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_systemd" ITEM_ATTRIBUTES = { 'enabled': True, 'running': True, 'masked': False, } ITEM_TYPE_NAME = "svc_systemd" def __repr__(self): return "".format( self.name, self.attributes['enabled'], self.attributes['running'], self.attributes['masked'], ) def cdict(self): cdict = {} for option, value in self.attributes.items(): if value is not None: cdict[option] = value return cdict def fix(self, status): if 'masked' in status.keys_to_fix: if self.attributes['masked']: svc_mask(self.node, self.name) else: svc_unmask(self.node, self.name) if 'enabled' in status.keys_to_fix: if self.attributes['enabled']: svc_enable(self.node, self.name) else: svc_disable(self.node, self.name) if 'running' in status.keys_to_fix: if self.attributes['running']: svc_start(self.node, self.name) else: svc_stop(self.node, self.name) def get_canned_actions(self): return { 'stop': { 'command': "systemctl stop -- {}".format(self.name), 'needed_by': {self.id}, }, 'restart': { 'command': "systemctl restart -- {}".format(self.name), 'needs': {self.id}, }, 'reload': { 'command': "systemctl reload -- {}".format(self.name), 'needs': { # make sure we don't reload and restart simultaneously f"{self.id}:restart", # with only the dep on restart, we might still end # up reloading if the service itself is skipped # because the restart action has cascade_skip False self.id, }, }, } def sdict(self): return { 'enabled': svc_enabled(self.node, self.name), 'running': svc_running(self.node, self.name), 'masked': svc_masked(self.node, self.name), } @classmethod def validate_attributes(cls, bundle, item_id, attributes): for attribute in ('enabled', 'running'): if attributes.get(attribute, None) not in (True, False, None): raise BundleError(_( "expected boolean or None for '{attribute}' on {item} in bundle '{bundle}'" ).format( attribute=attribute, bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/svc_systemv.py000066400000000000000000000046171417502274300222100ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("/etc/init.d/{} start".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run( "/etc/init.d/{} status".format(quote(svcname)), may_fail=True, ) return result.return_code == 0 def svc_stop(node, svcname): return node.run("/etc/init.d/{} stop".format(quote(svcname)), may_fail=True) class SvcSystemV(Item): """ A service managed by traditional System V init scripts. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_systemv" ITEM_ATTRIBUTES = { 'running': True, } ITEM_TYPE_NAME = "svc_systemv" def __repr__(self): return "".format( self.name, self.attributes['running'], ) def fix(self, status): if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'stop': { 'command': "/etc/init.d/{} stop".format(self.name), 'needed_by': {self.id}, }, 'restart': { 'command': "/etc/init.d/{} restart".format(self.name), 'needs': {self.id}, }, 'reload': { 'command': "/etc/init.d/{} reload".format(self.name), 'needs': { # make sure we don't reload and restart simultaneously f"{self.id}:restart", # with only the dep on restart, we might still end # up reloading if the service itself is skipped # because the restart action has cascade_skip False self.id, }, }, } def sdict(self): return {'running': svc_running(self.node, self.name)} @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/svc_upstart.py000066400000000000000000000056251417502274300222000ustar00rootroot00000000000000from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ def svc_start(node, svcname): return node.run("initctl start --no-wait -- {}".format(quote(svcname)), may_fail=True) def svc_running(node, svcname): result = node.run("initctl status -- {}".format(quote(svcname)), may_fail=True) if result.return_code != 0: return False return " start/" in result.stdout_text def svc_stop(node, svcname): return node.run("initctl stop --no-wait -- {}".format(quote(svcname)), may_fail=True) class SvcUpstart(Item): """ A service managed by Upstart. 
""" BUNDLE_ATTRIBUTE_NAME = "svc_upstart" ITEM_ATTRIBUTES = { 'running': True, } ITEM_TYPE_NAME = "svc_upstart" def __repr__(self): return "".format( self.name, self.attributes['running'], ) def fix(self, status): if self.attributes['running'] is False: svc_stop(self.node, self.name) else: svc_start(self.node, self.name) def get_canned_actions(self): return { 'stop': { 'command': "stop {0}".format(self.name), 'needed_by': {self.id}, }, 'stopstart': { 'command': "stop {0} && start {0}".format(self.name), 'needs': {self.id}, }, 'restart': { 'command': "restart {}".format(self.name), 'needs': { # make sure we don't restart and stopstart simultaneously f"{self.id}:stopstart", # with only the dep on stopstart, we might still end # up reloading if the service itself is skipped # because the stopstart action has cascade_skip False self.id, }, }, 'reload': { 'command': "reload {}".format(self.name), 'needs': { # make sure we don't restart and reload simultaneously f"{self.id}:restart", # with only the dep on restart, we might still end # up reloading if the service itself is skipped # because the restart action has cascade_skip False self.id, }, }, } def sdict(self): return {'running': svc_running(self.node, self.name)} @classmethod def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes.get('running', True), bool): raise BundleError(_( "expected boolean for 'running' on {item} in bundle '{bundle}'" ).format( bundle=bundle.name, item=item_id, )) bundlewrap-4.13.6/bundlewrap/items/symlinks.py000066400000000000000000000146001417502274300214650ustar00rootroot00000000000000from collections import defaultdict from os.path import dirname, normpath from shlex import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.remote import PathInfo from bundlewrap.utils.text import mark_for_translation as _ from bundlewrap.utils.text import is_subdirectory ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) class Symlink(Item): """ A symbolic link. 
""" BUNDLE_ATTRIBUTE_NAME = "symlinks" ITEM_ATTRIBUTES = { 'group': "root", 'owner': "root", 'target': None, } ITEM_TYPE_NAME = "symlink" REQUIRED_ATTRIBUTES = ['target'] def __repr__(self): return "".format( quote(self.name), self.attributes['target'], ) def cdict(self): cdict = { 'target': self.attributes['target'], 'type': 'symlink', } for optional_attr in ('group', 'owner'): if self.attributes[optional_attr] is not None: cdict[optional_attr] = self.attributes[optional_attr] return cdict def display_on_create(self, cdict): del cdict['type'] return cdict def fix(self, status): if status.must_be_created or 'type' in status.keys_to_fix: # fixing the type fixes everything self._fix_type(status) return for fix_type in ('target', 'owner', 'group'): if fix_type in status.keys_to_fix: if fix_type == 'group' and 'owner' in status.keys_to_fix: # owner and group are fixed with a single chown continue getattr(self, "_fix_" + fix_type)(status) def _fix_owner(self, status): group = self.attributes['group'] or "" if group: group = ":" + quote(group) if self.node.os in self.node.OS_FAMILY_BSD: command = "chown -h {}{} {}" else: command = "chown -h {}{} -- {}" self.run(command.format( quote(self.attributes['owner'] or ""), group, quote(self.name), )) _fix_group = _fix_owner def _fix_target(self, status): if self.node.os in self.node.OS_FAMILY_BSD: self.run("ln -sfh -- {} {}".format( quote(self.attributes['target']), quote(self.name), )) else: self.run("ln -sfT -- {} {}".format( quote(self.attributes['target']), quote(self.name), )) def _fix_type(self, status): self.run("rm -rf -- {}".format(quote(self.name))) self.run("mkdir -p -- {}".format(quote(dirname(self.name)))) self.run("ln -s -- {} {}".format( quote(self.attributes['target']), quote(self.name), )) if self.attributes['owner'] or self.attributes['group']: self._fix_owner(status) def get_auto_deps(self, items): deps = [] for item in items: if item == self: continue if item.ITEM_TYPE_NAME == "file" and ( is_subdirectory(item.name, self.name) or item.name == self.name ): raise BundleError(_( "{item1} (from bundle '{bundle1}') blocking path to " "{item2} (from bundle '{bundle2}')" ).format( item1=item.id, bundle1=item.bundle.name, item2=self.id, bundle2=self.bundle.name, )) elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: if item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) elif item.ITEM_TYPE_NAME in ("directory", "symlink"): if is_subdirectory(item.name, self.name): deps.append(item.id) return deps def patch_attributes(self, attributes): if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: # BSD doesn't have a root group, so we have to use a # different default value here attributes['group'] = 'wheel' return attributes def sdict(self): path_info = PathInfo(self.node, self.name) if not path_info.exists: return None else: return { 'target': path_info.symlink_target if path_info.is_symlink else "", 'type': 
'symlink' if path_info.is_symlink else path_info.stat['type'], 'owner': path_info.owner, 'group': path_info.group, } @classmethod def validate_attributes(cls, bundle, item_id, attributes): for key, value in attributes.items(): ATTRIBUTE_VALIDATORS[key](item_id, value) @classmethod def validate_name(cls, bundle, name): if normpath(name) == "/": raise BundleError(_("'/' cannot be a symlink")) if normpath(name) != name: raise BundleError(_( "'{path}' is an invalid symlink path, should be '{normpath}' (bundle '{bundle}')" ).format( path=name, normpath=normpath(name), bundle=bundle.name, )) bundlewrap-4.13.6/bundlewrap/items/users.py000066400000000000000000000314041417502274300207560ustar00rootroot00000000000000from logging import ERROR, getLogger from shlex import quote from string import ascii_lowercase, digits from passlib.hash import bcrypt, md5_crypt, sha256_crypt, sha512_crypt from bundlewrap.exceptions import BundleError from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item from bundlewrap.utils.text import force_text, mark_for_translation as _ getLogger('passlib').setLevel(ERROR) _ATTRIBUTE_NAMES = { 'full_name': _("full name"), 'gid': _("GID"), 'groups': _("groups"), 'home': _("home dir"), 'password_hash': _("password hash"), 'shell': _("shell"), 'uid': _("UID"), } _ATTRIBUTE_OPTIONS = { 'full_name': "-c", 'gid': "-g", 'groups': "-G", 'home': "-d", 'password_hash': "-p", 'shell': "-s", 'uid': "-u", } # a random static salt if users don't provide one _DEFAULT_SALT = "uJzJlYdG" # bcrypt needs special salts. 22 characters long, ending in ".", "O", "e", "u" # see https://bitbucket.org/ecollins/passlib/issues/25 _DEFAULT_BCRYPT_SALT = "oo2ahgheen9Tei0IeJohTO" HASH_METHODS = { 'md5': md5_crypt, 'sha256': sha256_crypt, 'sha512': sha512_crypt, 'bcrypt': bcrypt } _USERNAME_VALID_CHARACTERS = ascii_lowercase + digits + "-_" def _group_name_for_gid(node, gid): """ Returns the group name that matches the gid. """ group_output = node.run("grep -e ':{}:[^:]*$' /etc/group".format(gid), may_fail=True) if group_output.return_code != 0: return None else: return group_output.stdout_text.split(":")[0] def _groups_for_user(node, username): """ Returns the list of group names for the given username on the given node. """ groups = node.run("id -Gn {}".format(username)).stdout_text.strip().split(" ") primary_group = node.run("id -gn {}".format(username)).stdout_text.strip() groups.remove(primary_group) return groups def _parse_passwd_line(line, entries): """ Parses a line from /etc/passwd and returns the information as a dictionary. """ result = dict(zip( entries, line.strip().split(":"), )) result['full_name'] = result['gecos'].split(",")[0] return result class User(Item): """ A user account.
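    Illustrative bundle snippet (made-up values; note that 'password',
    'password_hash' and 'salt' interact as validated below):

        users = {
            "deploy": {
                "home": "/home/deploy",
                "shell": "/bin/bash",
                "groups": ["www-data"],
            },
        }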
""" BUNDLE_ATTRIBUTE_NAME = "users" ITEM_ATTRIBUTES = { 'delete': False, 'full_name': None, 'gid': None, 'groups': None, 'hash_method': 'sha512', 'home': None, 'password': None, 'password_hash': None, 'salt': None, 'shell': None, 'uid': None, 'use_shadow': None, } ITEM_TYPE_NAME = "user" @classmethod def block_concurrent(cls, node_os, node_os_version): # https://github.com/bundlewrap/bundlewrap/issues/367 if node_os in ('openbsd', 'freebsd'): return [cls.ITEM_TYPE_NAME] else: return [] def __repr__(self): return "".format(self.name) def cdict(self): if self.attributes['delete']: return None cdict = self.attributes.copy() del cdict['delete'] del cdict['hash_method'] del cdict['password'] del cdict['salt'] del cdict['use_shadow'] for key in list(cdict.keys()): if cdict[key] is None: del cdict[key] if 'groups' in cdict: cdict['groups'] = set(cdict['groups']) return cdict def fix(self, status): if self.node.os == 'freebsd': # FreeBSD implements the user{add,mod,del} commands using pw(8). command = "pw " else: command = "" if status.must_be_deleted: command += "userdel {}" self.run(command.format(self.name), may_fail=True) else: command += "useradd " if status.must_be_created else "usermod " stdin = None for attr, option in sorted(_ATTRIBUTE_OPTIONS.items()): if (attr in status.keys_to_fix or status.must_be_created) and \ self.attributes[attr] is not None: if attr == 'groups': value = ",".join(self.attributes[attr]) elif attr == 'password_hash' and self.node.os == 'freebsd': # On FreeBSD, pw useradd/usermod -p sets the password expiry time. # Using -H we pass the password hash using file descriptor instead. option = '-H' value = '0' # FD 0 = stdin stdin = self.attributes[attr].encode() else: value = str(self.attributes[attr]) command += "{} {} ".format(option, quote(value)) if self.node.os == 'freebsd': # FreeBSD expects to be the first argument to # `pw useradd/mod`, however we can also pass it using -n # instead. Then it is positionally independent. command += "-n " command += f"{self.name}" self.run(command, data_stdin=stdin, may_fail=True) def display_on_create(self, cdict): for attr_name, attr_display_name in _ATTRIBUTE_NAMES.items(): if attr_name == attr_display_name: # Don't change anything; the `del` below would # always remove the key entirely! continue if attr_name in cdict: cdict[attr_display_name] = cdict[attr_name] del cdict[attr_name] return cdict def display_dicts(self, cdict, sdict, keys): for attr_name, attr_display_name in _ATTRIBUTE_NAMES.items(): if attr_name == attr_display_name: # Don't change anything; the `del`s below would # always remove the key entirely! 
continue try: keys.remove(attr_name) except ValueError: pass else: keys.append(attr_display_name) cdict[attr_display_name] = cdict[attr_name] sdict[attr_display_name] = sdict[attr_name] del cdict[attr_name] del sdict[attr_name] return (cdict, sdict, keys) def get_auto_deps(self, items): deps = [] groups = self.attributes['groups'] or [] for item in items: if item.ITEM_TYPE_NAME == "group": if not (item.name in groups or ( self.attributes['gid'] in [item.attributes['gid'], item.name] and self.attributes['gid'] is not None )): # we don't need to depend on this group continue elif item.attributes['delete']: raise BundleError(_( "{item1} (from bundle '{bundle1}') depends on item " "{item2} (from bundle '{bundle2}') which is set to be deleted" ).format( item1=self.id, bundle1=self.bundle.name, item2=item.id, bundle2=item.bundle.name, )) else: deps.append(item.id) return deps def sdict(self): # verify content of /etc/passwd if self.node.os in self.node.OS_FAMILY_BSD: password_command = "grep -ae '^{}:' /etc/master.passwd" else: password_command = "grep -ae '^{}:' /etc/passwd" passwd_grep_result = self.run( password_command.format(self.name), may_fail=True, ) if passwd_grep_result.return_code != 0: return None if self.node.os in self.node.OS_FAMILY_BSD: entries = ( 'username', 'passwd_hash', 'uid', 'gid', 'class', 'change', 'expire', 'gecos', 'home', 'shell', ) else: entries = ('username', 'passwd_hash', 'uid', 'gid', 'gecos', 'home', 'shell') sdict = _parse_passwd_line(passwd_grep_result.stdout_text, entries) if self.attributes['gid'] is not None and not self.attributes['gid'].isdigit(): sdict['gid'] = _group_name_for_gid(self.node, sdict['gid']) if self.attributes['password_hash'] is not None: if self.attributes['use_shadow'] and self.node.os not in self.node.OS_FAMILY_BSD: # verify content of /etc/shadow unless we are on a BSD # (there the hash was already read from /etc/master.passwd) shadow_grep_result = self.run( "grep -e '^{}:' /etc/shadow".format(self.name), may_fail=True, ) if shadow_grep_result.return_code != 0: sdict['password_hash'] = None else: sdict['password_hash'] = shadow_grep_result.stdout_text.split(":")[1] else: sdict['password_hash'] = sdict['passwd_hash'] del sdict['passwd_hash'] # verify content of /etc/group sdict['groups'] = set(_groups_for_user(self.node, self.name)) return sdict def patch_attributes(self, attributes): if attributes.get('password', None) is not None: # defaults aren't set yet hash_method = HASH_METHODS[attributes.get( 'hash_method', self.ITEM_ATTRIBUTES['hash_method'], )] salt = force_text(attributes.get('salt', None)) if self.node.os == 'openbsd': attributes['password_hash'] = bcrypt.encrypt( force_text(attributes['password']), rounds=8, # default rounds for OpenBSD accounts salt=_DEFAULT_BCRYPT_SALT if salt is None else salt, ) elif attributes.get('hash_method') == 'md5': attributes['password_hash'] = hash_method.encrypt( force_text(attributes['password']), salt=_DEFAULT_SALT if salt is None else salt, ) else: attributes['password_hash'] = hash_method.encrypt( force_text(attributes['password']), rounds=5000, # default from glibc salt=_DEFAULT_SALT if salt is None else salt, ) if 'use_shadow' not in attributes: attributes['use_shadow'] = self.node.use_shadow_passwords for attr in ('gid', 'uid'): if isinstance(attributes.get(attr), int): attributes[attr] = str(attributes[attr]) return attributes @classmethod def validate_attributes(cls, bundle, item_id, attributes): if attributes.get('delete', False): for attr in attributes.keys(): if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): raise
BundleError(_( "{item} from bundle '{bundle}' cannot have other " "attributes besides 'delete'" ).format(item=item_id, bundle=bundle.name)) if 'hash_method' in attributes and \ attributes['hash_method'] not in HASH_METHODS: raise BundleError( _("Invalid hash method for {item} in bundle '{bundle}': '{method}'").format( bundle=bundle.name, item=item_id, method=attributes['hash_method'], ) ) if 'password_hash' in attributes and ( 'password' in attributes or 'salt' in attributes ): raise BundleError(_( "{item} in bundle '{bundle}': 'password_hash' " "cannot be used with 'password' or 'salt'" ).format(bundle=bundle.name, item=item_id)) if 'salt' in attributes and 'password' not in attributes: raise BundleError( _("{}: salt given without a password").format(item_id) ) @classmethod def validate_name(cls, bundle, name): for char in name: if char not in _USERNAME_VALID_CHARACTERS: raise BundleError(_( "Invalid character in username '{user}': {char} (bundle '{bundle}')" ).format(bundle=bundle.name, char=char, user=name)) if name.endswith("_") or name.endswith("-"): raise BundleError(_( "Username '{user}' must not end in dash or underscore (bundle '{bundle}')" ).format(bundle=bundle.name, user=name)) if len(name) > 30: raise BundleError(_( "Username '{user}' is longer than 30 characters (bundle '{bundle}')" ).format(bundle=bundle.name, user=name)) bundlewrap-4.13.6/bundlewrap/items/zfs_dataset.py000066400000000000000000000105171417502274300221260ustar00rootroot00000000000000from shlex import quote from bundlewrap.items import Item class ZFSDataset(Item): """ Creates ZFS datasets and manages their options. """ BUNDLE_ATTRIBUTE_NAME = "zfs_datasets" REJECT_UNKNOWN_ATTRIBUTES = False ITEM_TYPE_NAME = "zfs_dataset" def __repr__(self): return f"<ZFSDataset name:{self.name}>" def __create(self, path, options): option_list = [] for option, value in sorted(options.items()): # We must exclude the 'mounted' property here because it's a # read-only "informational" property. if option != 'mounted' and value is not None: option_list.append("-o {}={}".format(quote(option), quote(value))) option_args = " ".join(option_list) self.run( "zfs create {} {}".format( option_args, quote(path), ), may_fail=True, ) if options['mounted'] == 'no': self.__set_option(path, 'mounted', 'no') def __does_exist(self, path): status_result = self.run( "zfs list {}".format(quote(path)), may_fail=True, ) return status_result.return_code == 0 def __get_option(self, path, option): cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path)) # We always expect this to succeed since we don't call this function # if we have already established that the dataset does not exist. status_result = self.run(cmd) return status_result.stdout.decode('utf-8').strip() def __set_option(self, path, option, value): if option == 'mounted': # 'mounted' is a read-only property that can not be altered by # 'set'. We need to call 'zfs mount tank/foo'.
self.run( "zfs {} {}".format( "mount" if value == 'yes' else "unmount", quote(path), ), may_fail=True, ) else: self.run( "zfs set {}={} {}".format( quote(option), quote(value), quote(path), ), may_fail=True, ) def cdict(self): cdict = {} for option, value in self.attributes.items(): if option == 'mountpoint' and value is None: value = "none" if value is not None: cdict[option] = value cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes' return cdict def fix(self, status): if status.must_be_created: self.__create(self.name, status.cdict) else: for option in status.keys_to_fix: self.__set_option(self.name, option, status.cdict[option]) def get_auto_attrs(self, items): pool = self.name.split("/")[0] pool_item = "zfs_pool:{}".format(pool) needs = set() for item in items: if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool: # Add dependency to the pool this dataset resides on. needs.add(pool_item) elif ( item.ITEM_TYPE_NAME == "zfs_dataset" and self.name != item.name ): # Find all other datasets that are parents of this # dataset. # XXX Could be optimized by finding the "largest" # parent only. if self.name.startswith(item.name + "/"): needs.add(item.id) elif ( self.attributes.get('mountpoint') and item.attributes.get('mountpoint') and self.attributes['mountpoint'] != item.attributes['mountpoint'] and self.attributes['mountpoint'].startswith(item.attributes['mountpoint']) ): needs.add(item.id) return {'needs': needs} def sdict(self): if not self.__does_exist(self.name): return None sdict = {} for option in self.attributes: sdict[option] = self.__get_option(self.name, option) sdict['mounted'] = self.__get_option(self.name, 'mounted') return sdict bundlewrap-4.13.6/bundlewrap/items/zfs_pool.py000066400000000000000000000156011417502274300214510ustar00rootroot00000000000000from collections import Counter from pipes import quote from bundlewrap.exceptions import BundleError from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ class ZFSPool(Item): """ Creates ZFS pools. """ BUNDLE_ATTRIBUTE_NAME = "zfs_pools" ITEM_ATTRIBUTES = { 'autoexpand': None, 'autoreplace': None, 'autotrim': None, } WHEN_CREATING_ATTRIBUTES = { 'ashift': None, 'config': None, } ITEM_TYPE_NAME = "zfs_pool" def __repr__(self): return "".format( self.name, self.attributes['autoexpand'], self.attributes['autoreplace'], self.attributes['autotrim'], self.when_creating['ashift'], self.when_creating['config'], ) def cdict(self): ret = {} for i in self.attributes: if self.attributes.get(i) is not None: ret[i] = self.attributes[i] return ret @property def devices_used(self): devices = set() for option in self.when_creating['config']: for device in option['devices']: devices.add(device) return sorted(devices) def fix(self, status): if status.must_be_created: cmdline = [] for option in self.when_creating['config']: if option.get('type'): cmdline.append(option['type']) if option['type'] == 'log' and len(option['devices']) > 1: cmdline.append('mirror') for device in sorted(option['devices']): res = self.run("lsblk -rndo fstype {}".format(quote(device))) detected = res.stdout.decode('UTF-8').strip() if detected != "": raise BundleError(_( "Node {}, ZFSPool {}: Device {} to be used for ZFS, " "but it is not empty! Has '{}'." 
).format(self.node.name, self.name, device, detected)) cmdline.append(quote(device)) options = set() if self.when_creating['ashift']: options.add('-o ashift={}'.format(self.when_creating['ashift'])) for opt, value in status.cdict.items(): state_str = 'on' if value else 'off' options.add('-o {}={}'.format(opt, state_str)) self.run('zpool create {} {} {}'.format( ' '.join(sorted(options)), quote(self.name), ' '.join(cmdline), )) elif status.keys_to_fix: for attr in status.keys_to_fix: state_str = 'on' if status.cdict[attr] else 'off' self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name))) def sdict(self): status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True) if status_result.return_code != 0: return None pool_status = {} for line in self.run( 'zpool get all -H -o all {}'.format(quote(self.name)), may_fail=True, ).stdout.decode().splitlines(): try: pname, prop, value, source = line.split() pool_status[prop.strip()] = value.strip() except (IndexError, ValueError): continue sdict = {} for attr in self.attributes: sdict[attr] = (pool_status.get(attr) == 'on') return sdict def test(self): duplicate_devices = [ item for item, count in Counter(self.devices_used).items() if count > 1 ] if duplicate_devices: raise BundleError(_( "{item} on node {node} uses {devices} more than once as an underlying device" ).format( item=self.id, node=self.node.name, devices=_(" and ").join(duplicate_devices), )) # Have a look at all other ZFS pools on this node and check if # multiple pools try to use the same device. for item in self.node.items: if ( item.ITEM_TYPE_NAME == "zfs_pool" and item.name != self.name and set(item.devices_used).intersection(set(self.devices_used)) ): raise BundleError(_( "Both the ZFS pools {self} and {other} on node {node} " "try to use {devices} as the underlying storage device" ).format( self=self.name, other=item.name, node=self.node.name, devices=_(" and ").join( set(item.devices_used).intersection(set(self.devices_used)), ), )) @classmethod def validate_attributes(cls, bundle, item_id, attributes): if 'config' not in attributes.get('when_creating', {}): raise BundleError(_( "{item} on node {node}: required option 'config' missing" ).format( item=item_id, node=bundle.node.name, )) elif not isinstance(attributes['when_creating']['config'], list): raise BundleError(_( "{item} on node {node}: option 'config' must be a list" ).format( item=item_id, node=bundle.node.name, )) for config in attributes['when_creating']['config']: if config.get('type', None) not in { None, 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log', }: raise BundleError(_( "{item} on node {node} has invalid type '{type}', " "must be one of (unset), 'mirror', 'raidz', 'raidz2', " "'raidz3', 'cache', 'log'" ).format( item=item_id, node=bundle.node.name, type=config['type'], )) if not config.get('devices', set()): raise BundleError(_( "{item} on node {node} uses no devices!" 
).format( item=item_id, node=bundle.node.name, )) if ( config.get('type') == 'log' and len(config['devices']) not in (1, 2) ): raise BundleError(_( "{item} on node {node} type 'log' must use exactly " "one or two devices" ).format( item=item_id, node=bundle.node.name, )) bundlewrap-4.13.6/bundlewrap/lock.py000066400000000000000000000220351417502274300174240ustar00rootroot00000000000000from datetime import datetime from getpass import getuser import json from os import environ from shlex import quote from socket import gethostname from time import time from .exceptions import NodeLockedException, NoSuchNode, RemoteException from .utils import cached_property, tempfile from .utils.text import ( blue, bold, format_duration, format_timestamp, mark_for_translation as _, parse_duration, red, wrap_question, ) from .utils.ui import io def identity(): return environ.get('BW_IDENTITY', "{}@{}".format( getuser(), gethostname(), )) class NodeLock: def __init__(self, node, interactive=False, ignore=False): self.node = node self.ignore = ignore self.interactive = interactive self.locking_node = _get_locking_node(node) def __enter__(self): if self.locking_node.os not in self.locking_node.OS_FAMILY_UNIX: # no locking required/possible return self with tempfile() as local_path: self.locking_node.run("mkdir -p " + quote(self.locking_node.lock_dir)) if not self.ignore: with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))): result = self.locking_node.run("mkdir " + quote(self._hard_lock_dir()), may_fail=True) if result.return_code != 0: info = self._get_hard_lock_info(local_path) expired = False try: d = info['date'] except KeyError: info['date'] = _("<unknown>") info['duration'] = _("<unknown>") else: duration = datetime.now() - datetime.fromtimestamp(d) info['date'] = format_timestamp(d) info['duration'] = format_duration(duration) if duration > parse_duration(environ.get('BW_HARDLOCK_EXPIRY', "8h")): expired = True io.debug("ignoring expired hard lock on {}".format(self.node.name)) if 'user' not in info: info['user'] = _("<unknown>") if expired or self.ignore or (self.interactive and io.ask( self._warning_message_hard(info), False, epilogue=blue("?") + " " + bold(self.node.name), )): pass else: raise NodeLockedException(info) with io.job(_("{node} uploading lock file").format(node=bold(self.node.name))): if self.ignore: self.locking_node.run("mkdir -p " + quote(self._hard_lock_dir())) with open(local_path, 'w') as f: f.write(json.dumps({ 'date': time(), 'user': identity(), })) self.locking_node.upload(local_path, self._hard_lock_file()) return self def __exit__(self, type, value, traceback): if self.locking_node.os not in self.locking_node.OS_FAMILY_UNIX: # no locking required/possible return with io.job(_("{node} removing hard lock").format(node=bold(self.node.name))): result = self.locking_node.run("rm -R {}".format(quote(self._hard_lock_dir())), may_fail=True) if result.return_code != 0: io.stderr(_("{x} {node} could not release hard lock").format( node=bold(self.node.name), x=red("!"), )) def _get_hard_lock_info(self, local_path): try: self.locking_node.download(self._hard_lock_file(), local_path) with open(local_path, 'r') as fp: return json.load(fp) except (RemoteException, ValueError): io.stderr(_( "{x} {node_bold} corrupted hard lock: " "unable to read or parse lock file contents " "(clear it with `bw run {node} 'rm -Rf {path}'`)" ).format( node_bold=bold(self.locking_node.name), node=self.locking_node.name, path=self._hard_lock_dir(), x=red("!"), )) return {} def _hard_lock_dir(self): return
self.locking_node.lock_dir + "/hard-" + quote(self.node.name) def _hard_lock_file(self): return self._hard_lock_dir() + "/info" def _warning_message_hard(self, info): return wrap_question( red(_("NODE LOCKED")), _( "Looks like somebody is currently using BundleWrap on this node.\n" "You should let them finish or override the lock if it has gone stale.\n" "\n" "locked by {user}\n" " since {date} ({duration} ago)" ).format( user=bold(info['user']), date=info['date'], duration=info['duration'], ), bold(_("Override lock?")), prefix="{x} {node} ".format(node=bold(self.node.name), x=blue("?")), ) @cached_property def soft_locks(self): return softlock_list(self.node) @cached_property def my_soft_locks(self): for lock in self.soft_locks: if lock['user'] == identity(): yield lock @cached_property def other_peoples_soft_locks(self): for lock in self.soft_locks: if lock['user'] != identity(): yield lock def _get_locking_node(node): if node.locking_node is not None: try: return node.repo.get_node(node.locking_node) except NoSuchNode: raise Exception("Invalid locking_node {} for {}".format( node.locking_node, node.name, )) else: return node def _soft_lock_dir(node_name, locking_node): return locking_node.lock_dir + "/soft-" + quote(node_name) def _soft_lock_file(node_name, locking_node, lock_id): return _soft_lock_dir(node_name, locking_node) + "/" + lock_id def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None): locking_node = _get_locking_node(node) assert locking_node.os in locking_node.OS_FAMILY_UNIX if "\n" in comment: raise ValueError(_("Lock comments must not contain any newlines")) if not item_selectors: item_selectors = ["*"] expiry_timedelta = parse_duration(expiry) now = time() expiry_timestamp = now + expiry_timedelta.days * 86400 + expiry_timedelta.seconds content = json.dumps({ 'comment': comment, 'date': now, 'expiry': expiry_timestamp, 'id': lock_id, 'items': item_selectors, 'user': identity(), }, indent=None, sort_keys=True) with tempfile() as local_path: with open(local_path, 'w') as f: f.write(content + "\n") locking_node.run("mkdir -p " + quote(_soft_lock_dir(node.name, locking_node))) locking_node.upload(local_path, _soft_lock_file(node.name, locking_node, lock_id), mode='0644') node.repo.hooks.lock_add(node.repo, node, lock_id, item_selectors, expiry_timestamp, comment) return lock_id def softlock_list(node): locking_node = _get_locking_node(node) if locking_node.os not in locking_node.OS_FAMILY_UNIX: return [] with io.job(_("{} checking soft locks").format(bold(node.name))): cat = locking_node.run("cat {}".format(_soft_lock_file(node.name, locking_node, "*")), may_fail=True) if cat.return_code != 0: return [] result = [] for line in cat.stdout.decode('utf-8').strip().split("\n"): try: result.append(json.loads(line.strip())) except json.decoder.JSONDecodeError: io.stderr(_( "{x} {node} unable to parse soft lock file contents, ignoring: {line}" ).format( x=red("!"), node=bold(node.name), line=line.strip(), )) for lock in result[:]: if lock['expiry'] < time(): io.debug(_("removing expired soft lock {id} from node {node}").format( id=lock['id'], node=node.name, )) softlock_remove(node, lock['id']) result.remove(lock) return result def softlock_remove(node, lock_id): locking_node = _get_locking_node(node) assert locking_node.os in locking_node.OS_FAMILY_UNIX io.debug(_("removing soft lock {id} from node {node}").format( id=lock_id, node=node.name, )) locking_node.run("rm {}".format(_soft_lock_file(node.name, locking_node, lock_id))) 
node.repo.hooks.lock_remove(node.repo, node, lock_id) bundlewrap-4.13.6/bundlewrap/metadata.py000066400000000000000000000255441417502274300202640ustar00rootroot00000000000000from copy import copy from hashlib import sha1 from json import dumps, JSONEncoder from .exceptions import RepositoryError from .utils import Fault from .utils.dicts import ATOMIC_TYPES, map_dict_keys, merge_dict, value_at_key_path from .utils.text import force_text, mark_for_translation as _ METADATA_TYPES = ( # only meant for natively atomic types bool, bytes, Fault, int, str, type(None), ) class DoNotRunAgain(Exception): """ Raised from metadata reactors to indicate they can be disregarded. """ pass def deepcopy_metadata(obj): """ Our own version of deepcopy.copy that doesn't pickle since some Fault callbacks are unpicklable. """ if isinstance(obj, METADATA_TYPES): return obj elif isinstance(obj, dict): new_obj = { copy(key): deepcopy_metadata(value) for key, value in obj.items() } elif isinstance(obj, (list, tuple, set)): new_obj = [ deepcopy_metadata(value) for value in obj ] else: assert False # there should be no other types # Try to preserve the original type, even if its a superclass of # dict, list, tuple or set. return type(obj)(new_obj) def validate_metadata(metadata, _top_level=True): if _top_level and not isinstance(metadata, dict): raise TypeError(_("metadata must be a dict")) if isinstance(metadata, dict): for key, value in metadata.items(): if not isinstance(key, str): raise TypeError(_("metadata keys must be str: {value} is {type}").format( type=type(key), value=repr(key), )) validate_metadata(value, _top_level=False) elif isinstance(metadata, (tuple, list, set)): for value in metadata: validate_metadata(value, _top_level=False) elif not isinstance(metadata, METADATA_TYPES): raise TypeError(_("illegal metadata value type: {value} is {type}").format( type=type(metadata), value=repr(metadata), )) def atomic(obj): """ Wraps a compatible object in a custom class to prevent it from being merged with another object of the same type during metadata compilation. """ try: cls = ATOMIC_TYPES[type(obj)] except KeyError: raise ValueError("atomic() can only be applied to dicts, lists, sets, or tuples " "(not: {})".format(repr(obj))) else: return cls(obj) def check_for_metadata_conflicts(node): check_for_metadata_conflicts_between_groups(node) check_for_metadata_conflicts_between_defaults_and_reactors(node) def check_for_metadata_conflicts_between_defaults_and_reactors(node): """ Finds conflicting metadata keys in bundle defaults and reactors. Dicts can be merged with dicts, sets can be merged with sets, but any other combination is a conflict. 
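    Illustrative example: two "metadata_defaults:" layers may both
    contribute to metadata['foo'] as long as both values are dicts or
    both are sets (those merge cleanly); providing e.g. {'foo': 1} in
    one layer and {'foo': 2} in another raises a ValueError below.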
""" TYPE_DICT = 1 TYPE_SET = 2 TYPE_OTHER = 3 def paths_with_values_and_types(d): for path in map_dict_keys(d): value = value_at_key_path(d, path) if isinstance(value, dict): yield path, value, TYPE_DICT elif isinstance(value, set): yield path, value, TYPE_SET else: yield path, value, TYPE_OTHER for prefix in ("metadata_defaults:", "metadata_reactor:"): paths = {} node.metadata.get(()) # ensure full metadata is present for partition in node.metadata.stack._partitions: for identifier, layer in partition.items(): if identifier.startswith(prefix): for path, value, current_type in paths_with_values_and_types(layer): try: prev_type, prev_identifier, prev_value = paths[path] except KeyError: paths[path] = current_type, identifier, value else: if ( prev_type == TYPE_DICT and current_type == TYPE_DICT ): pass elif ( prev_type == TYPE_SET and current_type == TYPE_SET ): pass elif value != prev_value: raise ValueError(_( "{node}: {a} and {b} are clashing over this key path: {path} " "({val_a} vs. {val_b})" ).format( a=identifier, b=prev_identifier, node=node.name, path="/".join(path), val_a=repr(value), val_b=repr(prev_value), )) def _recurse_group_tree(path): tail = path[-1] if len(tail.immediate_parent_groups) == 0: yield path else: for p in tail.immediate_parent_groups: yield from _recurse_group_tree(path + [p]) def check_for_metadata_conflicts_between_groups(node): """ Finds metadata keys defined by two groups that are not part of a shared subgroup hierarchy. """ # First, we build a list of subgroup chains. # # A chain is simply a list of groups starting with a parent group # that has no parent groups itself and then descends depth-first # into its subgroups until a subgroup is reached that the node is # not a member of. # Every possible path on every subgroup tree is a separate chain. # # group4 # / \ # group2 group3 # \ / # group1 # # This example has two chains, even though both start and end at the # some groups: # # group1 -> group2 -> group4 # group1 -> group3 -> group4 # chains = [] for group in node.immediate_groups: chains.extend(_recurse_group_tree([group])) # chains now look like this (parents right of children): # [ # [group1], # [group2, group3, group5], # [group2, group4, group5], # [group2, group4, group6, group7], # ] # let's merge metadata for each chain chain_metadata = [] for chain in chains: metadata = {} for group in chain: metadata = merge_dict(metadata, group._attributes.get('metadata', {})) chain_metadata.append(metadata) # create a "key path map" for each chain's metadata chain_metadata_keys = [list(map_dict_keys(metadata)) for metadata in chain_metadata] # compare all metadata keys with other chains and find matches for index1, keymap1 in enumerate(chain_metadata_keys): for keypath in keymap1: for index2, keymap2 in enumerate(chain_metadata_keys): if index1 == index2: # same keymap, don't compare continue else: if keypath in keymap2: if ( type(value_at_key_path(chain_metadata[index1], keypath)) is type(value_at_key_path(chain_metadata[index2], keypath)) and type(value_at_key_path(chain_metadata[index2], keypath)) in (set, dict) ): continue # We now know that there is a conflict between the first # and second chain we're looking at right now. # That is however not a problem if the conflict is caused # by a group that is present in both chains. # So all that's left is to figure out which two single groups # within those chains are at fault so we can report them # to the user if necessary. 
find_groups_causing_metadata_conflict( node.name, chains[index1], chains[index2], keypath, ) def find_groups_causing_metadata_conflict(node_name, chain1, chain2, keypath): """ Given two chains (lists of groups), find one group in each chain that has conflicting metadata with the other for the given key path. """ chain1_metadata = [ list(map_dict_keys(group._attributes.get('metadata', {}))) for group in chain1 ] chain2_metadata = [ list(map_dict_keys(group._attributes.get('metadata', {}))) for group in chain2 ] bad_keypath = None for index1, keymap1 in enumerate(chain1_metadata): for index2, keymap2 in enumerate(chain2_metadata): if chain1[index1] == chain2[index2]: # same group, ignore continue if ( keypath in keymap1 and keypath in keymap2 and chain1[index1] not in chain2[index2].subgroups and chain2[index2] not in chain1[index1].subgroups ): bad_keypath = keypath bad_group1 = chain1[index1] bad_group2 = chain2[index2] if bad_keypath is not None: raise RepositoryError(_( "Conflicting metadata keys between groups '{group1}' and '{group2}' on node '{node}':\n\n" " metadata['{keypath}']\n\n" "You must either connect both groups through subgroups or have them not define " "conflicting metadata keys. Otherwise there is no way for BundleWrap to determine " "which group's metadata should win when they are merged." ).format( keypath="']['".join(bad_keypath), group1=bad_group1.name, group2=bad_group2.name, node=node_name, )) class MetadataJSONEncoder(JSONEncoder): def default(self, obj): if isinstance(obj, Fault): return obj.value if isinstance(obj, set): return sorted(obj) if isinstance(obj, bytes): return force_text(obj) else: raise ValueError(_("illegal metadata value type: {value} is {type}").format( type=type(obj), value=repr(obj), )) def metadata_to_json(metadata, sort_keys=True): if not isinstance(metadata, dict): # might be NodeMetadataProxy metadata = dict(metadata) return dumps( metadata, cls=MetadataJSONEncoder, indent=4, sort_keys=sort_keys, ) def hash_metadata(sdict): """ Returns a canonical SHA1 hash to describe this dict. 
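    Illustrative property (the digest itself depends on the canonical
    JSON produced by metadata_to_json() above): equal dicts always
    yield the same 40-character hex digest, so e.g.
    hash_metadata({'a': 1}) == hash_metadata({'a': 1}) holds.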
""" return sha1(metadata_to_json(sdict).encode('utf-8')).hexdigest() bundlewrap-4.13.6/bundlewrap/metagen.py000066400000000000000000000466771417502274300201360ustar00rootroot00000000000000from collections import defaultdict, Counter from collections.abc import Mapping from contextlib import suppress from os import environ from threading import RLock from traceback import TracebackException from .exceptions import MetadataPersistentKeyError from .metadata import DoNotRunAgain from .node import _flatten_group_hierarchy from .utils import list_starts_with, randomize_order, NO_DEFAULT from .utils.dicts import extra_paths_in_dict from .utils.ui import io, QUIT_EVENT from .utils.metastack import Metastack from .utils.text import bold, mark_for_translation as _, red MAX_METADATA_ITERATIONS = int(environ.get("BW_MAX_METADATA_ITERATIONS", "1000")) class ReactorTree: def __init__(self, path_location=None): self._path_location = path_location self._children = {} self._reactors = set() def add(self, reactor, path): if path: self._children.setdefault( path[0], ReactorTree(path_location=path[0]), ).add(reactor, path[1:]) else: self._reactors.add(reactor) def reactors_for(self, path=None): yield from self._reactors if path: try: child = self._children[path[0]] except KeyError: pass else: yield from child.reactors_for(path[1:]) else: # yield entire subtree for child in self._children.values(): yield from child.reactors_for() class PathSet: """ Collects metadata paths and stores only the highest levels ones. >>> s = PathSet() >>> s.add(("foo", "bar")) >>> s.add(("foo",)) >>> s {"foo"} """ def __init__(self, paths=()): self._covers_cache = {} self._paths = set() for path in paths: self.add(path) def __iter__(self): for path in self._paths: yield path def __len__(self): return len(self._paths) def __repr__(self): return "".format(repr(self._paths)) def add(self, new_path): if self.covers(new_path): return False for existing_path in self._paths.copy(): if list_starts_with(existing_path, new_path): self._paths.remove(existing_path) self._covers_cache = {} self._paths.add(new_path) return True def covers(self, candidate_path): """ Returns True if the given path is already included. 
""" try: return self._covers_cache[candidate_path] except KeyError: result = False for existing_path in self._paths: if list_starts_with(candidate_path, existing_path): result = True break self._covers_cache[candidate_path] = result return result class NodeMetadataProxy(Mapping): def __init__(self, metagen, node): self._metagen = metagen self._node = node self._completed_paths = PathSet() self._metastack = Metastack() def __contains__(self, key): try: self.get(key, _backwards_compatibility_default=False) except KeyError: return False else: return True def __getitem__(self, key): return self.get((key,), _backwards_compatibility_default=False) def __iter__(self): for key, value in self.get(tuple()).items(): yield key, value def __len__(self): return len(self.keys()) @property def blame(self): if self._metagen._in_a_reactor: raise RuntimeError("cannot call node.metadata.blame from a reactor") else: return self._metastack.as_blame() @property def stack(self): if self._metagen._in_a_reactor: raise RuntimeError("cannot call node.metadata.stack from a reactor") else: return self._metastack def get(self, path, default=NO_DEFAULT, _backwards_compatibility_default=True): if ( default == NO_DEFAULT and _backwards_compatibility_default and not self._metagen._in_a_reactor and "/" not in path ): # make node.metadata.get('foo') work as if it was still a dict # TODO remove in 5.0 default = None if not isinstance(path, (tuple, list)): path = tuple(path.split("/")) if self._metagen._in_a_reactor and self._metagen._record_reactor_call_graph: for provided_path in self._metagen._current_reactor_provides: self._metagen._reactor_call_graph.add(( (self._metagen._current_reactor[0], provided_path), (self._node.name, path), self._metagen._current_reactor, )) with self._metagen._node_metadata_lock: # The lock is required because there are several thread-unsafe things going on here: # # self._metagen._current_reactor_newly_requested_paths # self._metagen._build_node_metadata # self._metastack # # It needs to be an RLock because this method will be recursively # called from _build_node_metadata (when reactors call node.metadata.get()). 
if self._node not in self._metagen._relevant_nodes: self._metagen._initialize_node(self._node) if self._metagen._in_a_reactor: if self._metagen._reactors[self._metagen._current_reactor]['requested_paths'].add( (self._node.name,) + path ): self._metagen._current_reactor_newly_requested_paths.add( (self._node.name,) + path ) elif not self._completed_paths.covers(path): io.debug(f"metagen triggered by request for {path} on {self._node.name}") self._metagen._trigger_reactors_for_path( (self._node.name,) + path, f"initial request for {path}", ) with io.job(_("building metadata...")): self._metagen._build_node_metadata(self._node) self._completed_paths.add(path) try: return self._metastack.get(path) except KeyError as exc: if default != NO_DEFAULT: return default else: if self._metagen._in_a_reactor: self._metagen._reactors_with_keyerrors[self._metagen._current_reactor] = \ ((self._node.name, path), exc) raise exc def items(self): return self.get(tuple()).items() def keys(self): return self.get(tuple()).keys() def values(self): return self.get(tuple()).values() class MetadataGenerator: def __init__(self): # node.metadata calls these self._node_metadata_proxies = {} # metadata access is multi-threaded, but generation can't be self._node_metadata_lock = RLock() # guard against infinite loops self.__iterations = 0 # all nodes involved with currently requested metadata self._relevant_nodes = set() # keep track of reactors and their dependencies self._reactors = {} # which reactors are currently triggered (and by what) self._reactors_triggered = defaultdict(set) # which reactors raised a KeyError (and for what) self._reactors_with_keyerrors = {} # maps provided paths to their reactors self._provides_tree = ReactorTree() # how often each reactor changed self._reactor_changes = defaultdict(int) # bw plot reactors self._reactor_call_graph = set() self._reactor_runs = defaultdict(int) # are we currently executing a reactor? self._in_a_reactor = False # all new paths not requested before by the current reactor self._current_reactor_newly_requested_paths = set() # should reactor return values be checked against their declared keys? self._verify_reactor_provides = False # should we collect information for `bw plot reactors`? 
self._record_reactor_call_graph = False def _metadata_proxy_for_node(self, node_name): if node_name not in self._node_metadata_proxies: self._node_metadata_proxies[node_name] = \ NodeMetadataProxy(self, self.get_node(node_name)) return self._node_metadata_proxies[node_name] def _build_node_metadata(self, initial_node_name): self.__iterations = 0 while True: self.__check_iteration_count() io.debug("starting reactor run") reactors_run, only_keyerrors = self.__run_reactors() if not reactors_run: io.debug("reactor run completed, no reactors ran") # TODO maybe proxy._metastack.cache_partition(1) for COMPLETE nodes break elif only_keyerrors: if set(self._reactors_triggered.keys()).difference(reactors_run): io.debug("all reactors raised KeyErrors, but new ones were triggered") else: io.debug("reactor run completed, all threw KeyErrors") break io.debug("reactor run completed, rerunning relevant reactors") if self._reactors_with_keyerrors: msg = _( "These metadata reactors raised a KeyError " "even after all other reactors were done:" ) for source, path_exc in sorted(self._reactors_with_keyerrors.items()): node_name, reactor = source path, exc = path_exc msg += f"\n\n {node_name} {reactor}\n accessing {path}\n\n" for line in TracebackException.from_exception(exc).format(): msg += " " + line raise MetadataPersistentKeyError(msg) io.debug("metadata generation finished") def _initialize_node(self, node): io.debug(f"initializing metadata for {node.name}") with io.job(_("{} assembling static metadata").format(bold(node.name))): # randomize order to increase chance of exposing clashing defaults for defaults_name, defaults in randomize_order(node.metadata_defaults): node.metadata._metastack.set_layer( 2, defaults_name, defaults, ) node.metadata._metastack.cache_partition(2) group_order = _flatten_group_hierarchy(node.groups) for group_name in group_order: node.metadata._metastack.set_layer( 0, "group:{}".format(group_name), self.get_group(group_name)._attributes.get('metadata', {}), ) node.metadata._metastack.set_layer( 0, "node:{}".format(node.name), node._attributes.get('metadata', {}), ) node.metadata._metastack.cache_partition(0) with io.job(_("{} preparing metadata reactors").format(bold(node.name))): io.debug(f"adding {len(list(node.metadata_reactors))} reactors for {node.name}") for reactor_name, reactor in randomize_order(node.metadata_reactors): # randomizing insertion order increases the chance of # exposing weird reactors that depend on execution order self._reactors[(node.name, reactor_name)] = { 'raised_donotrunagain': False, 'reactor': reactor, 'requested_paths': PathSet(), 'trigger_on_change': set(), } for path in getattr(reactor, '_provides', ((),)): self._provides_tree.add( (node.name, reactor_name), (node.name,) + path, ) self._relevant_nodes.add(node) def _trigger_reactors_for_path(self, path, source): result = set() for reactor in self._provides_tree.reactors_for(path): if self._reactors[reactor]['raised_donotrunagain']: continue if reactor != source: # we don't want to trigger ourselves io.debug(f"{source} triggers {reactor}") self._reactors_triggered[reactor].add(source) result.add(reactor) return result def __check_iteration_count(self): self.__iterations += 1 if self.__iterations > MAX_METADATA_ITERATIONS: top_changers = Counter(self._reactor_changes).most_common(25) msg = _( "MAX_METADATA_ITERATIONS({m}) exceeded, " "likely an infinite loop between flip-flopping metadata reactors.\n" "These are the reactors that changed most often:\n\n" ).format(m=MAX_METADATA_ITERATIONS) for 
reactor, count in top_changers: msg += f" {count}\t{reactor[0]}\t{reactor[1]}\n" raise RuntimeError(msg) def __reactors_to_run(self): reactors_triggered = self._reactors_triggered self._reactors_triggered = defaultdict(set) reactors_with_keyerrors = self._reactors_with_keyerrors self._reactors_with_keyerrors = {} for reactor_id, triggers in reactors_triggered.items(): yield ( reactor_id, f"running reactor {reactor_id} because " f"it was triggered by: {triggers}", ) for reactor_id, path_exc in reactors_with_keyerrors.items(): yield ( reactor_id, f"running reactor {reactor_id} because " f"it previously raised a KeyError for: {path_exc[0]}" ) def __run_reactors(self): reactors_run = set() only_keyerrors = True for reactor_id, debug_msg in self.__reactors_to_run(): if QUIT_EVENT.is_set(): # It's important that we don't just `break` here and # end up returning incomplete metadata. raise KeyboardInterrupt reactors_run.add(reactor_id) node_name, reactor_name = reactor_id io.debug(debug_msg) with io.job(_("building metadata ({} nodes, {} reactors, {} iterations)...").format( len(self._relevant_nodes), len(self._reactors), self.__iterations, )): self.__run_reactor( self.get_node(node_name), reactor_name, self._reactors[reactor_id]['reactor'], ) if (node_name, reactor_name) not in self._reactors_with_keyerrors: only_keyerrors = False return reactors_run, only_keyerrors def __run_reactor(self, node, reactor_name, reactor): # make sure the reactor doesn't react to its own output old_metadata = node.metadata._metastack.pop_layer(1, reactor_name) self._in_a_reactor = True self._current_reactor = (node.name, reactor_name) self._current_reactor_provides = getattr(reactor, '_provides', (("/",),)) # used in .get() self._current_reactor_newly_requested_paths = set() self._reactor_runs[self._current_reactor] += 1 try: new_metadata = reactor(node.metadata) except KeyError as exc: if self._current_reactor not in self._reactors_with_keyerrors: # Uncomment this in 5.0 and remove the rest of this block # # this is a KeyError that didn't result from metadata.get() # io.stderr(_( # "{x} KeyError while executing metadata reactor " # "{metaproc} for node {node}:" # ).format( # x=red("!!!"), # metaproc=reactor_name, # node=node.name, # )) # raise exc self._reactors_with_keyerrors[self._current_reactor] = ( ('UNKNOWN', ('UNKNOWN',)), exc, ) io.debug( f"{self._current_reactor} raised KeyError: " f"{self._reactors_with_keyerrors[self._current_reactor]}" ) return False except DoNotRunAgain: self._reactors[self._current_reactor]['raised_donotrunagain'] = True # clear any previously stored exception with suppress(KeyError): del self._reactors_with_keyerrors[self._current_reactor] self._current_reactor_newly_requested_paths.clear() io.debug(f"{self._current_reactor} raised DoNotRunAgain") return False except Exception as exc: io.stderr(_( "{x} Exception while executing metadata reactor " "{metaproc} for node {node}:" ).format( x=red("!!!"), metaproc=reactor_name, node=node.name, )) raise exc finally: self._in_a_reactor = False with suppress(KeyError): del self._reactors_triggered[self._current_reactor] for path in self._current_reactor_newly_requested_paths: for needed_reactor in self._trigger_reactors_for_path(path, self._current_reactor): self._reactors[needed_reactor]['trigger_on_change'].add(self._current_reactor) # reactor terminated normally, clear any previously stored exception with suppress(KeyError): del self._reactors_with_keyerrors[self._current_reactor] if self._verify_reactor_provides and getattr(reactor, 
'_provides', None): extra_paths = extra_paths_in_dict(new_metadata, reactor._provides) if extra_paths: raise ValueError(_( "{reactor_name} on {node_name} returned the following key paths, " "but didn't declare them with @metadata_reactor.provides():\n" "{paths}" ).format( node_name=node.name, reactor_name=reactor_name, paths="\n".join(["/".join(path) for path in sorted(extra_paths)]), )) try: node.metadata._metastack.set_layer( 1, reactor_name, new_metadata, ) except TypeError as exc: # TODO catch validation errors better io.stderr(_( "{x} Exception after executing metadata reactor " "{metaproc} for node {node}:" ).format( x=red("!!!"), metaproc=reactor_name, node=node.name, )) raise exc if old_metadata != new_metadata: io.debug(f"{self._current_reactor} returned changed result") self._reactor_changes[self._current_reactor] += 1 for triggered_reactor in self._reactors[self._current_reactor]['trigger_on_change']: io.debug(f"rerun of {triggered_reactor} triggered by {self._current_reactor}") self._reactors_triggered[triggered_reactor].add(self._current_reactor) else: io.debug(f"{self._current_reactor} returned same result") bundlewrap-4.13.6/bundlewrap/node.py000066400000000000000000001074311417502274300174250ustar00rootroot00000000000000from contextlib import suppress from datetime import datetime, timedelta from hashlib import md5 from os import environ, mkdir from os.path import exists, join from threading import Lock from tomlkit import dumps as toml_dump, parse as toml_parse from . import operations from .bundle import Bundle from .concurrency import WorkerPool from .deps import find_item, ItemDependencyLoop from .exceptions import ( BundleError, GracefulApplyException, ItemSkipped, NodeLockedException, NoSuchBundle, RemoteException, RepositoryError, SkipNode, ) from .group import GROUP_ATTR_DEFAULTS, GROUP_ATTR_TYPES from .itemqueue import ItemQueue from .items import Item from .lock import NodeLock from .metadata import hash_metadata from .utils import cached_property, error_context, get_file_contents, names from .utils.dicts import ( dict_to_text, dict_to_toml, diff_dict, hash_statedict, set_key_at_path, validate_dict, COLLECTION_OF_STRINGS, ) from .utils.text import ( blue, bold, cyan, force_text, format_duration, green, mark_for_translation as _, prefix_lines, red, toml_clean, validate_name, yellow, ) from .utils.ui import io NODE_ATTR_TYPES = GROUP_ATTR_TYPES.copy() NODE_ATTR_TYPES['groups'] = COLLECTION_OF_STRINGS NODE_ATTR_TYPES['hostname'] = str class ApplyResult: """ Holds information about an apply run for a node. 
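    Tallies item results into .correct, .fixed, .skipped, .failed and
    .total; .start and .end must be set by the caller before .duration
    is read (see below).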
""" def __init__(self, node, item_results): self.node_name = node.name self.correct = 0 self.fixed = 0 self.skipped = 0 self.failed = 0 self.total = 0 for item_id, result, duration in item_results: self.total += 1 if result == Item.STATUS_ACTION_SUCCEEDED: self.correct += 1 elif result == Item.STATUS_OK: self.correct += 1 elif result == Item.STATUS_FIXED: self.fixed += 1 elif result == Item.STATUS_SKIPPED: self.skipped += 1 elif result == Item.STATUS_FAILED: self.failed += 1 else: raise RuntimeError(_( "can't make sense of results for {} on {}: {}" ).format(item_id, self.node_name, result)) self.start = None self.end = None def __lt__(self, other): return self.node_name < other.node_name @property def duration(self): return self.end - self.start def format_node_result(result): output = [] output.append(("{count} OK").format(count=result.correct)) if result.fixed: output.append(green(_("{count} fixed").format(count=result.fixed))) else: output.append(_("{count} fixed").format(count=result.fixed)) if result.skipped: output.append(yellow(_("{count} skipped").format(count=result.skipped))) else: output.append(_("{count} skipped").format(count=result.skipped)) if result.failed: output.append(red(_("{count} failed").format(count=result.failed))) else: output.append(_("{count} failed").format(count=result.failed)) return ", ".join(output) def handle_apply_result( node, item, status_code, interactive=False, details=None, show_diff=True, created=None, deleted=None, ): if status_code == Item.STATUS_SKIPPED and details in ( Item.SKIP_REASON_NO_TRIGGER, Item.SKIP_REASON_UNLESS, ): # skipped for "unless" or "not triggered", don't output those return formatted_result = format_item_result( status_code, node.name, item.bundle.name, item.id, interactive=interactive, details=details, show_diff=show_diff, created=created, deleted=deleted, ) if formatted_result is not None: if status_code == Item.STATUS_FAILED: io.stderr(formatted_result) if item._command_results: io.stderr(format_item_command_results(item._command_results)) # free up memory del item._command_results else: io.stdout(formatted_result) def apply_items( node, autoskip_selector="", autoonly_selector="", my_soft_locks=(), other_peoples_soft_locks=(), workers=1, interactive=False, show_diff=True, ): item_queue = ItemQueue(node) # the item queue might contain new generated items (canned actions) # adjust progress total accordingly extra_items = len(item_queue.all_items) - len(node.items) io.progress_increase_total(increment=extra_items) results = [] def tasks_available(): return bool(item_queue.items_without_deps) def next_task(): item = item_queue.pop() return { 'task_id': "{}:{}".format(node.name, item.id), 'target': item.apply, 'kwargs': { 'autoskip_selector': autoskip_selector, 'autoonly_selector': autoonly_selector, 'my_soft_locks': my_soft_locks, 'other_peoples_soft_locks': other_peoples_soft_locks, 'interactive': interactive, 'show_diff': show_diff, }, } def handle_result(task_id, return_value, duration): item_id = task_id.split(":", 1)[1] item = find_item(item_id, item_queue.pending_items) status_code, details, created, deleted = return_value if status_code == Item.STATUS_FAILED: for skipped_item in item_queue.item_failed(item): handle_apply_result( node, skipped_item, Item.STATUS_SKIPPED, interactive=interactive, details=Item.SKIP_REASON_DEP_FAILED, ) results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) elif status_code in (Item.STATUS_FIXED, Item.STATUS_ACTION_SUCCEEDED): item_queue.item_fixed(item) elif status_code == 
Item.STATUS_OK: item_queue.item_ok(item) elif status_code == Item.STATUS_SKIPPED: for skipped_item in item_queue.item_skipped(item): skip_reason = Item.SKIP_REASON_DEP_SKIPPED for lock in other_peoples_soft_locks: for selector in lock['items']: if skipped_item.covered_by_autoskip_selector(selector): skip_reason = Item.SKIP_REASON_SOFTLOCK break handle_apply_result( node, skipped_item, Item.STATUS_SKIPPED, interactive=interactive, details=skip_reason, ) results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) else: raise AssertionError(_( "unknown item status returned for {item}: {status}".format( item=item.id, status=repr(status_code), ), )) handle_apply_result( node, item, status_code, interactive=interactive, details=details, show_diff=show_diff, created=created, deleted=deleted, ) io.progress_advance() results.append((item.id, status_code, duration)) def handle_exception(task_id, exc, traceback): item_id = task_id.split(":", 1)[1] item = find_item(item_id, item_queue.pending_items) for skipped_item in item_queue.item_failed(item): handle_apply_result( node, skipped_item, Item.STATUS_SKIPPED, interactive=interactive, details=Item.SKIP_REASON_DEP_FAILED, ) results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) handle_apply_result( node, item, Item.STATUS_FAILED, interactive=interactive, details=None, show_diff=False, ) results.append((item.id, Item.STATUS_FAILED, timedelta(0))) output = prefix_lines(traceback, red("│ ")) output += prefix_lines(repr(exc), red("│ ")) output += red("╵") io.stderr(output) io.progress_advance() worker_pool = WorkerPool( tasks_available, next_task, handle_result=handle_result, handle_exception=handle_exception, pool_id="apply_{}".format(node.name), workers=workers, ) worker_pool.run() # we have no items without deps left and none are processing # there must be a loop if item_queue.items_with_deps: raise ItemDependencyLoop(item_queue.items_with_deps) return results def _flatten_group_hierarchy(groups): """ Takes a list of groups and returns a list of group names ordered so that parent groups will appear before any of their subgroups. 
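    Illustrative example: if group "all" has the subgroup "webservers",
    which in turn has the subgroup "nginx", passing all three groups
    yields ["all", "webservers", "nginx"].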
""" # dict mapping groups to subgroups child_groups = {} for group in groups: child_groups[group.name] = list(names(group.subgroups)) # dict mapping groups to parent groups parent_groups = {} for child_group in child_groups: parent_groups[child_group] = [] for parent_group, subgroups in child_groups.items(): if child_group in subgroups: parent_groups[child_group].append(parent_group) order = [] while True: top_level_group = None for group, parents in parent_groups.items(): if parents: continue else: top_level_group = group break if not top_level_group: if parent_groups: raise RuntimeError( _("encountered subgroup loop that should have been detected") ) else: break order.append(top_level_group) del parent_groups[top_level_group] for group in parent_groups: if top_level_group in parent_groups[group]: parent_groups[group].remove(top_level_group) return order def format_item_command_results(results): output = "" for result in results: stdout = result['result'].stdout_text.strip() stderr = result['result'].stderr_text.strip() # show command output += "{b}".format(b=red('│')) output += "\n{b} {command} (return code: {code}{no_output})\n".format( b=red('├─'), command=bold(result['command']), code=bold(result['result'].return_code), no_output='' if stdout or stderr else '; no output' ) # show output lines = "" if stdout or stderr: lines += "\n" if stdout: lines += stdout.strip() if stderr: lines += stderr.strip() output += prefix_lines(lines, red("│ ")) output += red("╵") return output.lstrip('\n') def format_item_result( result, node, bundle, item, interactive=False, details=None, show_diff=True, created=None, deleted=None, ): if created or deleted or details is None: details_text = "" elif result == Item.STATUS_SKIPPED: details_text = "({})".format(Item.SKIP_REASON_DESC[details]) else: details_text = "({})".format(", ".join(sorted(details[2]))) if result == Item.STATUS_FAILED: if created: status = red(_("failed to create")) elif deleted: status = red(_("failed to delete")) else: status = red(_("failed")) if show_diff and not created and not deleted: output = "{x} {node} {bundle} {item} {status}\n".format( bundle=bold(bundle), item=item, node=bold(node), status=status, x=bold(red("✘")), ) diff = diff_dict(details[1], details[0], skip_missing_in_target=True) output += prefix_lines(diff, f"{red('│')} ") output += red("╵") return output else: return "{x} {node} {bundle} {item} {status} {details}".format( bundle=bold(bundle), details=details_text, item=item, node=bold(node), status=status, x=bold(red("✘")), ) elif result == Item.STATUS_ACTION_SUCCEEDED: return "{x} {node} {bundle} {item} {status}".format( bundle=bold(bundle), item=item, node=bold(node), status=green(_("succeeded")), x=bold(green("✓")), ) elif result == Item.STATUS_SKIPPED: return "{x} {node} {bundle} {item} {status} {details}".format( bundle=bold(bundle), details=details_text, item=item, node=bold(node), x=bold(yellow("»")), status=yellow(_("skipped")), ) elif result == Item.STATUS_FIXED: if created: status = green(_("created")) elif deleted: status = green(_("deleted")) else: status = green(_("fixed")) if not interactive and show_diff: output = "{x} {node} {bundle} {item} {status}\n".format( bundle=bold(bundle), item=item, node=bold(node), x=bold(green("✓")), status=status, ) diff = "\n" if created: diff += dict_to_text(details, value_color=green) elif deleted: diff += dict_to_text(details, value_color=red) else: diff += diff_dict(details[1], details[0], skip_missing_in_target=True) output += prefix_lines(diff, f"{green('│')} ") 
output += green("╵") return output else: return "{x} {node} {bundle} {item} {status}".format( bundle=bold(bundle), item=item, node=bold(node), x=bold(green("✓")), status=status, ) class Node: OS_FAMILY_BSD = ( 'freebsd', 'macos', 'netbsd', 'openbsd', ) OS_FAMILY_DEBIAN = ( 'debian', 'ubuntu', 'raspbian', ) OS_FAMILY_REDHAT = ( 'rhel', 'centos', 'fedora', 'oraclelinux', ) OS_FAMILY_LINUX = ( 'alpine', 'amazonlinux', 'arch', 'opensuse', 'openwrt', 'gentoo', 'linux', ) + \ OS_FAMILY_DEBIAN + \ OS_FAMILY_REDHAT OS_FAMILY_UNIX = OS_FAMILY_BSD + OS_FAMILY_LINUX OS_KNOWN = OS_FAMILY_UNIX + ('kubernetes', 'routeros') def __init__(self, name, attributes=None): if attributes is None: attributes = {} if not validate_name(name): raise RepositoryError(_("'{}' is not a valid node name").format(name)) with error_context(node_name=name): validate_dict(attributes, NODE_ATTR_TYPES) self._add_host_keys = environ.get('BW_ADD_HOST_KEYS', False) == "1" self._attributes = attributes self._ssh_conn_established = False self._ssh_first_conn_lock = Lock() self.file_path = attributes.get('file_path') self.hostname = attributes.get('hostname', name) self.name = name for attr in GROUP_ATTR_DEFAULTS: setattr(self, "_{}".format(attr), attributes.get(attr)) def __lt__(self, other): return self.name < other.name def __repr__(self): return "".format(self.name) @cached_property def bundles(self): with io.job(_("{node} loading bundles").format(node=bold(self.name))): bundle_names = set(self._attributes.get('bundles', set())) for group in self.groups: for bundle_name in set(group._attributes.get('bundles', set())): bundle_names.add(bundle_name) for bundle_name in bundle_names: try: yield Bundle(self, bundle_name) except NoSuchBundle: raise NoSuchBundle(_( "Node '{node}' wants bundle '{bundle}', but it doesn't exist." ).format( bundle=bundle_name, node=self.name, )) @cached_property def cdict(self): node_dict = {} for item in self.items: with suppress(AttributeError): # actions have no cdict node_dict[item.id] = item.hash() return node_dict def covered_by_autoskip_selector(self, autoskip_selector): """ True if this node should be skipped based on the given selector string (e.g. "node:foo,group:bar"). 
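    Note that the selector is iterated component by component, so it
    is expected to be a sequence of strings: e.g. the hypothetical
    ("node:web1", "group:staging") would match the node named "web1"
    as well as every node in the group "staging".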
""" components = [c.strip() for c in autoskip_selector] if "node:{}".format(self.name) in components: return True for group in self.groups: if "group:{}".format(group.name) in components: return True return False def group_membership_hash(self): return hash_statedict(sorted(names(self.groups))) @property def immediate_groups(self): for group in self.repo.groups: if group.name in self._attributes.get('groups', set()): yield self.repo.get_group(group.name) elif self in group._nodes_from_members: yield self.repo.get_group(group.name) else: for pattern in group._member_patterns: if pattern.search(self.name) is not None: yield self.repo.get_group(group.name) @cached_property @io.job_wrapper(_("{} determining groups").format(bold("{0.name}"))) def groups(self): result = set() for group in self.immediate_groups: result.add(group) for parent_group in group.parent_groups: # these are not just the *immediate* parent groups result.add(parent_group) return result def has_any_bundle(self, bundle_list): for bundle_name in bundle_list: if self.has_bundle(bundle_name): return True return False def has_bundle(self, bundle_name): for bundle in self.bundles: if bundle.name == bundle_name: return True return False def hash(self): return hash_statedict(self.cdict) def in_any_group(self, group_list): for group_name in group_list: if self.in_group(group_name): return True return False def in_group(self, group_name): for group in self.groups: if group.name == group_name: return True return False @cached_property def items(self): items = {} if not self.dummy: for bundle in self.bundles: for item in bundle.items: if item.id in items: raise BundleError(_( "duplicate definition of {item} in bundles '{bundle1}' and '{bundle2}'" ).format( item=item.id, bundle1=item.bundle.name, bundle2=items[item.id].bundle.name, )) else: items[item.id] = item return set(items.values()) @cached_property def magic_number(self): return int(md5(self.name.encode('UTF-8')).hexdigest(), 16) def apply( self, autoskip_selector=(), autoonly_selector=(), interactive=False, force=False, show_diff=True, skip_list=(), workers=4, ): if not list(self.items): io.stdout(_("{x} {node} has no items").format( node=bold(self.name), x=yellow("»"), )) return None if self.covered_by_autoskip_selector(autoskip_selector): io.stdout(_("{x} {node} skipped by --skip").format( node=bold(self.name), x=yellow("»"), )) return None if self.name in skip_list: io.stdout(_("{x} {node} skipped by --resume-file").format( node=bold(self.name), x=yellow("»"), )) return None try: self.repo.hooks.node_apply_start( self.repo, self, interactive=interactive, ) except SkipNode as exc: io.stdout(_("{x} {node} skipped by hook ({reason})").format( node=bold(self.name), reason=str(exc) or _("no reason given"), x=yellow("»"), )) return None start = datetime.now() io.stdout(_("{x} {node} {started} at {time}").format( node=bold(self.name), started=bold(_("started")), time=start.strftime("%Y-%m-%d %H:%M:%S"), x=blue("i"), )) error = False try: # Running "true" is meant to catch connection errors early, # but this only works on UNIX-y systems (i.e., not k8s). 
if self.os in self.OS_FAMILY_UNIX: self.run("true") except RemoteException as exc: io.stdout(_("{x} {node} Connection error: {msg}").format( msg=exc, node=bold(self.name), x=red("!"), )) error = _("Connection error (details above)") item_results = [] else: try: with NodeLock(self, interactive=interactive, ignore=force) as lock: item_results = apply_items( self, autoskip_selector=autoskip_selector, autoonly_selector=autoonly_selector, my_soft_locks=lock.my_soft_locks, other_peoples_soft_locks=lock.other_peoples_soft_locks, workers=workers, interactive=interactive, show_diff=show_diff, ) except NodeLockedException as e: if not interactive: io.stderr(_( "{x} {node} already locked by {user} at {date} ({duration} ago, " "`bw apply -f` to override)" ).format( date=bold(e.args[0]['date']), duration=e.args[0]['duration'], node=bold(self.name), user=bold(e.args[0]['user']), x=red("!"), )) error = _("Node locked (details above)") item_results = [] result = ApplyResult(self, item_results) result.start = start result.end = datetime.now() io.stdout(_("{x} {node} {completed} after {time} ({stats})").format( completed=bold(_("completed")), node=bold(self.name), stats=format_node_result(result), time=format_duration(result.end - start), x=blue("i"), )) self.repo.hooks.node_apply_end( self.repo, self, duration=result.duration, interactive=interactive, result=result, ) if error: raise GracefulApplyException(error) else: return result def download(self, remote_path, local_path): return operations.download( self.hostname, remote_path, local_path, add_host_keys=self._add_host_keys, username=self.username, wrapper_inner=self.cmd_wrapper_inner, wrapper_outer=self.cmd_wrapper_outer, ) def get_item(self, item_id): return find_item(item_id, self.items) @property def metadata(self): return self.repo._metadata_proxy_for_node(self.name) def metadata_get(self, *args, **kwargs): # TODO remove in 5.0 return self.metadata.get(*args, _backwards_compatibility_default=False, **kwargs) def metadata_hash(self): return hash_metadata(self.metadata) @property def metadata_defaults(self): for bundle in self.bundles: if bundle._metadata_defaults_and_reactors[0]: yield ( "metadata_defaults:{}".format(bundle.name), bundle._metadata_defaults_and_reactors[0], ) @property def metadata_reactors(self): for bundle in self.bundles: for reactor in bundle._metadata_defaults_and_reactors[1]: yield ( "metadata_reactor:{}.{}".format( bundle.name, reactor.__name__, ), reactor, ) @property def partial_metadata(self): """ Deprecated, remove in 5.0.0 """ return self.metadata def run(self, command, data_stdin=None, may_fail=False, log_output=False, user="root"): assert self.os in self.OS_FAMILY_UNIX if log_output: def log_function(msg): io.stdout("{x} {node} {msg}".format( node=bold(self.name), msg=force_text(msg).rstrip("\n"), x=cyan("›"), )) else: log_function = None if not self._ssh_conn_established: # Sometimes we're opening SSH connections to a node too fast # for OpenSSH to establish the ControlMaster socket for the # second and following connections to use. # To prevent this, we just wait until a first dummy command # has completed on the node before trying to reuse the # multiplexed connection. 
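# For reference, multiplexing is typically enabled through OpenSSH
# client configuration along these lines (illustrative values only):
#
#   Host *
#       ControlMaster auto
#       ControlPath ~/.ssh/controlmasters/%C
#       ControlPersist 1h
#
# The lock below ensures the first ("master") connection is fully
# established before concurrent commands try to reuse its socket.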
if self._ssh_first_conn_lock.acquire(False): try: with io.job(_("{} establishing connection...").format(bold(self.name))): operations.run(self.hostname, "true", add_host_keys=self._add_host_keys) self._ssh_conn_established = True finally: self._ssh_first_conn_lock.release() else: # we didn't get the lock immediately, now we just wait # until it is released before we proceed with self._ssh_first_conn_lock: pass return operations.run( self.hostname, command, add_host_keys=self._add_host_keys, data_stdin=data_stdin, ignore_failure=may_fail, log_function=log_function, username=self.username, wrapper_inner=self.cmd_wrapper_inner, wrapper_outer=self.cmd_wrapper_outer, user=user, ) @cached_property def toml(self): if not self.file_path or not self.file_path.endswith(".toml"): raise ValueError(_("node {} not in TOML format").format(self.name)) return toml_parse(get_file_contents(self.file_path)) def toml_save(self): try: toml_doc = self.toml except ValueError: attributes = self._attributes.copy() del attributes['file_path'] toml_doc = dict_to_toml(attributes) self.file_path = join(self.repo.path, "nodes", self.name + ".toml") if not exists(join(self.repo.path, "nodes")): mkdir(join(self.repo.path, "nodes")) with open(self.file_path, 'w') as f: f.write(toml_clean(toml_dump(toml_doc))) def toml_set(self, path, value): if not isinstance(path, tuple): path = path.split("/") set_key_at_path(self.toml, path, value) def upload(self, local_path, remote_path, mode=None, owner="", group="", may_fail=False): assert self.os in self.OS_FAMILY_UNIX return operations.upload( self.hostname, local_path, remote_path, add_host_keys=self._add_host_keys, group=group, mode=mode, owner=owner, ignore_failure=may_fail, username=self.username, wrapper_inner=self.cmd_wrapper_inner, wrapper_outer=self.cmd_wrapper_outer, ) def verify( self, autoskip_selector=(), autoonly_selector=(), show_all=False, show_diff=True, workers=4, ): result = [] start = datetime.now() if not self.items: io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!"))) else: result = verify_items( self, autoskip_selector=autoskip_selector, autoonly_selector=autoonly_selector, show_all=show_all, show_diff=show_diff, workers=workers, ) return { 'good': result.count(True), 'bad': result.count(False), 'unknown': result.count(None), 'duration': datetime.now() - start, } def build_attr_property(attr, default): def method(self): attr_source = None attr_value = None group_order = [ self.repo.get_group(group_name) for group_name in _flatten_group_hierarchy(self.groups) ] for group in group_order: if getattr(group, attr) is not None: attr_source = "group:{}".format(group.name) attr_value = getattr(group, attr) if getattr(self, "_{}".format(attr)) is not None: attr_source = "node" attr_value = getattr(self, "_{}".format(attr)) if attr_value is None: attr_source = "default" attr_value = default io.debug(_("node {node} gets its {attr} attribute from: {source}").format( node=self.name, attr=attr, source=attr_source, )) return attr_value method.__name__ = "_group_attr_{}".format(attr) # required for cached_property return cached_property(method) for attr, default in GROUP_ATTR_DEFAULTS.items(): setattr(Node, attr, build_attr_property(attr, default)) def verify_items( node, autoskip_selector=(), autoonly_selector=(), show_all=False, show_diff=True, workers=1, ): items = [] for item in node.items: if not item.triggered: items.append(item) else: io.progress_advance() try: # See comment in node.apply(). 
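# I.e. run a cheap no-op command once to surface connection problems
# before the worker pool starts verifying individual items.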
if node.os in node.OS_FAMILY_UNIX: node.run("true") except RemoteException as exc: io.stdout(_("{x} {node} Connection error: {msg}").format( msg=exc, node=bold(node.name), x=red("!"), )) for item in items: io.progress_advance() return [None for item in items] def tasks_available(): return bool(items) def next_task(): while True: try: item = items.pop() except IndexError: return None if item._faults_missing_for_attributes: if item.error_on_missing_fault: item._raise_for_faults() else: io.progress_advance() io.stdout(_("{x} {node} {bundle} {item} ({msg})").format( bundle=bold(item.bundle.name), item=item.id, msg=yellow(_("Fault unavailable")), node=bold(node.name), x=yellow("»"), )) else: return { 'task_id': node.name + ":" + item.bundle.name + ":" + item.id, 'target': item.verify, 'kwargs': { 'autoskip_selector': autoskip_selector, 'autoonly_selector': autoonly_selector, }, } def handle_exception(task_id, exception, traceback): node_name, bundle_name, item_id = task_id.split(":", 2) io.progress_advance() if isinstance(exception, (ItemSkipped, NotImplementedError)): pass else: # Unlike with `bw apply`, it is OK for `bw verify` to encounter # exceptions when getting an item's status. `bw verify` doesn't # care about dependencies and therefore cannot know that looking # up a database user requires the database to be installed in # the first place. item = node.get_item(task_id.split(":", 2)[2]) output = _("{x} {node} {bundle} {item} (unable to get status)\n").format( bundle=bold(bundle_name), item=item_id, node=bold(node_name), x=bold(cyan("?")), ) output += prefix_lines(traceback, cyan("│ ")) output += prefix_lines(repr(exception), cyan("│ ")) if item._command_results: output += format_item_command_results(item._command_results) # free up memory del item._command_results io.stderr(output) return None # count this result as "unknown" def handle_result(task_id, return_value, duration): io.progress_advance() unless_result, item_status, display = return_value node_name, bundle_name, item_id = task_id.split(":", 2) if not unless_result and not item_status.correct: if item_status.must_be_created: details_text = red(_("missing")) elif item_status.must_be_deleted: details_text = red(_("found")) elif show_diff: details_text = "" else: details_text = ", ".join(sorted(display[2])) if show_diff: diff = "\n" if item_status.must_be_created: diff += dict_to_text(display, value_color=green) elif item_status.must_be_deleted: diff += dict_to_text(display, value_color=red) else: diff += diff_dict(display[1], display[0], skip_missing_in_target=True) output = "{x} {node} {bundle} {item} {details}\n".format( bundle=bold(bundle_name), details=details_text, item=item_id, node=bold(node_name), x=red("✘"), ) output += prefix_lines(diff, red('│ ')) io.stderr(output + red("╵")) else: io.stderr("{x} {node} {bundle} {item} {details}".format( bundle=bold(bundle_name), details=details_text, item=item_id, node=bold(node_name), x=red("✘"), )) return False else: if show_all: io.stdout("{x} {node} {bundle} {item}".format( bundle=bold(bundle_name), item=item_id, node=bold(node_name), x=green("✓"), )) return True worker_pool = WorkerPool( tasks_available, next_task, handle_result, handle_exception=handle_exception, pool_id="verify_{}".format(node.name), workers=workers, ) return worker_pool.run() bundlewrap-4.13.6/bundlewrap/operations.py000066400000000000000000000243551417502274300206660ustar00rootroot00000000000000from datetime import datetime from shlex import quote from select import select from shlex import split from 
subprocess import Popen, PIPE from threading import Event, Thread from os import close, environ, pipe, read, setpgrp from .exceptions import RemoteException from .utils import cached_property from .utils.text import force_text, LineBuffer, mark_for_translation as _, randstr from .utils.ui import io def output_thread_body(line_buffer, read_fd, quit_event, read_until_eof): # see run() for details while True: r, w, x = select([read_fd], [], [], 0.1) if r: chunk = read(read_fd, 1024) if chunk: line_buffer.write(chunk) else: # EOF return elif quit_event.is_set() and not read_until_eof: # one last chance to read output after the child process # has died while True: r, w, x = select([read_fd], [], [], 0) if r: line_buffer.write(read(read_fd, 1024)) else: break return def download( hostname, remote_path, local_path, add_host_keys=False, username=None, wrapper_inner="{}", wrapper_outer="{}", ): """ Download a file. """ io.debug(_("downloading {host}:{path} -> {target}").format( host=hostname, path=remote_path, target=local_path)) result = run( hostname, "cat {}".format(quote(remote_path)), # See issue #39. add_host_keys=add_host_keys, username=username, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) if result.return_code == 0: with open(local_path, "wb") as f: f.write(result.stdout) else: raise RemoteException(_( "reading file '{path}' on {host} failed: {error}" ).format( error=force_text(result.stderr) + force_text(result.stdout), host=hostname, path=remote_path, )) class RunResult: def __init__(self): self.duration = None self.return_code = None self.stderr = None self.stdout = None @cached_property def stderr_text(self): return force_text(self.stderr) @cached_property def stdout_text(self): return force_text(self.stdout) def run_local( command, data_stdin=None, log_function=None, shell=False, ): """ Runs a command on the local system. """ # LineBuffer objects take care of always printing complete lines # which have been properly terminated by a newline. This is only # relevant when using `bw run`. # Does nothing when log_function is None. stderr_lb = LineBuffer(log_function) stdout_lb = LineBuffer(log_function) # Create pipes which will be used by the SSH child process. We do # not use subprocess.PIPE because we need to be able to continuously # check those pipes for new output, so we can feed it to the # LineBuffers during `bw run`. stdout_fd_r, stdout_fd_w = pipe() stderr_fd_r, stderr_fd_w = pipe() cmd_id = randstr(length=4).upper() io.debug("running command with ID {}: {}".format(cmd_id, " ".join(command))) start = datetime.utcnow() # Launch the child process. It's important that SSH gets a dummy # stdin, i.e. it must *not* read from the terminal. Otherwise, it # can steal user input. child_process = Popen( command, preexec_fn=setpgrp, shell=shell, stdin=PIPE, stderr=stderr_fd_w, stdout=stdout_fd_w, ) io._child_pids.append(child_process.pid) if data_stdin is not None: child_process.stdin.write(data_stdin) quit_event = Event() stdout_thread = Thread( args=(stdout_lb, stdout_fd_r, quit_event, True), target=output_thread_body, ) stderr_thread = Thread( args=(stderr_lb, stderr_fd_r, quit_event, False), target=output_thread_body, ) stdout_thread.start() stderr_thread.start() try: child_process.communicate() finally: # Once we end up here, the child process has terminated. # # Now, the big question is: Why do we need an Event here? # # Problem is, a user could use SSH multiplexing with # auto-forking (e.g., "ControlPersist 10m"). 
In this case, # OpenSSH forks another process which holds the "master" # connection. This forked process *inherits* our pipes (at least # for stderr). Thus, only when that master process finally # terminates (possibly after many minutes), we will be informed # about EOF on our stderr pipe. That doesn't work. bw will hang. # # So, instead, we use a busy loop in output_thread_body() which # checks for quit_event being set. Unfortunately there is no way # to be absolutely sure that we received all output from stderr # because we never get a proper EOF there. All we can do is hope # that all output has arrived on the reading end of the pipe by # the time the quit_event is checked in the thread. # # Luckily stdout is a somewhat simpler affair: we can just close # the writing end of the pipe, causing the reader thread to # shut down as it sees the EOF. io._child_pids.remove(child_process.pid) quit_event.set() close(stdout_fd_w) stdout_thread.join() stderr_thread.join() stdout_lb.close() stderr_lb.close() for fd in (stdout_fd_r, stderr_fd_r, stderr_fd_w): close(fd) io.debug("command with ID {} finished with return code {}".format( cmd_id, child_process.returncode, )) result = RunResult() result.duration = datetime.utcnow() - start result.stdout = stdout_lb.record.getvalue() result.stderr = stderr_lb.record.getvalue() result.return_code = child_process.returncode return result def run( hostname, command, add_host_keys=False, data_stdin=None, ignore_failure=False, raise_for_return_codes=( 126, # command not executable 127, # command not found 255, # SSH error ), log_function=None, username=None, # SSH auth wrapper_inner="{}", wrapper_outer="{}", user="root", # remote user running the command ): """ Runs a command on a remote system. """ shell_command = wrapper_outer.format(quote(wrapper_inner.format(command)), user) ssh_command = [ "ssh", "-o", "BatchMode=yes", "-o", "KbdInteractiveAuthentication=no", "-o", "PasswordAuthentication=no", "-o", "StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes", ] if username: ssh_command += ["-l", str(username)] extra_args = environ.get("BW_SSH_ARGS", "").strip() if extra_args: ssh_command.extend(split(extra_args)) ssh_command.append(hostname) ssh_command.append(shell_command) result = run_local( ssh_command, data_stdin=data_stdin, log_function=log_function, ) if result.return_code != 0: error_msg = _( "Non-zero return code ({rcode}) running '{command}' " "on '{host}':\n\n{result}\n\n" ).format( command=command, host=hostname, rcode=result.return_code, result=force_text(result.stdout) + force_text(result.stderr), ) io.debug(error_msg) if not ignore_failure or result.return_code in raise_for_return_codes: raise RemoteException(error_msg) return result def upload( hostname, local_path, remote_path, add_host_keys=False, group="", mode=None, owner="", ignore_failure=False, username=None, wrapper_inner="{}", wrapper_outer="{}", ): """ Upload a file. 
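    Sketch of the steps performed by the implementation below: the
    file is first copied via scp to a temporary file in the remote
    user's home directory, ownership and mode are adjusted with
    chown/chmod if requested, and the temporary file is then moved
    into place at remote_path with mv -f.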
""" io.debug(_("uploading {path} -> {host}:{target}").format( host=hostname, path=local_path, target=remote_path)) temp_filename = ".bundlewrap_tmp_" + randstr() scp_hostname = hostname if ':' in hostname: scp_hostname = f"[{hostname}]" scp_command = [ "scp", "-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes", ] extra_args = environ.get("BW_SCP_ARGS", environ.get("BW_SSH_ARGS", "")).strip() if extra_args: scp_command.extend(split(extra_args)) scp_command.append(local_path) if username: scp_command.append(f"{username}@{scp_hostname}:{temp_filename}") else: scp_command.append(f"{scp_hostname}:{temp_filename}") scp_process = run_local(scp_command) if scp_process.return_code != 0: if ignore_failure: return False raise RemoteException(_( "Upload to {host} failed for {failed}:\n\n{result}\n\n" ).format( failed=remote_path, host=hostname, result=force_text(scp_process.stdout) + force_text(scp_process.stderr), )) if owner or group: if group: group = ":" + quote(group) result = run( hostname, "chown {}{} {}".format( quote(owner), group, quote(temp_filename), ), add_host_keys=add_host_keys, ignore_failure=ignore_failure, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) if result.return_code != 0: return False if mode: result = run( hostname, "chmod {} {}".format( mode, quote(temp_filename), ), add_host_keys=add_host_keys, ignore_failure=ignore_failure, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) if result.return_code != 0: return False result = run( hostname, "mv -f {} {}".format( quote(temp_filename), quote(remote_path), ), add_host_keys=add_host_keys, ignore_failure=ignore_failure, wrapper_inner=wrapper_inner, wrapper_outer=wrapper_outer, ) return result.return_code == 0 bundlewrap-4.13.6/bundlewrap/repo.py000066400000000000000000000425341417502274300174470ustar00rootroot00000000000000from contextlib import suppress from importlib.util import module_from_spec, spec_from_file_location from inspect import isabstract from os import listdir, mkdir, walk from os.path import abspath, dirname, isdir, isfile, join from pkg_resources import DistributionNotFound, require, VersionConflict from tomlkit import parse as toml_parse from . 
import items, VERSION_STRING from .bundle import FILENAME_ITEMS from .exceptions import ( NoSuchGroup, NoSuchNode, NoSuchRepository, MissingRepoDependency, RepositoryError, ) from .group import Group from .metagen import MetadataGenerator from .node import Node from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy from .utils import ( cached_property, error_context, get_file_contents, names, ) from .utils.scm import get_git_branch, get_git_clean, get_rev from .utils.dicts import hash_statedict, untoml from .utils.text import mark_for_translation as _, red, validate_name from .utils.ui import io DIRNAME_BUNDLES = "bundles" DIRNAME_DATA = "data" DIRNAME_HOOKS = "hooks" DIRNAME_ITEM_TYPES = "items" DIRNAME_LIBS = "libs" FILENAME_GROUPS = "groups.py" FILENAME_NODES = "nodes.py" FILENAME_REQUIREMENTS = "requirements.txt" HOOK_EVENTS = ( 'action_run_end', 'action_run_start', 'apply_end', 'apply_start', 'item_apply_end', 'item_apply_start', 'lock_add', 'lock_remove', 'lock_show', 'node_apply_end', 'node_apply_start', 'node_run_end', 'node_run_start', 'run_end', 'run_start', 'test', 'test_node', ) INITIAL_CONTENT = { FILENAME_GROUPS: _(""" groups = { #'group-1': { # 'bundles': ( # 'bundle-1', # ), # 'members': ( # 'node-1', # ), # 'subgroups': ( # 'group-2', # ), #}, 'all': { 'member_patterns': ( r".*", ), }, } """), FILENAME_NODES: _(""" nodes = { 'node-1': { 'hostname': "localhost", }, } """), FILENAME_REQUIREMENTS: "bundlewrap>={}\n".format(VERSION_STRING), FILENAME_SECRETS: generate_initial_secrets_cfg, } class HooksProxy: def __init__(self, repo, path): self.repo = repo self.__hook_cache = {} self.__module_cache = {} self.__path = path self.__registered_hooks = None def __getattr__(self, attrname): if attrname not in HOOK_EVENTS: raise AttributeError if self.__registered_hooks is None: self._register_hooks() event = attrname if event not in self.__hook_cache: # build a list of files that define a hook for the event files = [] for filename, events in self.__registered_hooks.items(): if event in events: files.append(filename) # define a function that calls all hook functions def hook(*args, **kwargs): for filename in files: self.__module_cache[filename][event](*args, **kwargs) self.__hook_cache[event] = hook return self.__hook_cache[event] def _register_hooks(self): """ Builds an internal dictionary of defined hooks. Priming __module_cache here is just a performance shortcut and could be left out. 
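    A hook file is an ordinary Python module in the repository's
    hooks/ directory whose top-level functions are named after the
    events in HOOK_EVENTS. A minimal sketch (hypothetical file
    hooks/example.py):

        def node_apply_start(repo, node, interactive=False, **kwargs):
            print("apply started on", node.name)

    The signature shown matches how node_apply_start is invoked from
    Node.apply() in this codebase; other events receive different
    arguments.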
""" self.__registered_hooks = {} if not isdir(self.__path): return for filename in listdir(self.__path): filepath = join(self.__path, filename) if not filename.endswith(".py") or \ not isfile(filepath) or \ filename.startswith("_"): continue self.__module_cache[filename] = {} self.__registered_hooks[filename] = [] for name, obj in self.repo.get_all_attrs_from_file(filepath).items(): if name not in HOOK_EVENTS: continue self.__module_cache[filename][name] = obj self.__registered_hooks[filename].append(name) class LibsProxy: def __init__(self, path): self.__module_cache = {} self.__path = path def __getattr__(self, attrname): if attrname.startswith("__") and attrname.endswith("__"): raise AttributeError(attrname) if attrname not in self.__module_cache: filename = attrname + ".py" filepath = join(self.__path, filename) try: spec = spec_from_file_location( 'bundlewrap.repo.libs_{}'.format(attrname), filepath, ) mod = module_from_spec(spec) spec.loader.exec_module(mod) except Exception: io.stderr(_("Exception while trying to load {}:").format(filepath)) raise self.__module_cache[attrname] = mod return self.__module_cache[attrname] class Repository(MetadataGenerator): def __init__(self, repo_path=None): super().__init__() if repo_path is None: self.path = "/dev/null" else: self.path = self._discover_root_path(abspath(repo_path)) self._set_path(self.path) self.bundle_names = [] self.group_dict = {} self.node_dict = {} self._get_all_attr_code_cache = {} self._get_all_attr_result_cache = {} if repo_path is not None: self.populate_from_path(self.path) else: self.item_classes = list(self.items_from_dir(items.__path__[0])) def __eq__(self, other): if self.path == "/dev/null": # in-memory repos are never equal return False return self.path == other.path def __repr__(self): return "".format(self.path) @staticmethod def is_repo(path): """ Validates whether the given path is a bundlewrap repository. """ try: assert isdir(path) assert isfile(join(path, "nodes.py")) assert isfile(join(path, "groups.py")) except AssertionError: return False return True def add_group(self, group): """ Adds the given group object to this repo. """ if group.name in names(self.nodes): raise RepositoryError(_("you cannot have a node and a group " "both named '{}'").format(group.name)) if group.name in names(self.groups): raise RepositoryError(_("you cannot have two groups " "both named '{}'").format(group.name)) group.repo = self self.group_dict[group.name] = group def add_node(self, node): """ Adds the given node object to this repo. """ if node.name in names(self.groups): raise RepositoryError(_("you cannot have a node and a group " "both named '{}'").format(node.name)) if node.name in names(self.nodes): raise RepositoryError(_("you cannot have two nodes " "both named '{}'").format(node.name)) node.repo = self self.node_dict[node.name] = node @cached_property def branch(self): return get_git_branch() @cached_property def cdict(self): repo_dict = {} for node in self.nodes: repo_dict[node.name] = node.hash() return repo_dict @cached_property def clean(self): return get_git_clean() @classmethod def create(cls, path): """ Creates and returns a repository at path, which must exist and be empty. 
""" if listdir(path): raise ValueError(_("'{}' is not an empty directory".format( path ))) for filename, content in INITIAL_CONTENT.items(): if callable(content): content = content() with open(join(path, filename), 'w') as f: f.write(content.strip() + "\n") mkdir(join(path, DIRNAME_BUNDLES)) mkdir(join(path, DIRNAME_ITEM_TYPES)) return cls(path) def create_bundle(self, bundle_name): """ Creates an empty bundle. """ if not validate_name(bundle_name): raise ValueError(_("'{}' is not a valid bundle name").format(bundle_name)) bundle_dir = join(self.bundles_dir, bundle_name) # deliberately not using makedirs() so this will raise an # exception if the directory exists mkdir(bundle_dir) mkdir(join(bundle_dir, "files")) open(join(bundle_dir, FILENAME_ITEMS), 'a').close() def get_all_attrs_from_file(self, path, base_env=None): """ Reads all 'attributes' (if it were a module) from a source file. """ if base_env is None: base_env = {} if not base_env and path in self._get_all_attr_result_cache: # do not allow caching when passing in a base env because that # breaks repeated calls with different base envs for the same # file return self._get_all_attr_result_cache[path] if path not in self._get_all_attr_code_cache: source = get_file_contents(path) with error_context(path=path): self._get_all_attr_code_cache[path] = \ compile(source, path, mode='exec') code = self._get_all_attr_code_cache[path] env = base_env.copy() with error_context(path=path): exec(code, env) if not base_env: self._get_all_attr_result_cache[path] = env return env def nodes_or_groups_from_file(self, path, attribute, preexisting): try: flat_dict = self.get_all_attrs_from_file( path, base_env={ attribute: preexisting, 'libs': self.libs, 'repo_path': self.path, 'vault': self.vault, }, )[attribute] except KeyError: raise RepositoryError(_( "{} must define a '{}' variable" ).format(path, attribute)) if not isinstance(flat_dict, dict): raise ValueError(_("'{v}' in '{p}' must be a dict").format( v=attribute, p=path, )) for name, infodict in flat_dict.items(): infodict.setdefault('file_path', path) yield (name, infodict) def nodes_or_groups_from_dir(self, directory): path = join(self.path, directory) if not isdir(path): return for root_dir, _dirs, files in walk(path): for filename in files: filepath = join(root_dir, filename) if not filename.endswith(".toml") or \ not isfile(filepath) or \ filename.startswith("_"): continue with error_context(filepath=filepath): infodict = untoml(toml_parse(get_file_contents(filepath))) infodict['file_path'] = filepath yield filename[:-5], infodict def items_from_dir(self, path): """ Looks for Item subclasses in the given path. An alternative method would involve metaclasses (as Django does it), but then it gets very hard to have two separate repos in the same process, because both of them would register config item classes globally. 
""" if not isdir(path): return for root_dir, _dirs, files in walk(path): for filename in files: filepath = join(root_dir, filename) if not filename.endswith(".py") or \ not isfile(filepath) or \ filename.startswith("_"): continue for name, obj in self.get_all_attrs_from_file(filepath).items(): if obj == items.Item or name.startswith("_"): continue with suppress(TypeError): if issubclass(obj, items.Item) and not isabstract(obj): yield obj def _discover_root_path(self, path): while True: if self.is_repo(path): return path previous_component = dirname(path) if path == previous_component: raise NoSuchRepository path = previous_component def get_group(self, group_name): try: return self.group_dict[group_name] except KeyError: raise NoSuchGroup(group_name) def get_node(self, node_name): try: return self.node_dict[node_name] except KeyError: raise NoSuchNode(node_name) def group_membership_hash(self): return hash_statedict(sorted(names(self.groups))) @property def groups(self): # TODO 5.0 make this a cached set return sorted(self.group_dict.values()) def hash(self): return hash_statedict(self.cdict) @property def nodes(self): # TODO 5.0 make this a cached set return sorted(self.node_dict.values()) def nodes_in_all_groups(self, group_names): """ Returns a list of nodes where every node is a member of every group given. """ base_group = set(self.get_group(group_names[0]).nodes) for group_name in group_names[1:]: if not base_group: # quit early if we have already eliminated every node break base_group.intersection_update(set(self.get_group(group_name).nodes)) result = list(base_group) result.sort() return result def nodes_in_any_group(self, group_names): """ Returns all nodes that are a member of at least one of the given groups. """ for node in self.nodes: if node.in_any_group(group_names): yield node def nodes_in_group(self, group_name): """ Returns a list of nodes in the given group. """ return self.nodes_in_all_groups([group_name]) def metadata_hash(self): repo_dict = {} for node in self.nodes: repo_dict[node.name] = node.metadata_hash() return hash_statedict(repo_dict) def populate_from_path(self, path): if not self.is_repo(path): raise NoSuchRepository( _("'{}' is not a bundlewrap repository").format(path) ) if path != self.path: self._set_path(path) # check requirements.txt try: with open(join(path, FILENAME_REQUIREMENTS)) as f: lines = f.readlines() except Exception: pass else: try: require(lines) except DistributionNotFound as exc: raise MissingRepoDependency(_( "{x} Python package '{pkg}' is listed in {filename}, but wasn't found. " "You probably have to install it with `pip install {pkg}`." ).format( filename=FILENAME_REQUIREMENTS, pkg=exc.req, x=red("!"), )) except VersionConflict as exc: raise MissingRepoDependency(_( "{x} Python package '{required}' is listed in {filename}, " "but only '{existing}' was found. " "You probably have to upgrade it with `pip install {required}`." 
).format( existing=exc.dist, filename=FILENAME_REQUIREMENTS, required=exc.req, x=red("!"), )) self.vault = SecretProxy(self) # populate bundles self.bundle_names = [] for dir_entry in listdir(self.bundles_dir): if validate_name(dir_entry): self.bundle_names.append(dir_entry) # populate groups toml_groups = dict(self.nodes_or_groups_from_dir("groups")) self.group_dict = {} for group in self.nodes_or_groups_from_file(self.groups_file, 'groups', toml_groups): self.add_group(Group(*group)) # populate items self.item_classes = list(self.items_from_dir(items.__path__[0])) for item_class in self.items_from_dir(self.items_dir): self.item_classes.append(item_class) # populate nodes toml_nodes = dict(self.nodes_or_groups_from_dir("nodes")) self.node_dict = {} for node in self.nodes_or_groups_from_file(self.nodes_file, 'nodes', toml_nodes): self.add_node(Node(*node)) @cached_property def revision(self): return get_rev() def _set_path(self, path): self.path = path self.bundles_dir = join(self.path, DIRNAME_BUNDLES) self.data_dir = join(self.path, DIRNAME_DATA) self.hooks_dir = join(self.path, DIRNAME_HOOKS) self.items_dir = join(self.path, DIRNAME_ITEM_TYPES) self.groups_file = join(self.path, FILENAME_GROUPS) self.libs_dir = join(self.path, DIRNAME_LIBS) self.nodes_file = join(self.path, FILENAME_NODES) self.hooks = HooksProxy(self, self.hooks_dir) self.libs = LibsProxy(self.libs_dir) bundlewrap-4.13.6/bundlewrap/secrets.py000066400000000000000000000301551417502274300201460ustar00rootroot00000000000000from base64 import b64encode, urlsafe_b64decode from configparser import ConfigParser import hashlib import hmac from os import environ from os.path import join from string import ascii_letters, punctuation, digits from subprocess import PIPE, run from cryptography.fernet import Fernet from .exceptions import FaultUnavailable from .utils import Fault, get_file_contents from .utils.text import force_text, mark_for_translation as _ from .utils.ui import io HUMAN_CHARS_START = list("bcdfghjklmnprstvwxz") HUMAN_CHARS_VOWELS = list("aeiou") + ["ai", "ao", "au", "ea", "ee", "ei", "eu", "ia", "ie", "oo", "ou"] HUMAN_CHARS_CONS = HUMAN_CHARS_START + ["bb", "bl", "cc", "ch", "ck", "dd", "dr", "ds", "dt", "ff", "gg", "gn", "kl", "ll", "mb", "md", "mm", "mp", "mt", "nc", "nd", "nn", "np", "nt", "pp", "rr", "rt", "sh", "ss", "st", "tl", "ts", "tt"] FILENAME_SECRETS = ".secrets.cfg" def choice_prng(lst, prng): return lst[next(prng) % (len(lst) - 1)] def generate_initial_secrets_cfg(): return ( "# DO NOT COMMIT THIS FILE\n" "# share it with your team through a secure channel\n\n" "[generate]\nkey = {}\n\n" "[encrypt]\nkey = {}\n" ).format( SecretProxy.random_key(), SecretProxy.random_key(), ) def random(seed): """ Provides a way to get repeatable random numbers from the given seed. Unlike random.seed(), this approach provides consistent results across platforms. See also http://stackoverflow.com/a/18992474 """ while True: seed = hashlib.sha512(seed).digest() for character in seed: try: yield ord(character) except TypeError: # Python 3 yield character class SecretProxy: @staticmethod def random_key(): """ Provided as a helper to generate new keys from `bw debug`. """ return Fernet.generate_key().decode('utf-8') def __init__(self, repo): self.repo = repo self.keys = self._load_keys() def _decrypt(self, cryptotext=None, key=None): """ Decrypts a given encrypted password. 
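    The expected cryptotext format is "<key name>$<Fernet token>" as
    produced by encrypt(); when no key name prefix is present, the
    key called 'encrypt' is used by default (see
    _determine_key_to_use()).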
""" if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return "decrypted text" key, cryptotext = self._determine_key_to_use(cryptotext.encode('utf-8'), key, cryptotext) return Fernet(key).decrypt(cryptotext).decode('utf-8') def _decrypt_file(self, source_path=None, key=None): """ Decrypts the file at source_path (relative to data/) and returns the plaintext as unicode. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return "decrypted file" cryptotext = get_file_contents(join(self.repo.data_dir, source_path)) key, cryptotext = self._determine_key_to_use(cryptotext, key, source_path) f = Fernet(key) return f.decrypt(cryptotext).decode('utf-8') def _decrypt_file_as_base64(self, source_path=None, key=None): """ Decrypts the file at source_path (relative to data/) and returns the plaintext as base64. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return b64encode("decrypted file as base64").decode('utf-8') cryptotext = get_file_contents(join(self.repo.data_dir, source_path)) key, cryptotext = self._determine_key_to_use(cryptotext, key, source_path) f = Fernet(key) return b64encode(f.decrypt(cryptotext)).decode('utf-8') def _determine_key_to_use(self, cryptotext, key, entity_description): key_delim = cryptotext.find(b'$') if key_delim > -1: key_from_text = cryptotext[:key_delim].decode('utf-8') cryptotext = cryptotext[key_delim + 1:] else: key_from_text = None if key is None: if key_from_text is not None: key = key_from_text else: key = 'encrypt' try: key = self.keys[key] except KeyError: raise FaultUnavailable(_( "Key '{key}' not available for decryption of the following entity, " "check your {file}: {entity_description}" ).format( file=FILENAME_SECRETS, key=key, entity_description=entity_description, )) return key, cryptotext def _generate_human_password( self, identifier=None, digits=2, key='generate', per_word=3, words=4, ): """ Like _generate_password(), but creates a password which can be typed more easily by human beings. A "word" consists of an upper case character (usually an actual consonant), followed by an alternating pattern of "vowels" and "consonants". Those lists of characters are defined at the top of this file. Note that something like "tl" is considered "a consonant" as well. Similarly, "au" and friends are "a vowel". Words are separated by dashes. By default, you also get some digits at the end of the password. """ if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return "generatedpassword" prng = self._get_prng(identifier, key) pwd = "" is_start = True word_length = 0 words_done = 0 while words_done < words: if is_start: add = choice_prng(HUMAN_CHARS_START, prng).upper() is_start = False is_vowel = True else: if is_vowel: add = choice_prng(HUMAN_CHARS_VOWELS, prng) else: add = choice_prng(HUMAN_CHARS_CONS, prng) is_vowel = not is_vowel pwd += add word_length += 1 if word_length == per_word: pwd += "-" word_length = 0 words_done += 1 is_start = True if digits > 0: for i in range(digits): pwd += str(next(prng) % 10) else: # Strip trailing dash which is always added by the routine # above. pwd = pwd[:-1] return pwd def _generate_password(self, identifier=None, key='generate', length=32, symbols=False): """ Derives a password from the given identifier and the shared key in the repository. This is done by seeding a random generator with an SHA512 HMAC built from the key and the given identifier. One could just use the HMAC digest itself as a password, but the PRNG allows for more control over password length and complexity. 
""" if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return ("generatedpassword"*length)[:length] prng = self._get_prng(identifier, key) alphabet = ascii_letters + digits if symbols: alphabet += punctuation return "".join([choice_prng(alphabet, prng) for i in range(length)]) def _generate_random_bytes_as_base64(self, identifier=None, key='generate', length=32): if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": return b64encode(bytearray([ord("a") for i in range(length)])).decode() prng = self._get_prng(identifier, key) return b64encode(bytearray([next(prng) for i in range(length)])).decode() def _get_prng(self, identifier, key): try: key_encoded = self.keys[key] except KeyError: raise FaultUnavailable(_( "Key '{key}' not available to generate password '{password}', check your {file}" ).format( file=FILENAME_SECRETS, key=key, password=identifier, )) h = hmac.new(urlsafe_b64decode(key_encoded), digestmod=hashlib.sha512) h.update(identifier.encode('utf-8')) return random(h.digest()) def _load_keys(self): config = ConfigParser() secrets_file = join(self.repo.path, FILENAME_SECRETS) try: config.read(secrets_file) except IOError: io.debug(_("unable to read {}").format(secrets_file)) return {} result = {} for section in config.sections(): result[section] = config.get(section, 'key').encode('utf-8') return result @staticmethod def cmd(cmdline, as_text=True, strip=True): def callback(): output = run( cmdline, check=True, shell=True, stdout=PIPE, # replace with capture_output=True # when dropping support for Python 3.6 ).stdout if as_text: output = force_text(output) if strip: output = output.strip() return output return Fault( 'bw secrets cmd ' + cmdline, callback, ) def decrypt(self, cryptotext, key=None): return Fault( 'bw secrets decrypt', self._decrypt, cryptotext=cryptotext, key=key, ) def decrypt_file(self, source_path, key=None): return Fault( 'bw secrets decrypt_file', self._decrypt_file, source_path=source_path, key=key, ) def decrypt_file_as_base64(self, source_path, key=None): return Fault( 'bw secrets decrypt_file_as_base64', self._decrypt_file_as_base64, source_path=source_path, key=key, ) def encrypt(self, plaintext, key='encrypt'): """ Encrypts a given plaintext password and returns a string that can be fed into decrypt() to get the password back. """ key_name = key try: key = self.keys[key] except KeyError: raise KeyError(_( "Key '{key}' not available for encryption, check your {file}" ).format( file=FILENAME_SECRETS, key=key, )) return key_name + '$' + Fernet(key).encrypt(plaintext.encode('utf-8')).decode('utf-8') def encrypt_file(self, source_path, target_path, key='encrypt'): """ Encrypts the file at source_path and places the result at target_path. The source_path is relative to CWD or absolute, while target_path is relative to data/. 
""" key_name = key try: key = self.keys[key] except KeyError: raise KeyError(_( "Key '{key}' not available for file encryption, check your {file}" ).format( file=FILENAME_SECRETS, key=key, )) plaintext = get_file_contents(source_path) fernet = Fernet(key) target_file = join(self.repo.data_dir, target_path) with open(target_file, 'wb') as f: f.write(key_name.encode('utf-8') + b'$') f.write(fernet.encrypt(plaintext)) return target_file def human_password_for( self, identifier, digits=2, key='generate', per_word=3, words=4, ): return Fault( 'bw secrets human_password_for', self._generate_human_password, identifier=identifier, digits=digits, key=key, per_word=per_word, words=words, ) def password_for(self, identifier, key='generate', length=32, symbols=False): return Fault( 'bw secrets password_for', self._generate_password, identifier=identifier, key=key, length=length, symbols=symbols, ) def random_bytes_as_base64_for(self, identifier, key='generate', length=32): return Fault( 'bw secrets random_bytes_as_base64', self._generate_random_bytes_as_base64, identifier=identifier, key=key, length=length, ) bundlewrap-4.13.6/bundlewrap/utils/000077500000000000000000000000001417502274300172605ustar00rootroot00000000000000bundlewrap-4.13.6/bundlewrap/utils/__init__.py000066400000000000000000000215501417502274300213740ustar00rootroot00000000000000from base64 import b64encode from codecs import getwriter from contextlib import contextmanager import hashlib from inspect import isgenerator from os import chmod, close, makedirs, remove from os.path import dirname, exists from random import shuffle import stat from sys import stderr, stdout from tempfile import mkstemp from passlib.hash import apr_md5_crypt from requests import get from ..exceptions import DontCache, FaultUnavailable class NO_DEFAULT: pass MODE644 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH STDERR_WRITER = getwriter('utf-8')(stderr.buffer) STDOUT_WRITER = getwriter('utf-8')(stdout.buffer) def cached_property(prop): """ A replacement for the property decorator that will only compute the attribute's value on the first call and serve a cached copy from then on. """ def cache_wrapper(self): if not hasattr(self, "_cache"): self._cache = {} if prop.__name__ not in self._cache: try: return_value = prop(self) if isgenerator(return_value): return_value = tuple(return_value) except DontCache as exc: return exc.obj else: self._cache[prop.__name__] = return_value return self._cache[prop.__name__] return property(cache_wrapper) def download(url, path): with error_context(url=url, path=path): if not exists(dirname(path)): makedirs(dirname(path)) if exists(path): chmod(path, MODE644) with open(path, 'wb') as f: r = get(url, stream=True) r.raise_for_status() for block in r.iter_content(1024): if not block: break else: f.write(block) class ErrorContext(Exception): pass @contextmanager def error_context(**kwargs): """ This can be used to provide context for critical exceptions. Since we're processing lots of different dicts, a "KeyError: foo" will often not be helpful, since it's not clear which dict is missing the key. >>> with error_context(arbitrary_kwarg="helpful hint"): ... {}["foo"] ... Traceback (most recent call last): [...] KeyError: 'foo' The above exception was the direct cause of the following exception: Traceback (most recent call last): [...] 
bundlewrap.utils.ErrorContext: ACTUAL EXCEPTION ABOVE {'arbitrary_kwarg': 'helpful hint'} Careful though: Only use this in places where you don't expect exceptions to occur, since they will indiscriminately be reraised as ErrorContext. """ try: yield except Exception as exc: raise ErrorContext("ACTUAL EXCEPTION ABOVE\n" + repr(kwargs)) from exc class Fault: """ A proxy object for lazy access to things that may not really be available at the time of use. This let's us gracefully skip items that require information that's currently not available. """ def __init__(self, fault_identifier, callback, **kwargs): if isinstance(fault_identifier, list): self.id_list = fault_identifier else: self.id_list = [fault_identifier] for key, value in sorted(kwargs.items()): self.id_list.append(hash(key)) self.id_list.append(_recursive_hash(value)) self._available = None self._exc = None self._value = None self.callback = callback self.kwargs = kwargs def _resolve(self): if self._available is None: try: self._value = self.callback(**self.kwargs) if isinstance(self._value, Fault): self._value = self._value.value self._available = True except FaultUnavailable as exc: self._available = False self._exc = exc def __add__(self, other): if isinstance(other, Fault): def callback(): return self.value + other.value return Fault(self.id_list + other.id_list, callback) else: def callback(): return self.value + other return Fault(self.id_list + ['raw {}'.format(repr(other))], callback) def __eq__(self, other): if not isinstance(other, Fault): return False else: return self.id_list == other.id_list def __hash__(self): return hash(tuple(self.id_list)) def __iter__(self): yield from self.value def __len__(self): return len(self.value) def __lt__(self, other): if isinstance(other, Fault): return self.value < other.value else: return self.value < other def __gt__(self, other): if isinstance(other, Fault): return self.value > other.value else: return self.value > other def __repr__(self): return f"" def __str__(self): return str(self.value) def b64encode(self): def callback(): return b64encode(self.value.encode('UTF-8')).decode('UTF-8') return Fault(self.id_list + ['b64encode'], callback) def format_into(self, format_string): def callback(): return format_string.format(self.value) return Fault(self.id_list + ['format_into ' + format_string], callback) def as_htpasswd_entry(self, username): def callback(): return '{}:{}'.format( username, apr_md5_crypt.encrypt( self.value, salt=hashlib.sha512(self.id_list[0].encode('utf-8')).hexdigest()[:8], ), ) return Fault(self.id_list + ['as_htpasswd_entry ' + username], callback) @property def is_available(self): self._resolve() return self._available @property def value(self): self._resolve() if not self._available: raise self._exc return self._value def _make_method_callback(method_name): def method(self, *args, **kwargs): def callback(): return getattr(self.value, method_name)(*args, **kwargs) return Fault(self.id_list + [method_name], callback) return method for method_name in ( 'format', 'lower', 'lstrip', 'replace', 'rstrip', 'strip', 'upper', 'zfill', ): setattr(Fault, method_name, _make_method_callback(method_name)) def _recursive_hash(obj): hashes = [] if isinstance(obj, list): for i in obj: hashes.append(_recursive_hash(i)) return hash(tuple(hashes)) elif isinstance(obj, set): for i in sorted(obj): hashes.append(_recursive_hash(i)) return hash(tuple(hashes)) elif isinstance(obj, dict): for k, v in sorted(obj.items()): hashes.append(hash(k)) hashes.append(_recursive_hash(v)) return 
hash(tuple(hashes)) else: return hash(obj) def get_file_contents(path): with error_context(path=path): with open(path, 'rb') as f: content = f.read() return content def hash_local_file(path): """ Retuns the sha1 hash of a file on the local machine. """ return sha1(get_file_contents(path)) def list_starts_with(list_a, list_b): """ Returns True if list_a starts with list_b. """ list_a = tuple(list_a) list_b = tuple(list_b) try: return list_a[:len(list_b)] == list_b except IndexError: return False def names(obj_list): """ Iterator over the name properties of a given list of objects. repo.nodes will give you node objects names(repo.nodes) will give you node names """ for obj in obj_list: yield obj.name def randomize_order(obj): if isinstance(obj, dict): result = list(obj.items()) else: result = list(obj) shuffle(result) return result def sha1(data): """ Returns hex SHA1 hash for input. """ hasher = hashlib.sha1() hasher.update(data) return hasher.hexdigest() class SkipList: """ Used to maintain a list of nodes that have already been visited. """ def __init__(self, path): self.path = path if path and exists(path): with open(path) as f: self._list_items = set(f.read().strip().split("\n")) else: self._list_items = set() def __contains__(self, item): return item in self._list_items def add(self, item): if self.path: self._list_items.add(item) def dump(self): if self.path: with open(self.path, 'w') as f: f.write("\n".join(sorted(self._list_items)) + "\n") @contextmanager def tempfile(): handle, path = mkstemp() close(handle) yield path remove(path) bundlewrap-4.13.6/bundlewrap/utils/cmdline.py000066400000000000000000000132671417502274300212560ustar00rootroot00000000000000from functools import wraps from sys import exit, stderr, stdout from traceback import print_exc from ..exceptions import NoSuchGroup, NoSuchItem, NoSuchNode from . import names from .text import mark_for_translation as _, red from .ui import io, QUIT_EVENT def exit_on_keyboardinterrupt(f): @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except KeyboardInterrupt: exit(130) # https://tldp.org/LDP/abs/html/exitcodes.html return wrapper def suppress_broken_pipe_msg(f): """ Oh boy. CPython does funny things with SIGPIPE. By default, it is caught and raised as a BrokenPipeError. When do we get a SIGPIPE? Most commonly when piping into head: bw nodes | head -n 1 head will exit after receiving the first line, causing the kernel to send SIGPIPE to our process. Since in most cases, we can't just quit early, we simply ignore BrokenPipeError in utils.ui.write_to_stream. Unfortunately, Python will still print a message: Exception ignored in: <_io.TextIOWrapper name='' mode='w' encoding='UTF-8'> BrokenPipeError: [Errno 32] Broken pipe See also http://bugs.python.org/issue11380. The crazy try/finally construct below is taken from there and I quote: This will: - capture any exceptions *you've* raised as the context for the errors raised in this handler - expose any exceptions generated during this thing itself - prevent the interpreter dying during shutdown in flush_std_files by closing the files (you can't easily wipe out the pending writes that have failed) CAVEAT: There is a seamingly easier method floating around on the net (http://stackoverflow.com/a/16865106) that restores the default behavior for SIGPIPE (i.e. 
not turning it into a BrokenPipeError): from signal import signal, SIGPIPE, SIG_DFL signal(SIGPIPE,SIG_DFL) This worked fine for a while but broke when using multiprocessing.Manager() to share the list of jobs in utils.ui between processes. When the main process terminated, it quit with return code 141 (indicating a broken pipe), and the background process used for the manager continued to hang around indefinitely. Bonus fun: This was observed only on Ubuntu Trusty (14.04). """ @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except Exception: print_exc() exit(1) finally: try: stdout.flush() finally: try: stdout.close() finally: try: stderr.flush() finally: stderr.close() return wrapper def count_items(nodes): count = 0 for node in nodes: if QUIT_EVENT.is_set(): return 0 count += len(node.items) return count def get_group(repo, group_name): try: return repo.get_group(group_name) except NoSuchGroup: io.stderr(_("{x} No such group: {group}").format( group=group_name, x=red("!!!"), )) exit(1) def get_item(node, item_id): try: return node.get_item(item_id) except NoSuchItem: io.stderr(_("{x} No such item on node '{node}': {item}").format( item=item_id, node=node.name, x=red("!!!"), )) exit(1) def get_node(repo, node_name): try: return repo.get_node(node_name) except NoSuchNode: io.stderr(_("{x} No such node: {node}").format( node=node_name, x=red("!!!"), )) exit(1) HELP_get_target_nodes = _("""expression to select target nodes: my_node # to select a single node my_group # all nodes in this group bundle:my_bundle # all nodes with this bundle !bundle:my_bundle # all nodes without this bundle !group:my_group # all nodes not in this group "lambda:node.metadata_get('foo/magic', 47) < 3" # all nodes whose metadata["foo"]["magic"] is less than three """) def get_target_nodes(repo, target_strings): targets = set() for name in target_strings: name = name.strip() if name.startswith("bundle:"): bundle_name = name.split(":", 1)[1] for node in repo.nodes: if bundle_name in names(node.bundles): targets.add(node) elif name.startswith("!bundle:"): bundle_name = name.split(":", 1)[1] for node in repo.nodes: if bundle_name not in names(node.bundles): targets.add(node) elif name.startswith("!group:"): group_name = name.split(":", 1)[1] for node in repo.nodes: if group_name not in names(node.groups): targets.add(node) elif name.startswith("lambda:"): expression = eval("lambda node: " + name.split(":", 1)[1]) for node in repo.nodes: if expression(node): targets.add(node) else: try: targets.add(repo.get_node(name)) except NoSuchNode: try: group = repo.get_group(name) except NoSuchGroup: io.stderr(_("{x} No such node or group: {name}").format( x=red("!!!"), name=name, )) exit(1) else: targets.update(group.nodes) return targets bundlewrap-4.13.6/bundlewrap/utils/dicts.py000066400000000000000000000372151417502274300207500ustar00rootroot00000000000000from copy import copy from datetime import datetime, date, time from difflib import unified_diff from hashlib import sha1 from json import dumps, JSONEncoder from tomlkit import document as toml_document from tomlkit import items as toml_types from tomlkit.container import Container as TOMLContainer from . import Fault from .text import bold, green, red, yellow from .text import force_text, mark_for_translation as _ DIFF_MAX_INLINE_LENGTH = 36 DIFF_MAX_LINE_LENGTH = 1024 class _MISSING_KEY: pass class _Atomic: """ This and the following related classes are used to mark objects as non-mergeable for the purposes of merge_dict(). 
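During merge_dict(), a value wrapped in one of these classes replaces the corresponding value in the base dict instead of being merged into it, e.g. (illustrative): >>> merge_dict({'a': [1]}, {'a': _AtomicList([2])}) {'a': [2]}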
""" pass class _AtomicDict(dict, _Atomic): pass class _AtomicList(list, _Atomic): pass class _AtomicSet(set, _Atomic): pass class _AtomicTuple(tuple, _Atomic): pass ATOMIC_TYPES = { dict: _AtomicDict, list: _AtomicList, set: _AtomicSet, tuple: _AtomicTuple, } def dict_to_toml(dict_obj): toml_doc = toml_document() for key, value in dict_obj.items(): if isinstance(value, tuple): toml_doc[key] = list(value) elif isinstance(value, set): toml_doc[key] = sorted(value) elif isinstance(value, dict): toml_doc[key] = dict_to_toml(value) else: toml_doc[key] = value return toml_doc def diff_keys(dict1, dict2): differing_keys = set() for key in set(dict1.keys()) | set(dict2.keys()): try: if dict1[key] != dict2[key]: differing_keys.add(key) except KeyError: differing_keys.add(key) return differing_keys def diff_normalize_bool(value): return "yes" if value else "no" def diff_normalize_bytes(value): return value.decode('utf-8', 'backslashreplace') def diff_normalize_list(value): if isinstance(value, set): value = sorted(value) else: # convert tuples and create copies of lists before possibly # appending stuff later on (see below) value = list(value) # make sure that *if* we have lines, the last one will also end with # a newline if value: value.append("") return "\n".join([str(i) for i in value]) TYPE_DIFF_NORMALIZE = { bool: diff_normalize_bool, bytes: diff_normalize_bytes, float: str, int: str, list: diff_normalize_list, type(None): str, set: diff_normalize_list, tuple: diff_normalize_list, } VALID_STATEDICT_TYPES = tuple(TYPE_DIFF_NORMALIZE.keys()) + (str,) def diff_normalize(value): if isinstance(value, str): return value try: normalize = TYPE_DIFF_NORMALIZE[type(value)] except KeyError: raise TypeError(_("unable to diff {} ({})").format( repr(value), type(value), )) return normalize(value) def diff_text(value1, value2): max_length = max(len(value1), len(value2)) value1, value2 = force_text(value1), force_text(value2) if ( "\n" not in value1 and "\n" not in value2 ): if max_length < DIFF_MAX_INLINE_LENGTH: return "{} → {}".format( red(value1), green(value2), ) elif max_length < DIFF_MAX_LINE_LENGTH: return " {}\n→ {}".format( red(value1), green(value2), ) output = "" for line in tuple(unified_diff( value1.splitlines(True), value2.splitlines(True), ))[2:]: suffix = "" if len(line) > DIFF_MAX_LINE_LENGTH: suffix += _(" (line truncated after {} characters)").format(DIFF_MAX_LINE_LENGTH) if not line.endswith("\n"): suffix += _(" (no newline at end of file)") line = line[:DIFF_MAX_LINE_LENGTH].rstrip("\n") if line.startswith("+"): line = green(line) elif line.startswith("-"): line = red(line) output += line + suffix + "\n" return output.rstrip("\n") def diff_value(value1, value2): if value1 == _MISSING_KEY: value1 = yellow(_("")) if value2 == _MISSING_KEY: value2 = yellow(_("")) return diff_text(diff_normalize(value1), diff_normalize(value2)) def diff_dict(dict1, dict2, skip_missing_in_target=False): def handle_multiline(key, diff): if "\n" in diff: return bold(key) + "\n" + diff + "\n" else: return bold(key) + " " + diff + "\n" output = "" if dict1 is None and dict2 is None: return "" elif dict1 is None: for key, value in sorted(dict2.items()): output += handle_multiline(key, green(str(value))) elif dict2 is None: for key, value in sorted(dict1.items()): output += handle_multiline(key, red(str(value))) else: for key in sorted(diff_keys(dict1, dict2)): if skip_missing_in_target and key not in dict2: # this is used to hide anything not present in a cdict # and thus not relevant to the diff/user continue 
value1 = dict1.get(key, _MISSING_KEY) value2 = dict2.get(key, _MISSING_KEY) diff = diff_value(value1, value2) output += handle_multiline(key, diff) return output def dict_to_text(dict_obj, value_color=str): output = "" for key, value in sorted(dict_obj.items()): value = diff_normalize(value) if "\n" in value: output += bold(key) + "\n" for line in value.splitlines(): output += value_color(line) + "\n" else: output += bold(key) + " " + value_color(value) + "\n" return output.rstrip("\n") class FaultResolvingJSONEncoder(JSONEncoder): def default(self, obj): if isinstance(obj, Fault): return self.default(obj.value) elif isinstance(obj, set): return sorted(obj) else: return JSONEncoder.default(self, obj) def hash_statedict(sdict): """ Returns a canonical SHA1 hash to describe this dict. """ return sha1(statedict_to_json(sdict).encode('utf-8')).hexdigest() def map_dict_keys(dict_obj, leaves_only=False, _base=None,): """ Return a set of key paths for the given dict. E.g.: >>> map_dict_keys({'foo': {'bar': 1}, 'baz': 2}) set([('foo', 'bar'), ('baz',)]) """ if _base is None: _base = () keys = set() for key, value in dict_obj.items(): is_dict = isinstance(value, dict) if is_dict: keys.update(map_dict_keys( value, leaves_only=leaves_only, _base=_base + (key,), )) if not is_dict or not leaves_only: keys.add(_base + (key,)) return keys def extra_paths_in_dict(dict_obj, paths): """ Returns all paths in dict_obj that don't start with any of the given paths. >>> extra_paths_in_dict({'a': 1, 'b': {'c': 1}}, {('b', 'c')}) {('a',)} """ result = set() for actual_path in map_dict_keys(dict_obj, leaves_only=True): for allowed_path in paths: if actual_path[:len(allowed_path)] == allowed_path: break else: result.add(actual_path) return result def merge_dict(base, update): """ Recursively merges the update dict into the base dict. """ if not isinstance(update, dict): return update merged = base.copy() for key, value in update.items(): merge = ( key in base and not isinstance(value, _Atomic) and not isinstance(base[key], _Atomic) ) if merge and isinstance(base[key], dict): merged[key] = merge_dict(base[key], value) elif ( merge and isinstance(base[key], list) and isinstance(value, (list, set, tuple)) ): extended = base[key][:] extended.extend(value) merged[key] = extended elif ( merge and isinstance(base[key], tuple) and isinstance(value, (list, set, tuple)) ): merged[key] = base[key] + tuple(value) elif ( merge and isinstance(base[key], set) and isinstance(value, (list, set, tuple)) ): merged[key] = base[key].union(set(value)) else: # If we don't copy here, we end up with dicts from groups in # node metadata. Not an issue per se, but a nasty pitfall # when users do things like this in items.py: # # my_dict = node.metadata.get('foo', {}) # my_dict['bar'] = 'baz' # # The expectation here is to be able to mangle my_dict # because it is only relevant for the current node. However, # if 'foo' has only been defined in a group, we end up # mangling that dict for every node in the group. # Since we can't really force users to .copy() in this case # (although they should!), we have to do it here. merged[key] = copy(value) return merged def reduce_dict(full_dict, template_dict): """ Take a large dict and recursively remove all keys that are not present in the template dict. Also descends into lists. 
>>> full_dict = { 'a': [{ 'b': 1, 'c': 2, # this will be removed from final result }], 'd': 3, } >>> template_dict = { 'a': [{ 'b': None, }], 'd': None, 'e': None, } >>> reduce_dict(full_dict, template_dict) { 'a': [{ 'b': 1, }], 'd': 3, } """ if isinstance(full_dict, list): if not isinstance(template_dict, list): return full_dict result = [] for index, full_dict_element in enumerate(full_dict): try: template_dict_element = template_dict[index] except IndexError: template_dict_element = full_dict_element result.append(reduce_dict(full_dict_element, template_dict_element)) return result elif isinstance(full_dict, dict): if not isinstance(template_dict, dict): return full_dict result = {} for key, value in full_dict.items(): if key in template_dict: result[key] = reduce_dict(value, template_dict[key]) return result else: return full_dict def statedict_to_json(sdict, pretty=False): """ Returns a canonical JSON representation of the given statedict. """ if sdict is None: return "" else: return dumps( sdict, cls=FaultResolvingJSONEncoder, indent=4 if pretty else None, sort_keys=True, ) class COLLECTION_OF_STRINGS: pass class TUPLE_OF_INTS: pass def validate_dict(candidate, schema, required_keys=None): if not isinstance(candidate, dict): raise ValueError(_("not a dict: {}").format(repr(candidate))) for key, value in candidate.items(): if key not in schema: raise ValueError(_("illegal key: {}").format(key)) allowed_types = schema[key] if allowed_types == COLLECTION_OF_STRINGS: if not isinstance(value, (list, set, tuple)): raise ValueError(_("key '{k}' is {i}, but should be one of: {t}").format( k=key, i=type(value), t=(list, set, tuple), )) for inner_value in value: if not isinstance(inner_value, str): raise ValueError(_("non-string member in '{k}': {v}").format( k=key, v=repr(inner_value), )) elif allowed_types == TUPLE_OF_INTS: if not isinstance(value, tuple): raise ValueError(_("key '{k}' is {i}, but should be a tuple").format( k=key, i=type(value), )) for inner_value in value: if not isinstance(inner_value, int): raise ValueError(_("non-int member in '{k}': {v}").format( k=key, v=repr(inner_value), )) elif not isinstance(value, allowed_types): raise ValueError(_("key '{k}' is {i}, but should be one of: {t}").format( k=key, i=type(value), t=allowed_types, )) for key in required_keys or []: if key not in candidate: raise ValueError(_("missing required key: {}").format(key)) def validate_statedict(sdict): """ Raises ValueError if the given statedict is invalid. 
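A well-formed statedict might look like this (values are illustrative): >>> validate_statedict({'owner': 'root', 'mode': '0644', 'size': 4096}) # no exception raised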
""" if sdict is None: return for key, value in sdict.items(): if not isinstance(force_text(key), str): raise ValueError(_("non-text statedict key: {}").format(key)) if not isinstance(value, VALID_STATEDICT_TYPES) and value is not None: raise ValueError(_( "invalid statedict value for key '{k}': {v}" ).format( k=key, v=repr(value), )) if isinstance(value, (list, tuple)): for index, element in enumerate(value): if not isinstance(element, VALID_STATEDICT_TYPES) and element is not None: raise ValueError(_( "invalid element #{i} in statedict key '{k}': {e}" ).format( e=repr(element), i=index, k=key, )) def delete_key_at_path(d, path): if len(path) == 1: del d[path[0]] else: delete_key_at_path(d[path[0]], path[1:]) def replace_key_at_path(d, path, new_key): if len(path) == 1: value = d[path[0]] del d[path[0]] d[new_key] = value else: replace_key_at_path(d[path[0]], path[1:], new_key) def set_key_at_path(d, path, value): if len(path) == 0: d.update(value) elif len(path) == 1: d[path[0]] = value else: if path[0] not in d: # setdefault doesn't work with tomlkit d[path[0]] = {} set_key_at_path(d[path[0]], path[1:], value) def value_at_key_path(dict_obj, path): """ Given the list of keys in `path`, recursively traverse `dict_obj` and return whatever is found at the end of that path. E.g.: >>> value_at_key_path({'foo': {'bar': 5}}, ['foo', 'bar']) 5 """ if not path: return dict_obj else: nested_dict = dict_obj[path[0]] remaining_path = path[1:] if remaining_path and not isinstance(nested_dict, dict): raise KeyError("/".join(path)) else: return value_at_key_path(nested_dict, remaining_path) TOML_TYPES = { toml_types.Bool: bool, toml_types.Float: float, toml_types.Integer: int, toml_types.Table: dict, toml_types.String: str, toml_types.DateTime: datetime, toml_types.Date: date, toml_types.Time: time, } def untoml(obj): if isinstance(obj, (dict, TOMLContainer)): return {key: untoml(value) for key, value in obj.items()} elif isinstance(obj, set): return {untoml(value) for value in obj} elif isinstance(obj, list): return [untoml(value) for value in obj] elif isinstance(obj, tuple): return tuple(untoml(value) for value in obj) elif isinstance(obj, toml_types.Item): for toml_type, native_type in TOML_TYPES.items(): if isinstance(obj, toml_type): return native_type(obj) return obj bundlewrap-4.13.6/bundlewrap/utils/metastack.py000066400000000000000000000073571417502274300216220ustar00rootroot00000000000000from collections import OrderedDict from sys import version_info from ..metadata import METADATA_TYPES, deepcopy_metadata, validate_metadata, value_at_key_path from .dicts import ATOMIC_TYPES, map_dict_keys, merge_dict UNMERGEABLE = tuple(METADATA_TYPES) + tuple(ATOMIC_TYPES.values()) class Metastack: """ Holds a number of metadata layers. When laid on top of one another, these layers form complete metadata for a node. Each layer comes from one particular source of metadata: a bundle default, a group, the node itself, or a metadata reactor. Metadata reactors are unique in their ability to revise their own layer each time they are run. """ def __init__(self): self._partitions = ( # We rely heavily on insertion order in these dicts. {} if version_info >= (3, 7) else OrderedDict(), # node/groups {} if version_info >= (3, 7) else OrderedDict(), # reactors {} if version_info >= (3, 7) else OrderedDict(), # defaults ) self._cached_partitions = {} def get(self, path): """ Get the value at the given path, merging all layers together. 
""" result = None undef = True for part_index, partition in enumerate(self._partitions): # prefer cached partitions if available partition = self._cached_partitions.get(part_index, partition) for layer in reversed(list(partition.values())): try: value = value_at_key_path(layer, path) except KeyError: pass else: if undef: # First time we see anything. If we can't merge # it anyway, then return early. if isinstance(value, UNMERGEABLE): return deepcopy_metadata(value) result = {'data': value} undef = False else: result = merge_dict({'data': value}, result) if undef: raise KeyError('/'.join(path)) else: return deepcopy_metadata(result['data']) def as_dict(self, partitions=None): final_dict = {} if partitions is None: partitions = tuple(range(len(self._partitions))) else: partitions = sorted(partitions) for part_index in partitions: # prefer cached partitions if available partition = self._cached_partitions.get(part_index, self._partitions[part_index]) for layer in reversed(list(partition.values())): final_dict = merge_dict(layer, final_dict) return final_dict def as_blame(self): keymap = map_dict_keys(self.as_dict()) blame = {} for path in keymap: for partition in self._partitions: for identifier, layer in partition.items(): try: value_at_key_path(layer, path) except KeyError: pass else: blame.setdefault(path, []).append(identifier) return blame def pop_layer(self, partition_index, identifier): try: return self._partitions[partition_index].pop(identifier) except (KeyError, IndexError): return {} def set_layer(self, partition_index, identifier, new_layer): validate_metadata(new_layer) self._partitions[partition_index][identifier] = new_layer def cache_partition(self, partition_index): self._cached_partitions[partition_index] = { 'merged layers': self.as_dict(partitions=[partition_index]), } bundlewrap-4.13.6/bundlewrap/utils/plot.py000066400000000000000000000252341417502274300206160ustar00rootroot00000000000000from ..exceptions import MetadataPersistentKeyError from . import names from .text import bold, mark_for_translation as _, yellow from .ui import io def explain_item_dependency_loop(items): """ Generates output lines to help users debug the issue. """ items = remove_items_not_contributing_to_loop(items) node_name = items[0].node.name yield _( "There was a dependency problem on node '{node}'. 
Look at the debug.svg generated " "by the following command and try to find a loop:\n\n\n" "printf '{cmd}' | dot -Tsvg -odebug.svg\n\n\n" ).format( node=node_name, cmd="\\n".join(graph_for_items(node_name, items)), ) yield _( "Additionally, here is a list of all items involved " "and their remaining dependencies:\n" ) for item in items: yield "{}\t{}".format(item.id, ",".join([item.id for item in sorted(item._deps)])) yield "\n\n\n" def graph_for_items( title, items, cluster=True, concurrency=True, regular=True, reverse=True, auto=True, ): items = sorted(items) yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("graph [color=\"#303030\"; " "fontname=Helvetica; " "penwidth=2; " "shape=box; " "style=\"rounded,dashed\"]") yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontcolor=white; " "fontname=Helvetica; " "shape=box; " "style=\"rounded,filled\"]") yield "edge [arrowhead=vee]" item_ids = {item.id for item in items} if cluster: # Define which items belong to which bundle bundle_number = 0 bundles_seen = set() for item in items: if item.bundle is None or item.bundle.name in bundles_seen: continue yield "subgraph cluster_{}".format(bundle_number) bundle_number += 1 yield "{" yield "label = \"{}\"".format(item.bundle.name) for bitem in item.bundle.items: if bitem.id in item_ids: yield "\"{}\"".format(bitem.id) yield "}" bundles_seen.add(item.bundle.name) # Define dependencies between items for item in items: auto_attrs = item.get_auto_attrs(items) if regular: for dep in sorted(item._deps_needs): if dep.id in auto_attrs.get('needs', set()) and auto: yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(item.id, dep.id) else: yield "\"{}\" -> \"{}\" [color=\"#C24948\",penwidth=2]".format(item.id, dep.id) for dep in sorted(item._deps_after): if dep.id in auto_attrs.get('after', set()) and auto: yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(item.id, dep.id) else: yield "\"{}\" -> \"{}\" [color=\"#42AFFF\",penwidth=2]".format(item.id, dep.id) if concurrency: for dep in sorted(item._deps_concurrency): yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(item.id, dep.id) if reverse: # FIXME this is not filtering auto deps, but we should rethink filters anyway in 5.0 for dep in sorted(item._deps_before): yield "\"{}\" -> \"{}\" [color=\"#D1CF52\",penwidth=2]".format(item.id, dep.id) for dep in sorted(item._deps_needed_by): yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(item.id, dep.id) if auto: for dep in sorted(item._deps_triggers): yield "\"{}\" -> \"{}\" [color=\"#fca7f7\",penwidth=2]".format(item.id, dep.id) # Global graph title yield "fontsize = 28" yield "label = \"{}\"".format(title) yield "labelloc = \"t\"" yield "}" def plot_group(groups, nodes, show_nodes): groups = sorted(groups) nodes = sorted(nodes) yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontname=Helvetica]") yield "edge [arrowhead=vee]" for group in groups: yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) for node in nodes: yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) for group in groups: for subgroup in sorted(group._attributes.get('subgroups', set())): yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) for subgroup in sorted(group._subgroup_names_from_patterns): yield "\"{}\" -> 
\"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) if show_nodes: for group in groups: for node in nodes: if group in set(node._attributes.get('groups', set())): yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( node.name, group.name) elif node in group._nodes_from_members: yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( group.name, node.name) else: for pattern in group._member_patterns: if pattern.search(node.name) is not None: yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( group.name, node.name) break yield "}" def plot_node_groups(node): yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontname=Helvetica]") yield "edge [arrowhead=vee]" for group in sorted(node.groups): yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) for group in sorted(node.groups): for subgroup in sorted(group._attributes.get('subgroups', set())): if subgroup in names(node.groups): yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format( group.name, subgroup) for pattern in sorted(group._immediate_subgroup_patterns): for group2 in sorted(node.groups): if pattern.search(group2.name) is not None and group2 != group: yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format( group.name, group2.name) for supergroup in sorted(group._supergroups_from_attribute): yield "\"{}\" -> \"{}\" [color=\"#C24948\",penwidth=2]".format( supergroup.name, group.name) if group.name in node._attributes.get('groups', set()): yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( group.name, node.name) elif node in group._nodes_from_members: yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( group.name, node.name) else: for pattern in sorted(group._member_patterns): if pattern.search(node.name) is not None: yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( group.name, node.name) yield "}" def plot_reactors(repo, node, key_paths, recursive=False): repo._record_reactor_call_graph = True try: for key_path in key_paths: node.metadata.get(key_path) except MetadataPersistentKeyError: io.stderr(_( "{x} MetadataPersistentKeyError was raised, ignoring (use `bw metadata` to see it)" ).format(x=bold(yellow("!")))) yield "digraph bundlewrap" yield "{" # Print subgraphs *below* each other yield "rankdir = LR" # Global attributes yield ("node [color=\"#303030\"; " "fillcolor=\"#303030\"; " "fontname=Helvetica]") yield ("edge [arrowhead=vee; " "fontname=Helvetica]") styles = set() edges = set() for provided_path, required_path, reactor in repo._reactor_call_graph: origin_node_name = provided_path[0] target_node_name = required_path[0] if not recursive and origin_node_name != node.name: continue provided_path = '/'.join(provided_path[1]) reactor_changes = repo._reactor_changes[reactor] reactor_runs = repo._reactor_runs[reactor] reactor_label = f"{reactor[1][17:]} ({reactor_changes}/{reactor_runs})" styles.add(f"\"{reactor_label}\" [shape=box]") edges.add(f"\"{reactor_label}\" -> \"{provided_path}\"") if target_node_name != node.name: full_required_path = f"{required_path[0]}:{'/'.join(required_path[1])}" styles.add(f"\"{full_required_path}\" [color=\"#FF0000\"]") edges.add(f"\"{full_required_path}\" -> \"{reactor_label}\" [color=\"#FF0000\"]") else: edges.add(f"\"{'/'.join(required_path[1])}\" -> \"{reactor_label}\"") for 
style in sorted(styles): yield style for edge in sorted(edges): yield edge yield "}" def remove_items_not_contributing_to_loop(items): """ We have found a loop. By definition, each item in a loop must have at least one incoming and one outgoing dependency. We can therefore remove all items without either incoming or outgoing dependencies to make the loop more apparent. """ items_with_no_incoming_or_outgoing_deps = set() for item in items: if not item._deps: items_with_no_incoming_or_outgoing_deps.add(item) else: if item in item._deps: continue for other_item in items: if item == other_item: continue if item in other_item._deps: break else: items_with_no_incoming_or_outgoing_deps.add(item) filtered_items = list(filter( lambda item: item not in items_with_no_incoming_or_outgoing_deps, items, )) if len(filtered_items) == len(items): # nothing happened, end recursion return filtered_items else: # we removed something, this might free up other items we can # catch in a second pass return remove_items_not_contributing_to_loop(filtered_items) bundlewrap-4.13.6/bundlewrap/utils/remote.py000066400000000000000000000064031417502274300211300ustar00rootroot00000000000000from shlex import quote from . import cached_property from .text import force_text, mark_for_translation as _ from .ui import io def stat(node, path): if node.os in node.OS_FAMILY_BSD: result = node.run( "stat -f '%Su:%Sg:%p:%z:%HT' -- {}".format(quote(path)), may_fail=True, ) else: result = node.run( "stat -c '%U:%G:%a:%s:%F' -- {}".format(quote(path)), may_fail=True, ) if result.return_code != 0: return {} owner, group, mode, size, ftype = \ force_text(result.stdout).strip().split(":", 5) mode = mode[-4:].zfill(4) # cut off BSD file type file_stat = { 'owner': owner, 'group': group, 'mode': mode, 'size': int(size), 'type': ftype.lower(), } io.debug(_("stat for '{path}' on {node}: {result}".format( node=node.name, path=path, result=repr(file_stat), ))) return file_stat class PathInfo: """ Serves as a proxy to get_path_type. 
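In practice it wraps the result of stat() above, e.g. (illustrative): PathInfo(node, '/etc/motd').is_file is True if the remote path is a regular file.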
""" def __init__(self, node, path): self.node = node self.path = path self.stat = stat(node, path) def __repr__(self): return "".format(self.node.name, quote(self.path)) @property def exists(self): return bool(self.stat) @property def group(self): return self.stat['group'] @property def is_directory(self): return self.stat['type'] == "directory" @property def is_file(self): return self.stat['type'] in ("regular file", "regular empty file") @property def is_symlink(self): return self.stat['type'] == "symbolic link" @property def is_text_file(self): return self.is_file and ( "text" in self.desc or self.desc in ( "empty", "JSON data", "OpenSSH ED25519 public key", "OpenSSH RSA public key", "OpenSSH DSA public key", ) ) @property def mode(self): return self.stat['mode'] @property def owner(self): return self.stat['owner'] @cached_property def desc(self): return force_text(self.node.run( "file -bh -- {}".format(quote(self.path)) ).stdout).strip() @cached_property def sha1(self): if self.node.os == 'macos': result = self.node.run("shasum -a 1 -- {}".format(quote(self.path))) elif self.node.os in self.node.OS_FAMILY_BSD: result = self.node.run("sha1 -q -- {}".format(quote(self.path))) else: result = self.node.run("sha1sum -- {}".format(quote(self.path))) # sha1sum adds a leading backslash to hashes of files whose name # contains backslash-escaped characters – we must lstrip() that return force_text(result.stdout).strip().lstrip("\\").split()[0] @property def size(self): return self.stat['size'] @property def symlink_target(self): if not self.is_symlink: raise ValueError("{} is not a symlink".format(quote(self.path))) return force_text(self.node.run( "readlink -- {}".format(quote(self.path)), may_fail=True, ).stdout.strip()) bundlewrap-4.13.6/bundlewrap/utils/scm.py000066400000000000000000000034251417502274300204200ustar00rootroot00000000000000from shlex import quote from subprocess import CalledProcessError, check_output, STDOUT from .text import mark_for_translation as _ def get_git_branch(): try: return check_output( "git rev-parse --abbrev-ref HEAD", shell=True, stderr=STDOUT, ).decode().strip() except CalledProcessError: return None def get_git_clean(): try: return not bool(check_output( "git status --porcelain", shell=True, stderr=STDOUT, ).decode().strip()) except CalledProcessError: return None def get_bzr_rev(): try: return check_output( "bzr revno", shell=True, stderr=STDOUT, ).decode().strip() except CalledProcessError: return None def get_git_rev(): try: return check_output( "git rev-parse HEAD", shell=True, stderr=STDOUT, ).decode().strip() except CalledProcessError: return None def get_hg_rev(): try: return check_output( "hg --debug id -i", shell=True, stderr=STDOUT, ).decode().strip().rstrip("+") except CalledProcessError: return None def get_rev(): for scm_rev in (get_git_rev, get_hg_rev, get_bzr_rev): rev = scm_rev() if rev is not None: return rev return None def set_git_rev(rev, detach=False): if not get_git_clean(): raise RuntimeError(_("git working dir not clean, won't change rev")) if detach: command = "git checkout --detach {}".format(quote(rev)) else: command = "git checkout {}".format(quote(rev)) check_output( command, shell=True, stderr=STDOUT, ) bundlewrap-4.13.6/bundlewrap/utils/table.py000066400000000000000000000145351417502274300207310ustar00rootroot00000000000000from os import environ from .text import ansi_clean ROW_SEPARATOR = 1 if environ.get("BW_TABLE_STYLE") == 'ascii': FRAME_TOP_LEFT = "+-" FRAME_TOP_COLUMN_SEPARATOR = "-+-" FRAME_TOP_RIGHT = "-+" 
FRAME_BOTTOM_LEFT = "+-" FRAME_BOTTOM_COLUMN_SEPARATOR = "-+-" FRAME_BOTTOM_RIGHT = "-+" FRAME_CENTER_LEFT = "+-" FRAME_CENTER_COLUMN_SEPARATOR = "-+-" FRAME_CENTER_RIGHT = "-+" FRAME_COLUMN_FILLER = "-" FRAME_COLUMN_WHITESPACE = " " FRAME_ROW_COLUMN_SEPARATOR_LEFT = "-| " FRAME_ROW_COLUMN_SEPARATOR_NONE = " | " FRAME_ROW_COLUMN_SEPARATOR_BOTH = "-+-" FRAME_ROW_COLUMN_SEPARATOR_RIGHT = " |-" elif environ.get("BW_TABLE_STYLE") == 'grep': FRAME_TOP_LEFT = "" FRAME_TOP_COLUMN_SEPARATOR = "" FRAME_TOP_RIGHT = "" FRAME_BOTTOM_LEFT = "" FRAME_BOTTOM_COLUMN_SEPARATOR = "" FRAME_BOTTOM_RIGHT = "" FRAME_CENTER_LEFT = "" FRAME_CENTER_COLUMN_SEPARATOR = "" FRAME_CENTER_RIGHT = "" FRAME_COLUMN_FILLER = "" FRAME_COLUMN_WHITESPACE = "" FRAME_ROW_COLUMN_SEPARATOR_LEFT = "\t" FRAME_ROW_COLUMN_SEPARATOR_NONE = "\t" FRAME_ROW_COLUMN_SEPARATOR_BOTH = "\t" FRAME_ROW_COLUMN_SEPARATOR_RIGHT = "\t" else: FRAME_TOP_LEFT = "╭─" FRAME_TOP_COLUMN_SEPARATOR = "─┬─" FRAME_TOP_RIGHT = "─╮" FRAME_BOTTOM_LEFT = "╰─" FRAME_BOTTOM_COLUMN_SEPARATOR = "─┴─" FRAME_BOTTOM_RIGHT = "─╯" FRAME_CENTER_LEFT = "├─" FRAME_CENTER_COLUMN_SEPARATOR = "─┼─" FRAME_CENTER_RIGHT = "─┤" FRAME_COLUMN_FILLER = "─" FRAME_COLUMN_WHITESPACE = " " FRAME_ROW_COLUMN_SEPARATOR_LEFT = "─┤ " FRAME_ROW_COLUMN_SEPARATOR_NONE = " │ " FRAME_ROW_COLUMN_SEPARATOR_BOTH = "─┼─" FRAME_ROW_COLUMN_SEPARATOR_RIGHT = " ├─" def _column_widths_for_rows(rows): column_widths = [0 for column in rows[0]] for row in rows: if not isinstance(row, list) and not isinstance(row, tuple): continue for i, column in enumerate(row): if column == ROW_SEPARATOR: continue column_widths[i] = max(column_widths[i], len(ansi_clean(column))) return column_widths def _border_top(column_widths): result = FRAME_TOP_LEFT result += FRAME_TOP_COLUMN_SEPARATOR.join( [FRAME_COLUMN_FILLER * width for width in column_widths] ) result += FRAME_TOP_RIGHT return result def _border_center(column_widths): # FIXME unused? 
result = FRAME_CENTER_LEFT result += FRAME_CENTER_COLUMN_SEPARATOR.join( [FRAME_COLUMN_FILLER * width for width in column_widths] ) result += FRAME_CENTER_RIGHT return result def _border_bottom(column_widths): result = FRAME_BOTTOM_LEFT result += FRAME_BOTTOM_COLUMN_SEPARATOR.join( [FRAME_COLUMN_FILLER * width for width in column_widths] ) result += FRAME_BOTTOM_RIGHT return result def _empty_row(row): for column_value in row: if column_value != ROW_SEPARATOR and column_value.strip(): return False return True def _row(row, column_widths, alignments): result = "" columns = [] for i, column_value in enumerate(row): alignment = alignments.get(i, 'left') if column_value == ROW_SEPARATOR: columns.append(ROW_SEPARATOR) elif alignment == 'right': columns.append( FRAME_COLUMN_WHITESPACE * (column_widths[i] - len(ansi_clean(column_value))) + column_value ) elif alignment == 'left': columns.append( column_value + FRAME_COLUMN_WHITESPACE * (column_widths[i] - len(ansi_clean(column_value))) ) elif alignment == 'center': prefix = int((column_widths[i] - len(ansi_clean(column_value))) / 2) suffix = (column_widths[i] - len(ansi_clean(column_value)) - prefix) columns.append( FRAME_COLUMN_WHITESPACE * prefix + column_value + FRAME_COLUMN_WHITESPACE * suffix ) else: raise NotImplementedError("no such alignment: {}".format(alignment)) for i, column_value in enumerate(columns): if i == 0: fill_previous_column = False else: fill_previous_column = columns[i - 1] == ROW_SEPARATOR fill_this_column = column_value == ROW_SEPARATOR if fill_previous_column and fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_BOTH elif fill_previous_column and not fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_LEFT elif not fill_previous_column and fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_RIGHT else: result += FRAME_ROW_COLUMN_SEPARATOR_NONE if fill_this_column: result += FRAME_COLUMN_FILLER * column_widths[i] else: result += column_value if fill_this_column: result += FRAME_ROW_COLUMN_SEPARATOR_LEFT else: result += FRAME_ROW_COLUMN_SEPARATOR_NONE return result[1:-1] # strip exactly one whitespace character at each end def render_table(rows, alignments=None): """ Yields lines for a table. rows must be a list of lists of values, with the first row being considered the heading row. Alternatively, an entire row or individual cells can be set to ROW_SEPARATOR to turn it into a separator: rows = [ ["heading1", "heading2"], ROW_SEPARATOR, ["value1", "value2"], ["value3", ROW_SEPARATOR], ] alignments is a dict mapping column indexes to 'left' or 'right'. 
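For example (illustrative): for line in render_table(rows, alignments={1: 'right'}): print(line)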
""" if alignments is None: alignments = {} column_widths = _column_widths_for_rows(rows) if environ.get("BW_TABLE_STYLE") != 'grep': yield _border_top(column_widths) for row_index, row in enumerate(rows): if row == ROW_SEPARATOR: if environ.get("BW_TABLE_STYLE") != 'grep': yield _row([ROW_SEPARATOR] * len(column_widths), column_widths, {}) elif row_index == 0: # heading row ignores alignments yield _row(row, column_widths, {}) elif environ.get("BW_TABLE_STYLE") != 'grep' or not _empty_row(row): yield _row(row, column_widths, alignments) if environ.get("BW_TABLE_STYLE") != 'grep': yield _border_bottom(column_widths) bundlewrap-4.13.6/bundlewrap/utils/testing.py000066400000000000000000000037621417502274300213170ustar00rootroot00000000000000import platform from subprocess import Popen, PIPE from ..bundle import FILENAME_BUNDLE, FILENAME_ITEMS from ..secrets import FILENAME_SECRETS HOST_OS = { "Darwin": 'macos', "Linux": 'linux', } def host_os(): return HOST_OS[platform.system()] def make_repo(tmpdir, bundles=None, groups=None, nodes=None): bundles = {} if bundles is None else bundles groups = {} if groups is None else groups nodes = {} if nodes is None else nodes bundles_dir = tmpdir.mkdir("bundles") for bundle, attrs_and_items in bundles.items(): for key in attrs_and_items: assert key in ("items", "attrs") bundle_dir = bundles_dir.mkdir(bundle) bundle_dir.mkdir("files") bundlepy = bundle_dir.join(FILENAME_BUNDLE) itemspy = bundle_dir.join(FILENAME_ITEMS) items_content = "" for itemtype, itemconfig in attrs_and_items.get('items', {}).items(): items_content += "{} = {}\n".format(itemtype, repr(itemconfig)) itemspy.write(items_content) bundle_content = "" for attrname, attrvalue in attrs_and_items.get('attrs', {}).items(): bundle_content += "{} = {}\n".format(attrname, repr(attrvalue)) bundlepy.write(bundle_content) tmpdir.mkdir("data") tmpdir.mkdir("hooks") tmpdir.mkdir("libs") groupspy = tmpdir.join("groups.py") groupspy.write("groups = {}\n".format(repr(groups))) nodespy = tmpdir.join("nodes.py") nodespy.write("nodes = {}\n".format(repr(nodes))) secrets = tmpdir.join(FILENAME_SECRETS) secrets.write("[generate]\nkey = {}\n\n[encrypt]\nkey = {}\n".format( "Fl53iG1czBcaAPOKhSiJE7RjFU9nIAGkiKDy0k_LoTc=", "DbYiUu5VMfrdeSiKYiAH4rDOAUISipvLSBJI-T0SpeY=", )) def run(command, path=None): process = Popen(command, cwd=path, shell=True, stderr=PIPE, stdout=PIPE) stdout, stderr = process.communicate() print(stdout.decode('utf-8')) print(stderr.decode('utf-8')) return (stdout, stderr, process.returncode) bundlewrap-4.13.6/bundlewrap/utils/text.py000066400000000000000000000157411417502274300206260ustar00rootroot00000000000000from datetime import datetime, timedelta from io import BytesIO from os import environ from os.path import normpath from random import choice import re from string import digits, ascii_letters from . 
import Fault, STDERR_WRITER ANSI_ESCAPE = re.compile(r'\x1b[^m]*m') HIDE_CURSOR = "\033[?25l" SHOW_CURSOR = "\033[?25h" VALID_NAME_CHARS = digits + ascii_letters + "-_.+" def ansi_clean(input_string): return ANSI_ESCAPE.sub("", force_text(input_string)) def ansi_wrapper(colorizer): if environ.get("BW_COLORS", "1") != "0": return colorizer else: return lambda s, **kwargs: s @ansi_wrapper def blue(text): return "\033[34m{}\033[0m".format(text) @ansi_wrapper def bold(text): return "\033[1m{}\033[0m".format(text) @ansi_wrapper def cyan(text): return "\033[36m{}\033[0m".format(text) @ansi_wrapper def italic(text): return "\033[3m{}\033[0m".format(text) @ansi_wrapper def green(text): return "\033[32m{}\033[0m".format(text) @ansi_wrapper def red(text): return "\033[31m{}\033[0m".format(text) @ansi_wrapper def yellow(text): return "\033[33m{}\033[0m".format(text) def cyan_unless_zero(number): if number == 0: return "0" else: return cyan(str(number)) def green_unless_zero(number): if number == 0: return "0" else: return green(str(number)) def red_unless_zero(number): if number == 0: return "0" else: return red(str(number)) def yellow_unless_zero(number): if number == 0: return "0" else: return yellow(str(number)) def error_summary(errors): if not errors: return if len(errors) == 1: STDERR_WRITER.write(_("\n{x} There was an error, repeated below.\n\n").format( x=red("!!!"), )) STDERR_WRITER.flush() else: STDERR_WRITER.write(_("\n{x} There were {count} errors, repeated below.\n\n").format( count=len(errors), x=red("!!!"), )) STDERR_WRITER.flush() for e in errors: STDERR_WRITER.write(e) STDERR_WRITER.write("\n") STDERR_WRITER.flush() def force_text(data): """ Try to return a text aka unicode object from the given data. """ if isinstance(data, bytes): return data.decode('utf-8', 'replace') elif isinstance(data, Fault): return data.value return data def is_subdirectory(parent, child): """ Returns True if the given child is a subdirectory of the parent. """ parent = normpath(parent) child = normpath(child) if not parent.startswith("/") or not child.startswith("/"): raise ValueError(_("directory paths must be absolute")) if parent == child: return False if parent == "/": return True return child.startswith(parent + "/") def mark_for_translation(s): return s _ = mark_for_translation def prefix_lines(lines, prefix): output = "" for line in lines.splitlines(): output += prefix + line + "\n" return output def randstr(length=24): """ Returns a random alphanumeric string of the given length. """ return ''.join(choice(ascii_letters + digits) for c in range(length)) def validate_name(name): """ Checks whether the given string is a valid name for a node, group, or bundle. 
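For example: >>> validate_name('web-01') True >>> validate_name('.hidden') False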
""" try: for char in name: assert char in VALID_NAME_CHARS assert not name.startswith(".") except AssertionError: return False return True def wrap_question(title, body, question, prefix=""): output = ("{0}\n" "{0} ╭─ {1}\n" "{0} │\n".format(prefix, title)) for line in body.splitlines(): output += "{0} │ {1}\n".format(prefix, line) output += ("{0} │\n" "{0} ╰─ ".format(prefix) + question) return output class LineBuffer: def __init__(self, target): self.buffer = b"" self.record = BytesIO() self.target = target if target else lambda s: None def close(self): self.flush() if self.buffer: self.record.write(self.buffer) self.target(self.buffer) def flush(self): while b"\n" in self.buffer: chunk, self.buffer = self.buffer.split(b"\n", 1) self.record.write(chunk + b"\n") self.target(chunk + b"\n") def write(self, msg): self.buffer += msg self.flush() def format_duration(duration, msec=False): """ Takes a timedelta and returns something like "1d 5h 4m 3s". """ components = [] if duration.days > 0: components.append(_("{}d").format(duration.days)) seconds = duration.seconds if seconds >= 3600: hours = int(seconds / 3600) seconds -= hours * 3600 components.append(_("{}h").format(hours)) if seconds >= 60: minutes = int(seconds / 60) seconds -= minutes * 60 components.append(_("{}m").format(minutes)) if seconds > 0 or not components: if msec: seconds += duration.microseconds / 1000000.0 components.append(_("{:.3f}s").format(seconds)) else: components.append(_("{}s").format(seconds)) return " ".join(components) def format_timestamp(timestamp): return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") def parse_duration(duration): """ Parses a string like "1d 5h 4m 3s" into a timedelta. """ days = 0 seconds = 0 for component in duration.strip().split(" "): component = component.strip() if component[-1] == "d": days += int(component[:-1]) elif component[-1] == "h": seconds += int(component[:-1]) * 3600 elif component[-1] == "m": seconds += int(component[:-1]) * 60 elif component[-1] == "s": seconds += int(component[:-1]) else: raise ValueError(_("{} is not a valid duration string").format(repr(duration))) return timedelta(days=days, seconds=seconds) def toml_clean(s): """ Removes duplicate sections from TOML, e.g.: [foo] <--- this line will be removed since it's redundant [foo.bar] baz = 1 """ lines = list(s.splitlines()) result = [] previous = "" for line in lines.copy(): if line.startswith("[") and line.endswith("]"): if line[1:].startswith(previous + "."): result.pop() previous = line[1:-1] else: previous = "" result.append(line) return "\n".join(result) + "\n" def trim_visible_len_to(line, target_len): use_until = 0 visible_len = 0 in_sequence = False while use_until < len(line) and visible_len < target_len: if line[use_until] == "\033": in_sequence = True elif in_sequence and line[use_until] == "m": in_sequence = False elif not in_sequence: visible_len += 1 use_until += 1 return line[:use_until] bundlewrap-4.13.6/bundlewrap/utils/ui.py000066400000000000000000000332551417502274300202570ustar00rootroot00000000000000from contextlib import contextmanager, suppress from datetime import datetime from functools import wraps from os import _exit, environ, getpid, kill from os.path import join from select import select from shutil import get_terminal_size from signal import signal, SIG_DFL, SIGINT, SIGQUIT, SIGTERM from subprocess import PIPE, Popen import sys import termios from time import time from threading import Event, Lock, Thread from . 
import STDERR_WRITER, STDOUT_WRITER from .table import render_table, ROW_SEPARATOR from .text import ( HIDE_CURSOR, SHOW_CURSOR, ansi_clean, blue, bold, format_duration, mark_for_translation as _, trim_visible_len_to, ) INFO_EVENT = Event() QUIT_EVENT = Event() SHUTDOWN_EVENT_HARD = Event() SHUTDOWN_EVENT_SOFT = Event() TTY = STDOUT_WRITER.isatty() def add_debug_indicator(f): @wraps(f) def wrapped(self, msg, **kwargs): return f(self, "[DEBUG] " + msg, **kwargs) return wrapped def add_debug_timestamp(f): @wraps(f) def wrapped(self, msg, **kwargs): if self.debug_mode: msg = f"[{datetime.now().isoformat()}] {msg}" return f(self, msg, **kwargs) return wrapped def capture_for_debug_logfile(f): @wraps(f) def wrapped(self, msg, **kwargs): if self.debug_log_file and self._active: with self.lock: clean_msg = ansi_clean(msg).rstrip("\n") self.debug_log_file.write( f"[{datetime.now().isoformat()}] {clean_msg}\n" ) return f(self, msg, **kwargs) return wrapped def clear_formatting(f): """ Makes sure formatting from cut-off lines can't bleed into next one """ @wraps(f) def wrapped(self, msg, **kwargs): if TTY and environ.get("BW_COLORS", "1") != "0": msg = "\033[0m" + msg return f(self, msg, **kwargs) return wrapped def sigint_handler(*args, **kwargs): """ This handler is kept short since it interrupts execution of the main thread. It's safer to handle these events in their own thread because the main thread might be holding the IO lock while it is interrupted. """ if not SHUTDOWN_EVENT_SOFT.is_set(): SHUTDOWN_EVENT_SOFT.set() else: SHUTDOWN_EVENT_HARD.set() def sigquit_handler(*args, **kwargs): """ This handler is kept short since it interrupts execution of the main thread. It's safer to handle these events in their own thread because the main thread might be holding the IO lock while it is interrupted. """ INFO_EVENT.set() def spinner(): while True: for c in "⠁⠈⠐⠠⢀⡀⠄⠂": yield c def page_lines(lines): """ View the given list of Unicode lines in a pager (e.g. `less`). """ lines = list(lines) line_width = max([len(ansi_clean(line)) for line in lines]) if ( TTY and ( line_width > get_terminal_size().columns or len(lines) > get_terminal_size().lines ) ): write_to_stream(STDOUT_WRITER, SHOW_CURSOR) env = environ.copy() env["LESS"] = env.get("LESS", "") + " -R" pager = Popen( [environ.get("PAGER", "/usr/bin/less")], env=env, stdin=PIPE, ) with suppress(BrokenPipeError): pager.stdin.write("\n".join(lines).encode('utf-8')) pager.stdin.close() pager.communicate() write_to_stream(STDOUT_WRITER, HIDE_CURSOR) else: for line in lines: io.stdout(line) def write_to_stream(stream, msg): with suppress(BrokenPipeError): if TTY: stream.write(msg) else: stream.write(ansi_clean(msg)) stream.flush() class DrainableStdin: def get_input(self): while True: if QUIT_EVENT.is_set(): return None if select([sys.stdin], [], [], 0.1)[0]: return sys.stdin.readline().strip() def drain(self): if sys.stdin.isatty(): termios.tcflush(sys.stdin, termios.TCIFLUSH) class IOManager: """ Threadsafe singleton class that handles all IO. 
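Use the module-level io instance created at the bottom of this file instead of creating your own; a minimal sketch: io.activate(); io.stdout('hello'); io.deactivate()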
""" def __init__(self): self._active = False self.debug_log_file = None self.debug_mode = False self.jobs = [] self.lock = Lock() self.progress = 0 self.progress_start = None self.progress_total = 0 self._spinner = spinner() self._last_spinner_character = next(self._spinner) self._last_spinner_update = 0 self._signal_handler_thread = None self._child_pids = [] self._status_line_present = False self._waiting_for_input = False def activate(self): self._active = True if 'BW_DEBUG_LOG_DIR' in environ: self.debug_log_file = open(join( environ['BW_DEBUG_LOG_DIR'], "{}_{}.log".format( datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), getpid(), ), ), 'a') self._signal_handler_thread = Thread( target=self._signal_handler_thread_body, ) # daemon mode is required because we need to keep the thread # around until the end of a soft shutdown to wait for a hard # shutdown signal, but don't have a feasible way of stopping # the thread once the soft shutdown has completed self._signal_handler_thread.daemon = True self._signal_handler_thread.start() signal(SIGINT, sigint_handler) signal(SIGQUIT, sigquit_handler) if TTY: write_to_stream(STDOUT_WRITER, HIDE_CURSOR) def ask(self, question, default, epilogue=None, input_handler=DrainableStdin()): assert self._active answers = _("[Y/n]") if default else _("[y/N]") question = question + " " + answers + " " self._waiting_for_input = True with self.lock: if QUIT_EVENT.is_set(): sys.exit(0) self._clear_last_job() while True: write_to_stream(STDOUT_WRITER, "\a" + question + SHOW_CURSOR) input_handler.drain() answer = input_handler.get_input() if answer is None: if epilogue: write_to_stream(STDOUT_WRITER, "\n" + epilogue + "\n") QUIT_EVENT.set() sys.exit(0) elif answer.lower() in (_("y"), _("yes")) or ( not answer and default ): answer = True break elif answer.lower() in (_("n"), _("no")) or ( not answer and not default ): answer = False break write_to_stream( STDOUT_WRITER, _("Please answer with 'y(es)' or 'n(o)'.\n"), ) if epilogue: write_to_stream(STDOUT_WRITER, epilogue + "\n") write_to_stream(STDOUT_WRITER, HIDE_CURSOR) self._waiting_for_input = False return answer def deactivate(self): self._active = False if TTY: write_to_stream(STDOUT_WRITER, SHOW_CURSOR) signal(SIGINT, SIG_DFL) signal(SIGQUIT, SIG_DFL) self._signal_handler_thread.join() if self.debug_log_file: self.debug_log_file.close() @clear_formatting @add_debug_indicator @capture_for_debug_logfile @add_debug_timestamp def debug(self, msg, append_newline=True): if self.debug_mode: with self.lock: self._write(msg, append_newline=append_newline) def job_add(self, msg): if not self._active: return with self.lock: self._clear_last_job() self.jobs.append(msg) self._write_current_job() def job_del(self, msg): if not self._active: return with self.lock: self._clear_last_job() self.jobs.remove(msg) self._write_current_job() def progress_advance(self, increment=1): with self.lock: self.progress += increment def progress_increase_total(self, increment=1): with self.lock: self.progress_total += increment def progress_set_total(self, total): self.progress = 0 self.progress_start = datetime.utcnow() self.progress_total = total def progress_show(self): if INFO_EVENT.is_set(): INFO_EVENT.clear() table = [] if self.jobs: table.append([bold(_("Running jobs")), self.jobs[0].strip()]) for job in self.jobs[1:]: table.append(["", job.strip()]) try: progress = (self.progress / float(self.progress_total)) elapsed = datetime.utcnow() - self.progress_start remaining = elapsed / progress - elapsed except ZeroDivisionError: pass else: 
if table: table.append(ROW_SEPARATOR) table.extend([ [bold(_("Progress")), "{:.1f}%".format(progress * 100)], ROW_SEPARATOR, [bold(_("Elapsed")), format_duration(elapsed)], ROW_SEPARATOR, [ bold(_("Remaining")), _("{} (estimate based on progress)").format(format_duration(remaining)) ], ]) output = blue("i") + "\n" if table: for line in render_table(table): output += ("{x} {line}\n".format(x=blue("i"), line=line)) else: output += _("{x} No progress info available at this time.\n").format(x=blue("i")) io.stderr(output + blue("i")) @clear_formatting @capture_for_debug_logfile @add_debug_timestamp def stderr(self, msg, append_newline=True): with self.lock: self._write(msg, append_newline=append_newline, err=True) @clear_formatting @capture_for_debug_logfile @add_debug_timestamp def stdout(self, msg, append_newline=True): with self.lock: self._write(msg, append_newline=append_newline) @contextmanager def job(self, job_text): self.job_add(job_text) try: yield finally: self.job_del(job_text) def job_wrapper(self, job_text): def outer_wrapper(wrapped_function): @wraps(wrapped_function) def inner_wrapper(*args, **kwargs): with self.job(job_text.format(*args, **kwargs)): return wrapped_function(*args, **kwargs) return inner_wrapper return outer_wrapper def _clear_last_job(self): if self._status_line_present and TTY: write_to_stream(STDOUT_WRITER, "\r\033[K") self._status_line_present = False def _signal_handler_thread_body(self): while self._active: self.progress_show() if not self._waiting_for_input: # do not block and ignore SIGINT while .ask()ing with self.lock: self._clear_last_job() self._write_current_job() if QUIT_EVENT.is_set(): if SHUTDOWN_EVENT_HARD.wait(0.1): self.stderr(_("{x} {signal} cleanup interrupted, exiting...").format( signal=bold(_("SIGINT")), x=blue("i"), )) for ssh_pid in self._child_pids: self.debug(_("killing SSH session with PID {pid}").format(pid=ssh_pid)) with suppress(ProcessLookupError): kill(ssh_pid, SIGTERM) self._clear_last_job() if TTY: write_to_stream(STDOUT_WRITER, SHOW_CURSOR) _exit(130) # https://tldp.org/LDP/abs/html/exitcodes.html else: if SHUTDOWN_EVENT_SOFT.wait(0.1): QUIT_EVENT.set() self.stderr(_( "{x} {signal} canceling pending tasks... 
" "(hit CTRL+C again for immediate dirty exit)" ).format( signal=bold(_("SIGINT")), x=blue("i"), )) def _spinner_character(self): if time() - self._last_spinner_update > 0.2: self._last_spinner_update = time() self._last_spinner_character = next(self._spinner) return self._last_spinner_character def _write(self, msg, append_newline=True, err=False): if not self._active: return self._clear_last_job() if msg is not None: if append_newline: msg += "\n" write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg) self._write_current_job() def _write_current_job(self): if self.jobs and TTY: line = "{} ".format(blue(self._spinner_character())) try: progress = (self.progress / float(self.progress_total)) except ZeroDivisionError: pass else: progress_text = "{:.1f}% ".format(progress * 100) line += bold(progress_text) line += self.jobs[-1] write_to_stream( STDOUT_WRITER, trim_visible_len_to(line, get_terminal_size().columns), ) self._status_line_present = True io = IOManager() bundlewrap-4.13.6/docs/000077500000000000000000000000001417502274300147055ustar00rootroot00000000000000bundlewrap-4.13.6/docs/content/000077500000000000000000000000001417502274300163575ustar00rootroot00000000000000bundlewrap-4.13.6/docs/content/CNAME000066400000000000000000000000241417502274300171210ustar00rootroot00000000000000docs.bundlewrap.org bundlewrap-4.13.6/docs/content/bundlewrap.css000066400000000000000000000030211417502274300212300ustar00rootroot00000000000000@import url('https://fonts.googleapis.com/css?family=Maven+Pro'); @import url('https://fonts.googleapis.com/css?family=Open+Sans:400,400i,700'); @import url('https://fonts.googleapis.com/css?family=Source+Code+Pro:400,700'); body, h1, h2, h3, h4, h5, h6 { background: white; font-family: "Open Sans", Helvetica, sans-serif; } body::before { background-color: white; background-image: none; } h1, h2, h3, h4, h5, h6 { margin-top: 0; margin-bottom: 20px; } hr { border-top: 1px solid #f0f0f0; } .navbar { background: black !important; } .navbar, .navbar-fixed-top { border: 0; } .navbar-brand { font-family: "Maven Pro", Helvetica, sans-serif; } .bs-sidebar .nav > li > a { color: black; } .terminal { background-color: black; border-radius: 5px; color: #d7d7d7; font-family: source-code-pro, monospace; font-size: 12px; line-height: 140%; margin-bottom: 32px; margin-top: 32px; overflow: scroll; padding: 10px; padding-left: 15px; white-space: pre; } .btn-blue { background-color: #1a8acc; border: 1px solid white; color: #f0f0f0; } .btn-blue:hover { color: white; } .btn-blueoutline { background-color: white; border: 1px solid #1a8acc; color: #1a8acc; } .btn-blueoutline:hover { color: black; } /* Hide useless Prev/Next and broken 'Edit on GitHub' */ #navbar-collapse > ul.nav.navbar-nav.ml-auto > li:nth-child(2) { display: none; } #navbar-collapse > ul.nav.navbar-nav.ml-auto > li:nth-child(3) { display: none; } #navbar-collapse > ul.nav.navbar-nav.ml-auto > li:nth-child(4) { display: none; } bundlewrap-4.13.6/docs/content/guide/000077500000000000000000000000001417502274300174545ustar00rootroot00000000000000bundlewrap-4.13.6/docs/content/guide/api.md000066400000000000000000000146361417502274300205610ustar00rootroot00000000000000# API While most users will interact with BundleWrap through the `bw` command line utility, you can also use it from your own code to extract data or further automate config management tasks. Even within BundleWrap itself (e.g. templates, libs, and hooks) you are often given repo and/or node objects to work with. 
Their methods and attributes are documented below. Some general notes on using BundleWrap's API: * There can be an arbitrary amount of `bundlewrap.repo.Repository` objects per process. * Repositories are read as needed and not re-read when something changes. Modifying files in a repo during the lifetime of the matching Repository object may result in undefined behavior.
## Example

Here's a short example of how to use BundleWrap to get the uptime for a node.

    from bundlewrap.repo import Repository

    repo = Repository("/path/to/my/repo")
    node = repo.get_node("mynode")
    uptime = node.run("uptime")
    print(uptime.stdout)
## Reference ### bundlewrap.repo.Repository(path) The starting point of any interaction with BundleWrap. An object of this class represents the repository at the given path. `path` can be a subpath of your repository (e.g., `bundles/nginx/`) and will internally be resolved to the root path of said repository.
**`.branch`** The current git branch of this repo. `None` if not in a git repo.
**`.clean`** Boolean indicating if there are uncommitted changes in git. `None` if not in a git repo.
**`.groups`** A list of all groups in the repo (instances of `bundlewrap.group.Group`)
**`.nodes`** A list of all nodes in the repo (instances of `bundlewrap.node.Node`)
**`.revision`** The current git, hg or bzr revision of this repo. `None` if no SCM was detected.
**`.get_group(group_name)`** Returns the Group object for the given name.
**`.get_node(node_name)`** Returns the Node object with the given name.
**`.nodes_in_all_groups(group_names)`** Returns a list of Node objects where every node is a member of every group name given.
**`.nodes_in_any_group(group_names)`** Returns all Node objects that are a member of at least one of the given group names.
**`.nodes_in_group(group_name)`** Returns a list of Node objects in the named group.
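As a small sketch of these selection methods (the group names are made up for this example):

    from bundlewrap.repo import Repository

    repo = Repository("/path/to/my/repo")
    # nodes that are members of *both* groups
    for node in repo.nodes_in_all_groups(["webservers", "production"]):
        print(node.name)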
### bundlewrap.node.Node() A system managed by BundleWrap.
**`.bundles`** A list of all bundles associated with this node (instances of `bundlewrap.bundle.Bundle`)
**`.groups`** A list of `bundlewrap.group.Group` objects this node belongs to
**`.hostname`** The DNS name BundleWrap uses to connect to this node
**`.items`** A list of items on this node (instances of subclasses of `bundlewrap.items.Item`)
**`.magic_number`** A large number derived from the node's name. This number is very likely to be unique for your entire repository. You can, for example, use this number to easily "jitter" cronjobs:

    '{} {} * * * root /my/script'.format(
        node.magic_number % 60,
        node.magic_number % 2 + 4,
    )
**`.metadata`** A dictionary of custom metadata, merged from information in [nodes.py](../repo/nodes.py.md) and [groups.py](../repo/groups.py.md)
**`.name`** The internal identifier for this node
**`.download(remote_path, local_path)`** Downloads a file from the node. - `remote_path` Which file to get from the node - `local_path` Where to put the file
**`.get_item(item_id)`** Get the Item object with the given ID (e.g. "file:/etc/motd").
**`.has_bundle(bundle_name)`** `True` if the node has a bundle with the given name.
**`.has_any_bundle(bundle_names)`** `True` if the node has a bundle with any of the given names.
**`.in_group(group_name)`** `True` if the node is in a group with the given name.
**`.in_any_group(group_names)`** `True` if the node is in a group with any of the given names.
**`.run(command, may_fail=False)`** Runs a command on the node. Returns an instance of `bundlewrap.operations.RunResult`. - `command` What should be executed on the node - `may_fail` If `False`, `bundlewrap.exceptions.RemoteException` will be raised if the command does not return 0.
**`.upload(local_path, remote_path, mode=None, owner="", group="")`** Uploads a file to the node. - `local_path` Which file to upload - `remote_path` Where to put the file on the target node - `mode` File mode, e.g. "0644" - `owner` Username of the file owner - `group` Group name of the file group
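As a minimal sketch combining these methods (the node name, command, and paths are made up; `return_code` and `stdout` are assumed here to be attributes of the returned `RunResult` object, `stdout` as shown in the example near the top of this page):

    node = repo.get_node("mynode")
    result = node.run("uname -r", may_fail=True)
    if result.return_code == 0:
        print(result.stdout)
    # upload a local file and set ownership and mode in one step
    node.upload("files/motd", "/etc/motd", mode="0644", owner="root", group="root")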
### bundlewrap.group.Group A user-defined group of nodes.
**`.name`** The name of this group
**`.nodes`** A list of all nodes in this group (instances of `bundlewrap.node.Node`, includes subgroup members)
### bundlewrap.utils.Fault

A Fault acts as a lazy stand-in object for the result of a given callback function. These objects are returned from the "vault" attached to `Repository` objects:

    >>> repo.vault.password_for("demo")
    <bundlewrap.utils.Fault object at 0x...>

Only when the `value` property of a Fault is accessed or when the Fault is converted to a string, the callback function is executed. In the example above, this means that the password is only generated when it is really required (e.g. when used in a template).

This is particularly useful when used in metadata in connection with [secrets](secrets.md). Users will be able to generate metadata with Faults in it, even if they lack the required keys for the decryption operation represented by the Fault. The key will only be required for files etc. that actually use it. If a Fault cannot be resolved (e.g. for lack of the required key), BundleWrap can just skip the item using the Fault, while still allowing other items on the same node to be applied.

Faults also support some rudimentary string operations such as appending a string or another Fault, as well as some string methods:

    >>> f = repo.vault.password_for("1") + ":" + repo.vault.password_for("2")
    >>> f
    <bundlewrap.utils.Fault object at 0x...>
    >>> f.value
    'VOd5PC:JUgYUb'
    >>> f += " "
    >>> f.value
    'VOd5PC:JUgYUb '
    >>> f.strip().value
    'VOd5PC:JUgYUb'
    >>> repo.vault.password_for("1").format_into("Password: {}").value
    'Password: VOd5PC'
    >>> repo.vault.password_for("1").b64encode().value
    'Vk9kNVA='
    >>> repo.vault.password_for("1").as_htpasswd_entry("username").value
    'username:$apr1$8be694c7…'

These string methods are supported on Faults: `format`, `lower`, `lstrip`, `replace`, `rstrip`, `strip`, `upper`, `zfill`

bundlewrap-4.13.6/docs/content/guide/cli.md000066400000000000000000000065541417502274300205520ustar00rootroot00000000000000# Command Line Interface

The `bw` utility is BundleWrap's command line interface.
This page is not meant as a complete reference. It provides a starting point to explore the various subcommands. If you're looking for details, `--help` is your friend.
## bw apply
bw apply -i mynode
The most important and most used part of BundleWrap, `bw apply` will apply your configuration to a set of [nodes](../repo/nodes.py.md). By default, it operates in a non-interactive mode. When you're trying something new or are otherwise unsure of some changes, use the `-i` switch to have BundleWrap interactively ask before each change is made.
## bw run
$ bw run mygroup "uname -a"
Unsurprisingly, the `run` subcommand is used to run commands on nodes. As with most commands that accept node names, you can also give a `group` name or any combination of node and group names, separated by commas (without spaces, e.g. `node1,group2,node3`). A third option is to use a bundle selector like `bundle:my_bundle`. It will select all nodes with the named `bundle`. You can freely mix and match node names, group names, and bundle selectors. Negation is also possible for bundles and groups. `!bundle:foo` will add all nodes without the foo bundle, while `!group:foo` will add all nodes that aren't in the foo group.
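For example, the following invocation (with made-up node, bundle, and group names) targets `node1`, every node with the `mybundle` bundle, and every node outside the `legacy` group:

    $ bw run 'node1,bundle:mybundle,!group:legacy' "uptime"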
## bw debug

    $ bw debug
    bundlewrap X.Y.Z interactive repository inspector
    > You can access the current repository as 'repo'.
    >>> len(repo.nodes)
    121

This command will drop you into a Python shell with direct access to BundleWrap's [API](api.md). Once you're familiar with it, it can be a very powerful tool.
## bw plot
You'll need Graphviz installed on your machine for this to be useful.
$ bw plot node mynode | dot -Tsvg -omynode.svg
You won't be using this every day, but it's pretty cool. The above command will create an SVG file (you can open these in your browser) that shows the item dependency graph for the given node. You will see bundles as dashed rectangles, static dependencies (defined in BundleWrap itself) in green, auto-generated dependencies (calculated dynamically each time you run `bw apply`) in blue and dependencies you defined yourself in red. It offers an interesting view into the internal complexities BundleWrap has to deal with when figuring out the order in which your items can be applied to your node.
## bw test
$ bw test
✓ node1  samba  pkg_apt:samba
✘ node1  samba  file:/etc/samba/smb.conf

[...]

+----- traceback from worker ------
|
|  Traceback (most recent call last):
|    File "bundlewrap/concurrency.py", line 78, in _worker_process
|      return_value = target(*msg['args'], **msg['kwargs'])
|    File "<string>", line 378, in test
|  BundleError: file:/etc/samba/smb.conf from bundle 'samba' refers to missing file '/path/to/bundlewrap/repo/bundles/samba/files/smb.conf'
|
+----------------------------------
This command is meant to be run automatically like a test suite after every commit. It will try to catch any errors in your bundles and file templates by initializing every item for every node (but without touching the network). bundlewrap-4.13.6/docs/content/guide/dev_item.md000066400000000000000000000144031417502274300215740ustar00rootroot00000000000000# Custom item types ## Step 0: Understand statedicts To represent supposed vs. actual state, BundleWrap uses statedicts. These are normal Python dictionaries with some restrictions: * keys must be Unicode text * every value must be of one of these simple data types: * bool * float * int * Unicode text * None * ...or a list/tuple containing only instances of one of the types above Additional information can be stored in statedicts by using keys that start with an underscore. You may only use this for caching purposes (e.g. storing rendered file template content while the "real" sdict information only contains a hash of this content). BundleWrap will ignore these keys and hide them from the user. The type restrictions noted above do not apply. ## Step 1: Create an item module Create a new file called `/your/bundlewrap/repo/items/foo.py`. You can use this as a template: from bundlewrap.items import Item class Foo(Item): """ A foo. """ BUNDLE_ATTRIBUTE_NAME = "foo" ITEM_ATTRIBUTES = { 'attribute': "default value", } ITEM_TYPE_NAME = "foo" REQUIRED_ATTRIBUTES = ['attribute'] @classmethod def block_concurrent(cls, node_os, node_os_version): """ Return a list of item types that cannot be applied in parallel with this item type. """ return [] def __repr__(self): return "".format(self.attributes['attribute']) def cdict(self): """ Return a statedict that describes the target state of this item as configured in the repo. Returning `None` instead means that the item should not exist. Implementing this method is optional. The default implementation uses the attributes as defined in the bundle. """ raise NotImplementedError def sdict(self): """ Return a statedict that describes the actual state of this item on the node. Returning `None` instead means that the item does not exist on the node. For the item to validate as correct, the values for all keys in self.cdict() have to match this statedict. """ raise NotImplementedError def display_on_create(self, cdict): """ Given a cdict as implemented above, modify it to better suit interactive presentation when an item is created. If there are any when_creating attributes, they will be added to the cdict before it is passed to this method. Implementing this method is optional. """ return cdict def display_dicts(self, cdict, sdict, keys): """ Given cdict and sdict as implemented above, modify them to better suit interactive presentation. The keys parameter is a list of keys whose values differ between cdict and sdict. Implementing this method is optional. """ return (cdict, sdict, keys) def display_on_delete(self, sdict): """ Given an sdict as implemented above, modify it to better suit interactive presentation when an item is deleted. Implementing this method is optional. """ return sdict def fix(self, status): """ Do whatever is necessary to correct this item. The given ItemStatus object has the following useful information: status.keys_to_fix list of cdict keys that need fixing status.cdict cached copy of self.cdict() status.sdict cached copy of self.sdict() """ raise NotImplementedError
## Step 2: Define attributes

`BUNDLE_ATTRIBUTE_NAME` is the name of the variable defined in a bundle module that holds the items of this type. If your bundle looks like this:

    foo = { [...] }

...then you should put `BUNDLE_ATTRIBUTE_NAME = "foo"` here.

`ITEM_ATTRIBUTES` is a dictionary of the attributes users will be able to configure for your item. For files, that would be stuff like owner, group, and permissions. Every attribute (even if it's mandatory) needs a default value, `None` is totally acceptable:

    ITEM_ATTRIBUTES = {'attr1': "default1"}

`ITEM_TYPE_NAME` sets the first part of an item's ID. For file items, this is "file". Therefore, file IDs look like this: `file:/path`. The second part is the name a user assigns to your item in a bundle. Example:

    ITEM_TYPE_NAME = "foo"

`REQUIRED_ATTRIBUTES` is a list of attribute names that must be set on each item of this type. If BundleWrap encounters an item without all these attributes during bundle inspection, an exception will be raised. Example:

    REQUIRED_ATTRIBUTES = ['attr1', 'attr2']
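With the example item module above, a bundle could then define items like this (the item name and attribute value are placeholders for this sketch):

    foo = {
        "example": {
            'attribute': "some value",
        },
    }

This would result in an item with the ID `foo:example`.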
## Step 3: Implement methods

You should probably start with `sdict()`. Use `self.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object.

The only other method you have to implement is `fix()`. It doesn't have to return anything and just uses `self.run()` to fix the item. To do this efficiently, it can use the provided `status` object: `status.keys_to_fix` indicates which keys differ between the should-be statedict and the actual one, while `status.cdict` and `status.sdict` hold both statedicts in case you need to know their values.

`block_concurrent()` must return a list of item types (e.g. `['pkg_apt']`) that cannot be applied in parallel with this type of item. May include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time).

If you're having trouble, try looking at the [source code for the items that come with BundleWrap](https://github.com/bundlewrap/bundlewrap/tree/master/bundlewrap/items). The `pkg_*` items are pretty simple and easy to understand while `files` is the most complex to date. Or just drop by on [IRC](irc://irc.libera.chat/bundlewrap) or [GitHub](https://github.com/bundlewrap/bundlewrap/discussions), we're glad to help.

bundlewrap-4.13.6/docs/content/guide/env.md000066400000000000000000000103471417502274300205730ustar00rootroot00000000000000# Environment Variables

## `BW_ADD_HOST_KEYS`

As BundleWrap uses OpenSSH to connect to hosts, host key checking is involved. By default, strict host key checking is activated. This might not be suitable for your setup. You can set this variable to `1` to cause BundleWrap to set the OpenSSH option `StrictHostKeyChecking=no`. You can also use `bw -a ...` to achieve the same effect.
## `BW_COLORS` Colors are enabled by default. Setting this variable to `0` tells BundleWrap to never use any ANSI color escape sequences.
## `BW_DEBUG_LOG_DIR` Set this to an existing directory path to have BundleWrap write debug logs there (even when you're running `bw` without `--debug`).
Debug logs are verbose and BundleWrap does not rotate them for you. Putting them on a tmpfs or ramdisk will save your SSD and get rid of old logs every time you reboot your machine.

## `BW_GIT_DEPLOY_CACHE` Optional cache directory for `git_deploy` items.
## `BW_HARDLOCK_EXPIRY` [Hard locks](locks.md) are automatically ignored after some time. By default, it's `"8h"`. You can use this variable to override that default.
## `BW_IDENTITY` When BundleWrap [locks](locks.md) a node, it stores a short description about "you". By default, this is the string `$USER@$HOSTNAME`, e.g. `john@mymachine`. You can use `BW_IDENTITY` to specify a custom string. (No variables will be evaluated in user supplied strings.)
## `BW_ITEM_WORKERS` and `BW_NODE_WORKERS` BundleWrap attempts to parallelize work. These two options specify the number of nodes and items, respectively, which will be handled concurrently. To be more precise, when setting `BW_NODE_WORKERS=8` and `BW_ITEM_WORKERS=2`, BundleWrap will work on eight nodes in parallel, each handling two items in parallel. You can also use the command line options `-p` and `-P`, e.g. `bw apply -p ... -P ... ...`, to achieve the same effect. Command line arguments override environment variables. There is no single default for these values. For example, when running `bw apply`, four nodes are being handled by default. However, when running `bw test`, only one node will be tested by default. `BW_NODE_WORKERS` and `BW_ITEM_WORKERS` apply to *all* these operations. Note that you should not set these variables to very high values. First, it can cause high memory consumption on your machine. Second, not all SSH servers can handle massive parallelism. Please refer to your OpenSSH documentation on how to tune your servers for these situations.
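For example, to work on eight nodes in parallel with two items each (as described above):

    $ BW_NODE_WORKERS=8 BW_ITEM_WORKERS=2 bw apply mygroup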
## `BW_MAX_METADATA_ITERATIONS` Sets the limit of how often metadata reactors will be run for a node before BundleWrap calls it a loop and terminates with an exception. Defaults to `1000`.
## `BW_REPO_PATH` Set this to a path pointing to your BundleWrap repository. If unset, the current working directory is used. Can be overridden with `bw --repository PATH`. Keep in mind that `bw` will also look for a repository in all parent directories until it finds one.
## `BW_SOFTLOCK_EXPIRY` [Soft locks](locks.md) are automatically removed from nodes after some time. By default, it's `"8h"`. You can use this variable to override that default.
## `BW_SSH_ARGS` Extra arguments to include in every call to `ssh` BundleWrap makes. Set this to "-F ~/.ssh/otherconf" to use a different SSH config with BundleWrap. Defaults to `""`.
## `BW_SCP_ARGS` Extra arguments to include in every call to `scp` BundleWrap makes. Defaults to the value of `BW_SSH_ARGS`.
## `BW_TABLE_STYLE` By default, BundleWrap uses Unicode box-drawing characters at various points in its output. Setting this env var to one of the following values changes that behavior:
asciiuse only simple ASCII characters to render tables (useful if your font doesn't properly align box-drawing characters)
grepmake output more grep- and cut-friendly

## `BW_VAULT_DUMMY_MODE` Setting this to `1` will make `repo.vault` return dummy values for every [secret](secrets.md). This is useful for running `bw test` on a CI server that you don't want to trust with your `.secrets.cfg`. bundlewrap-4.13.6/docs/content/guide/installation.md000066400000000000000000000060121417502274300224760ustar00rootroot00000000000000# Installation
You may need to install pip first. This can be accomplished through your distribution's package manager, e.g.:
aptitude install python3-pip
or by following [pip's installation instructions](https://pip.pypa.io/en/stable/installation/).
## Using pip It's as simple as:
pip install bundlewrap
Note that you need at least Python 3.6 to run current versions of BundleWrap.
## From git
This type of install will give you the very latest (and thus possibly broken) bleeding edge version of BundleWrap. You should only use this if you know what you're doing.
The instructions below are for installing on Ubuntu Server 12.10 (Quantal), but should also work for other versions of Ubuntu/Debian. If you're on some other distro, you will obviously have to adjust the package install commands.
The instructions assume you have root privileges.
Install basic requirements:
aptitude install build-essential git python3-dev python3-pip
Clone the GitHub repository:
cd /opt
git clone https://github.com/bundlewrap/bundlewrap.git
Use `pip install -e` to install in "development mode":
pip install -e /opt/bundlewrap
You can now try running the `bw` command line utility:
bw --help
That's it. To update your install, just pull the git repository and have `setup.py` check for new dependencies:
cd /opt/bundlewrap
git pull
python setup.py develop

# Requirements for managed systems While the following list might appear long, even very minimal systems should provide everything that's needed. * `apt-get` (only used with [pkg_apt](../items/pkg_apt.md) items) * `cat` * `chmod` * `chown` * `dpkg` (only used with [pkg_apt](../items/pkg_apt.md) items) * `echo` * `file` * `find` * `grep` * `groupadd` * `groupmod` * `id` * `initctl` (only used with [svc_upstart](../items/svc_upstart.md) items) * `mkdir` * `mv` * `pacman` (only used with [pkg_pacman](../items/pkg_pacman.md) items) * `pamac` (only used with [pkg_pamac](../items/pkg_pamac.md) items) * `rm` * sftp-enabled SSH server (your home directory must be writable) * `sudo` * `sha1sum` * `stat` * `systemctl` (only used with [svc_systemd](../items/svc_systemd.md) items) * `tar` (only used with [git_deploy](../items/git_deploy.md) items) * `useradd` * `usermod` * `zfs` and `zpool` (only used with [zfs_dataset](../items/zfs_dataset.md) and [zfs_pool](../items/zfs_pool.md) items) Additionally, you need to pre-configure your SSH client so that it can connect to your nodes without having to type a password (including `sudo` on the node, which also must *not* have the `requiretty` option set). bundlewrap-4.13.6/docs/content/guide/item_file_templates.md000066400000000000000000000046211417502274300240140ustar00rootroot00000000000000# Writing file templates BundleWrap can use [Mako](http://www.makotemplates.org) or [Jinja2](http://jinja.pocoo.org) for file templating. This enables you to dynamically construct your config files. Templates reside in the `files` subdirectory of a bundle and are bound to a file item using the `source` [attribute](../items/file.md#source). This page explains how to get started with Mako. The most basic example would be:
Hello, this is ${node.name}!
After template rendering, it would look like this:
Hello, this is myexamplenodename!
As you can see, `${...}` can be used to insert the value of a context variable into the rendered file. By default, you have access to two variables in every template: `node` and `repo`. They are `bundlewrap.node.Node` and `bundlewrap.repo.Repository` objects, respectively. You can learn more about the attributes and methods of these objects in the [API docs](api.md), but here are a few examples:
## Examples inserts the DNS hostname of the current node ${node.hostname}
a list of all nodes in your repo % for node in repo.nodes: ${node.name} % endfor
make exceptions for certain nodes % if node.name == "node1": option = foo % elif node.name in ("node2", "node3"): option = bar % else: option = baz % endif
check for group membership % if node.in_group("sparkle"): enable_sparkles = 1 % endif
check for membership in any of several groups % if node.in_any_group(("sparkle", "shiny")): enable_fancy = 1 % endif
check for bundle % if node.has_bundle("sparkle"): enable_sparkles = 1 % endif
check for any of several bundles % if node.has_any_bundle(("sparkle", "shiny")): enable_fancy = 1 % endif
list all nodes in a group % for gnode in repo.get_group("mygroup").nodes: ${gnode.name} % endfor
## Working with node metadata Quite often you will attach custom metadata to your nodes in `nodes.py`, e.g.: nodes = { "node1": { "metadata": { "interfaces": { "eth0": "10.1.1.47", "eth1": "10.1.2.47", }, }, }, } You can easily access this information in templates: % for interface, ip in sorted(node.metadata["interfaces"].items()): interface ${interface} ip = ${ip} % endfor This template will render to: interface eth0 ip = 10.1.1.47 interface eth1 ip = 10.1.2.47 bundlewrap-4.13.6/docs/content/guide/kubernetes.md000066400000000000000000000053111417502274300221450ustar00rootroot00000000000000# Kubernetes
Support for Kubernetes is experimental at this time. Backwards-incompatible changes may happen at any time.
To manage a Kubernetes cluster with BundleWrap, you first need to set up a kubectl context that works with the cluster. If you're running on Google Kubernetes Engine for example, this can be accomplished with: gcloud auth login gcloud container clusters get-credentials your-cluster --zone your-zone --project your-project You also need to make sure context names are the same on your teammates' machines.
## Setting up a node Each Kubernetes cluster you manage becomes a node. Here is an example `nodes.py`: nodes = { "my-cluster": { 'os': 'kubernetes', 'bundles': ["my-app"], 'kubectl_context': "my-context", }, }
## Kubernetes bundles

You can then proceed to write bundles as with regular nodes, but using the [k8s_ items](../items/k8s.md):

    k8s_namespaces = {
        "my-app": {},
    }

    k8s_deployments = {
        "my-app/my-deployment": {
            'manifest': {
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "app": "nginx",
                        },
                    },
                    "replicas": 2,
                    "template": {
                        "metadata": {
                            "labels": {
                                "app": "nginx",
                            },
                        },
                        "spec": {
                            "containers": [
                                {
                                    "name": "nginx",
                                    "image": "nginx:latest",
                                    "ports": [
                                        {"containerPort": 80},
                                    ]
                                },
                            ],
                        },
                    },
                },
            },
        },
    }

All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `Kind` and `metadata/name` keys, but you can override them if you must.

Alternatively, you can keep your resource definitions in manifest files:

    k8s_namespaces = {
        "my-app": {},
    }

    k8s_deployments = {
        "my-app/my-deployment": {
            'manifest_file': "my_deployment.yaml",
        },
    }

BundleWrap will then look for `my_deployment.yaml` in `bundles/<bundle>/manifests/`. You can also use [templating](../items/k8s.md#manifest_processor) in these files.

bundlewrap-4.13.6/docs/content/guide/locks.md000066400000000000000000000076511417502274300211100ustar00rootroot00000000000000# Locking

BundleWrap's decentralized nature makes it necessary to coordinate actions between users of a shared repository. Locking is an important part of collaborating using BundleWrap.

## Hard locks

Since very early in the history of BundleWrap, what we call "hard locks" were used to prevent multiple users from using `bw apply` on the same node at the same time. When BundleWrap finds a hard lock on a node in interactive mode, it will display information about who acquired the lock (and when) and will ask whether to ignore the lock or abort the process. In non-interactive mode, the operation is always cancelled for the node in question unless `--force` is used.

## Soft locks

Many teams these days are using a workflow based on pull requests. A common problem here is that changes from a feature branch might already have been applied to a set of nodes, while the master branch is still lacking these changes. While the pull request is open and waiting for review, other users might rightly use the master branch to apply to all nodes, reverting changes made by the feature branch. This can be a major nuisance.

As of version 2.6.0, BundleWrap provides "soft locks" to prevent this. The author of a feature branch can now lock the node so only they can use `bw apply` on it:
$ bw lock add node1
✓ node1  locked with ID B9JS (expires in 8h)
This will prevent all other users from changing any items on the node for the next 8 hours. BundleWrap will tell users apart by their [BW_IDENTITY](env.md#BW_IDENTITY). Now say someone else is reviewing the pull request and wants to use `bw apply`, while still keeping others out and the original author in. This can be done by simply locking the node *again* as the reviewer. Nodes can have many soft locks. Soft locks act as an exemption from a general ban on changing items that goes into effect as soon as one or more soft locks are present on the node. Of course, if no soft locks are present, anyone can change any item. You can list all soft locks on a node with:
$ bw lock show node1
i node1  ID    Created              Expires              User   Items  Comment
› node1  Y1KD  2016-05-25 21:30:25  2016-05-26 05:30:25  alice  *      locks are awesome
› node1  B9JS  2016-05-24 13:10:11  2016-05-27 08:10:11  bob    *      me too
Note that each lock is identified by a case-insensitive 4-character ID that can be used to remove the lock:
$ bw lock remove node1 y1kd
✓ node1  lock Y1KD removed
Expired locks are automatically and silently purged whenever BundleWrap has the opportunity. Be sure to check out `bw lock add --help` for how to customize expiration time, add a short comment explaining the reason for the lock, or lock only certain items. Using `bw apply` on a soft locked node is not an error and affected items will simply be skipped. ## Locking non-UNIX nodes Most of the time, BundleWrap assumes that your target system is a UNIX-like operating system. It then stores locks as files in the node's local file system (`/var/lib/bundlewrap` by default). BundleWrap supports managing non-UNIX nodes, too, such as Kubernetes. You can also write your own custom item types to manage hardware. In those situations, BundleWrap has no place to store lock files. You can solve this by designating another regular UNIX node as a "locking node":
nodes['my.k8s.cluster'] = {
    'locking_node': 'my.openbsd.box',
    'os': 'kubernetes',
    'metadata': {
        ...
    },
}
`my.openbsd.box` is the name of another regular node, which must be managed by BundleWrap. You can now use all the usual locking mechanisms when working with `my.k8s.cluster` and its locks will be stored on `my.openbsd.box`. (They will, of course, not conflict with regular locks for `my.openbsd.box`.) A locking node can host locks for as many other nodes as you wish. bundlewrap-4.13.6/docs/content/guide/migrate_12.md000066400000000000000000000051111417502274300217260ustar00rootroot00000000000000# Migrating from BundleWrap 1.x to 2.x As per [semver](http://semver.org), BundleWrap 2.0 breaks compatibility with repositories created for BundleWrap 1.x. This document provides a guide on how to upgrade your repositories to BundleWrap 2.x. Please read the entire document before proceeding. To aid with the transition, BundleWrap 1.6.0 has been released along with 2.0.0. It contains no new features over 1.5.x, but has builtin helpers to aid your migration to 2.0.
## items.py In every bundle, rename `bundle.py` to `items.py`. BundleWrap 1.6.0 can do this for you by running `bw migrate`.
## Default file content type The default `content_type` for [file items](../items/file.md) has changed from "mako" to "text". This means that you need to check all file items that do not define an explicit content type of "mako". Some of them might be fine because you didn't really need templating, while others may need to have their `content_type` set to "mako" explicitly. BundleWrap 1.6.0 will print warnings for every file item affected when running `bw test`.
## Metadata merging The merging behavior for node and group metadata has changed. Instead of a simple `dict.update()`, metadata dicts are now merged recursively. See [the docs](../repo/groups.py.md#metadata) for details.
## Metadata processors and item generators These two advanced features have been replaced by a single new mechanism: [metadata.py](../repo/metadata.py.md) You will need to rethink and rewrite them. BundleWrap 1.6.0 will print warnings for every group that uses metadata processors and any item generators when running `bw test`.
## Custom item types The API for defining your own items has changed. Generally, you should be able to upgrade your items with relatively little effort. Refer to [the docs](dev_item.md) for details.
## Deterministic templates While not a strict requirement, it is highly recommended to ensure your entire configuration can be created deterministically (i.e. remains exactly the same no matter how often you generate it). Otherwise, you won't be able to take advantage of the new functionality provided by `bw hash`. A common pitfall here is iteration over dictionaries in templates: % for key, value in my_dict.items(): ${value} % endfor Standard dictionaries in Python have no defined order. This may result in lines occasionally changing their position. To solve this, you can simply use `sorted()`: % for key, value in sorted(my_dict.items()): ${value} % endfor
## Hook arguments Some [hooks](../repo/hooks.md) had their arguments adjusted slightly. bundlewrap-4.13.6/docs/content/guide/migrate_23.md000066400000000000000000000053341417502274300217370ustar00rootroot00000000000000# Migrating from BundleWrap 2.x to 3.x As per [semver](http://semver.org), BundleWrap 3.0 breaks compatibility with repositories created for BundleWrap 2.x. This document provides a guide on how to upgrade your repositories to BundleWrap 3.x. Please read the entire document before proceeding.
## metadata.py BundleWrap 2.x simply used all functions in `metadata.py` whose names don't start with an underscore as metadata processors. This led to awkward imports like `from foo import bar as _bar`. BundleWrap 3.x requires a decorator for explicitly designating functions as metadata processors: @metadata_processor def myproc(metadata): return metadata, DONE You will have to add `@metadata_processor` to each metadata processor function. There is no need to import it; it is provided automatically, just like `node` and `repo`. The accepted return values of metadata processors have changed as well. Metadata processors now always have to return a tuple with the first element being a dictionary of metadata and the remaining elements made up of various options to tell BundleWrap what to do with the dictionary. In most cases, you will want to return the `DONE` options as in the example above. There is no need to import options, they're always available. When you previously returned `metadata, False` from a metadata processor, you will now have to return `metadata, RUN_ME_AGAIN`. For a more detailed description of the available options, see [the documentation](../repo/metadata.py.md).
## File and directory ownership defaults [Files](../items/file.md), [directories](../items/directory.md), and [symlinks](../items/symlink.md) now have default values for the ownership and mode attributes. Previously the default was to ignore them. It's very likely that you won't have to do anything here, just be aware.
## systemd services enabled by default Again, just be [aware](../items/svc_systemd.md), it's probably what you intended anyway.
## Environment variables The following [env vars](env.md) have been renamed (though the new names have already been available for a while, so chances are you're already using them):
| Old | New |
| --- | --- |
| `BWADDHOSTKEYS` | `BW_ADD_HOST_KEYS` |
| `BWCOLORS` | `BW_COLORS` |
| `BWITEMWORKERS` | `BW_ITEM_WORKERS` |
| `BWNODEWORKERS` | `BW_NODE_WORKERS` |

## Item.display_keys and Item.display_dicts If you've written your own items and used the `display_keys()` or `display_dicts()` methods or the `BLOCK_CONCURRENT` attribute, you will have to update them to the [new API](dev_item.md). bundlewrap-4.13.6/docs/content/guide/migrate_34.md000066400000000000000000000047011417502274300217360ustar00rootroot00000000000000# Migrating from BundleWrap 3.x to 4.x As per [semver](http://semver.org), BundleWrap 4.0 breaks compatibility with repositories created for BundleWrap 3.x. This document provides a guide on how to upgrade your repositories to BundleWrap 4.x. Please read the entire document before proceeding.
## metadata.py

Metadata processors have been split into defaults and reactors. See [metadata.py](../repo/metadata.py.md) for details.

Generally speaking, metadata processors that returned `DONE, DEFAULTS` can be turned into defaults.

    @metadata_processor
    def foo(metadata):
        return {"bar": 47}

becomes

    defaults = {
        "bar": 47,
    }

Metadata processors that return `OVERWRITE, RUN_ME_AGAIN` or otherwise depend on other metadata need to be turned into reactors:

    @metadata_processor
    def foo(metadata):
        metadata["bar"] = metadata["baz"] + 5
        return metadata, OVERWRITE, RUN_ME_AGAIN

becomes

    @metadata_reactor
    def foo(metadata):
        return {
            "bar": metadata.get("baz") + 5,
        }
## members_add and members_remove These must be replaced by other mechanism, such as the newly-available `groups` attribute on individual nodes. Also note that you can now do `bw apply 'lambda:node.metadata["env"] == "prod"'` so you may no longer have a need to create groups based on metadata.
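For example, a node could be assigned to groups directly like this (names are placeholders for this sketch):

    nodes["node1"] = {
        'groups': {
            "webservers",
            "production",
        },
    }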
## Plugins The plugin system has been removed since it saw barely any use. The most popular plugin, the `git_deploy` item is now built into BundleWrap itself. rm plugins.json rm items/git_deploy.py
## Command line argument parsing

Previously, `bw` used a comma-separated syntax to specify targets for certain actions such as `bw apply`. We now use a space-separated style:

    bw apply node1,node2

becomes

    bw apply node1 node2

This may appear trivial, but might lead to confusion with people not used to providing multiple multi-value space-separated arguments on the command line.

    bw nodes -a all node1

becomes

    bw nodes -a all -- node1

The `--` is necessary so we can tell when the argument list for `-a` ends. Here is another example:

    bw nodes -a hostname,bundles node1,node2

becomes

    bw nodes -a hostname bundles -- node1 node2

While a little more verbose, this style lets us use proper shell quoting for argument tokens.
## Minor changes For everything else, please consult the [changelog](https://github.com/bundlewrap/bundlewrap/blob/master/CHANGELOG.md#400). bundlewrap-4.13.6/docs/content/guide/os_compatibility.md000066400000000000000000000013071417502274300233510ustar00rootroot00000000000000# OS compatibility BundleWrap by necessity takes a pragmatic approach to supporting different operating systems and distributions. Our main target is Linux, but support for other UNIXes is also evolving. We cannot guarantee to be compatible with every distribution and BSD flavor under the sun, but we try to cover the common ones.
## node.os and node.os_version You should set these attributes for every node. Giving BundleWrap this information allows us to adapt some built-in behavior.
## other node attributes In some cases (e.g. when not using sudo) you will need to manually adjust some things. Check the docs [on node-level OS overrides](../repo/nodes.py.md#os-compatibility-overrides). bundlewrap-4.13.6/docs/content/guide/quickstart.md000066400000000000000000000122021417502274300221650ustar00rootroot00000000000000Quickstart ========== This is the 10 minute intro into BundleWrap. Fasten your seatbelt. Installation ------------ First, open a terminal and install BundleWrap:
pip install bundlewrap
Create a repository ------------------- Now you'll need to create your [repository](../repo/layout.md):
mkdir my_bundlewrap_repo
cd my_bundlewrap_repo
bw repo create
You will note that some files have been created. Let's check them out:
cat nodes.py
cat groups.py
The contents should be fairly self-explanatory, but you can always check the [docs](../repo/layout.md) on these files if you want to go deeper.
It is highly recommended to use git or a similar tool to keep track of your repository. You may want to start doing that right away.
At this point you will want to edit `nodes.py` and maybe change "localhost" to the hostname of a system you have passwordless (including sudo) SSH access to.
BundleWrap will honor your `~/.ssh/config`, so if `ssh mynode.example.com sudo id` works without any password prompts in your terminal, you're good to go.
If you want to keep working on "localhost", then the above still applies in that `ssh localhost sudo id` needs to work without any password prompts. Verify that:

1. an SSH daemon is listening on 127.0.0.1
2. a relevant public key is in the `~/.ssh/authorized_keys` file
3. the `~/.ssh/authorized_keys` file is not world-writable
4. your user or group has `ALL=(ALL) NOPASSWD: ALL` set in the `/etc/sudoers` file

Run a command
-------------

The first thing you can do is run a command on your army of one node:
bw -a run node-1 "uptime"
The `-a` switch tells `bw` to automatically trust unknown SSH host keys (when you're connecting to a new node). By default, only known host keys will be accepted.
You should see something like this:
› node-1   20:16:26 up 34 days,  4:10,  0 users,  load average: 0.00, 0.01, 0.05
✓ node-1  completed after 0.366s
Instead of a node name ("node-1" in this case) you can also use a group name (such as "all") from your `groups.py`. Create a bundle --------------- BundleWrap stores node configuration in bundles. A bundle is a collection of *items* such as files, system packages or users. To create your first bundle, type:
bw repo bundle create mybundle
Now that you have created your bundle, it's important to tell BundleWrap which nodes will have this bundle. You can assign bundles to nodes using either `groups.py` or `nodes.py`; here we'll use the latter:

    nodes = {
        'node-1': {
            'bundles': (
                "mybundle",
            ),
            'hostname': "mynode-1.local",
        },
    }

Create a file template
----------------------

To manage a file, you need two things:

1. a file item in your bundle
2. a template for the file contents

Add this to your `bundles/mybundle/items.py`:

    files = {
        '/etc/motd': {
            'content_type': 'mako',  # use the Mako template engine for this file
            'source': "mymotd",      # filename of the template
        },
    }

Then write the file template:
vim bundles/mybundle/files/mymotd
You can use this for example content:
Welcome to ${node.name}!
Note that the `source` attribute in `items.py` contains a path relative to the `files` directory of your bundle. Apply configuration ------------------- Now all that's left is to run `bw apply`:
bw apply -i node-1
BundleWrap will ask to replace your previous MOTD:
i node-1  started at 2016-02-13 21:25:45
? node-1
? node-1  ╭─ file:/etc/motd
? node-1  │
? node-1  │  content
? node-1  │  --- <node>
? node-1  │  +++ <bundlewrap>
? node-1  │  @@ -1 +1 @@
? node-1  │  -your old motd
? node-1  │  +Welcome to node-1!
? node-1  │
? node-1  ╰─ Fix file:/etc/motd? [Y/n]
That completes the quickstart tutorial! Further reading --------------- Here are some suggestions on what to do next: * set up [SSH multiplexing](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing) for significantly better performance * take a moment to think about what groups and bundles you will create * read up on how a [BundleWrap repository](../repo/layout.md) is laid out * ...especially what [types of items](../repo/items.py.md#item-types) you can add to your bundles * familiarize yourself with [the Mako template language](http://www.makotemplates.org/) * explore the [command line interface](cli.md) * follow [@bundlewrap](https://twitter.com/bundlewrap) on Twitter Have fun! If you have any questions, feel free to drop by [on IRC](irc://irc.libera.chat/bundlewrap) or [GitHub](https://github.com/bundlewrap/bundlewrap/discussions). bundlewrap-4.13.6/docs/content/guide/secrets.md000066400000000000000000000141671417502274300214570ustar00rootroot00000000000000# Handling secrets We strongly recommend **not** putting any sensitive information such as passwords or private keys into your repository. This page describes the helpers available in BundleWrap to manage those secrets without checking them into version control.
Most of the functions described here return lazy Fault objects.

## .secrets.cfg When you initially ran `bw repo create`, a file called `.secrets.cfg` was put into the root level of your repo. It's an INI-style file that by default contains two random keys BundleWrap uses to protect your secrets. One for randomly generated passwords, one for symmetric encryption.
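Schematically, its contents look something like this (the section names here match the default keys mentioned further down this page, but both the layout and the key material shown are only a made-up sketch):

    [generate]
    key = FmFkZS11cC1rZXktbWF0ZXJpYWw=

    [encrypt]
    key = YW5vdGhlci1tYWRlLXVwLWtleQ==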
You should never commit .secrets.cfg. Immediately add it to your .gitignore or equivalent.

## Derived passwords In some cases, you can control (i.e. manage with BundleWrap) both ends of the authentication process. A common example is a config file for a web application that holds credentials for a database also managed by BundleWrap. In this case, you don't really care what the password is, you just want it to be the same on both sides. To accomplish that, just write this in your template (Mako syntax shown here):
database_user = "foo"
database_password = "${repo.vault.password_for("my database")}"
In your bundle, you can then configure your database user like this: postgres_roles = { "foo": { 'password': repo.vault.password_for("my database"), }, } It doesn't really matter what string you call `password_for()` with, it just has to be the same on both ends. BundleWrap will then use that string, combine it with the default key called `generate` in your `.secrets.cfg` and derive a random password from that. This makes it easy to change all your passwords at once (e.g. when an employee leaves or when required for compliance reasons) by rotating keys.
However, it also means you have to guard your .secrets.cfg very closely. If it is compromised, so are all your passwords. Use your own judgement.
### "Human" passwords As an alternative to `password_for()`, which generates random strings, you can use `human_password_for()`.It generates strings like `Wiac-Kaobl-Teuh-Kumd-40`. They are easier to handle for human beings. You might want to use them if you have to type those passwords on a regular basis. ### Random bytes `password_for()` and `human_password_for()` are meant for passwords. If you need plain random bytes, you can use `random_bytes_as_base64_for()`. As the name implies, it will return the data base64 encoded. Some examples:
$ bw debug -c 'print(repo.vault.random_bytes_as_base64_for("foo"))'
qczM0GUKW7YlXEuW8HGPYkjCGaX4Vu9Fja5SIZWga7w=
$ bw debug -c 'print(repo.vault.random_bytes_as_base64_for("foo", length=1))'
qQ==

## Static passwords When you need to store a specific password, you can encrypt it symmetrically. The `bw pw` utility provides command line access to the functions attached to `repo.vault`, so we'll use it here to generate the encrypted string:
$ bw pw --encrypt 'my password'
gAAAA[...]mrVMA==
You can then use this encrypted password in a template like this:
database_user = "foo"
database_password = "${repo.vault.decrypt("gAAAA[...]mrVMA==")}"

## Files You can also encrypt entire files:
$ bw pw --encrypt --file /my/secret.file encrypted.file
Encrypted files are always read and written relative to the data/ subdirectory of your repo.
If the source file was encoded using UTF-8, you can then simply pass the decrypted content into a file item: files = { "/secret": { 'content': repo.vault.decrypt_file("encrypted.file"), }, } If the source file is binary however (or any encoding other than UTF-8), you must use base64: files = { "/secret": { 'content': repo.vault.decrypt_file_as_base64("encrypted.file"), 'content_type': 'base64', }, }
## External commands To retrieve secrets using a local shell command, use `vault.cmd`:
$ bw debug -c "print(repo.vault.cmd('uname'))"
Linux
By default, the stdout of the given command will be decoded to text using UTF-8 and have leading and trailing whitespace stripped. To prevent this, use the `as_text` and `strip` parameters:
$ bw debug -c "print(repo.vault.cmd('uname', as_text=False, strip=False))"
b'Linux\n'
The point of using `repo.vault.cmd()` is that (like the other functions introduced on this page) it will return a lazy `Fault` object that will call the command only if and when the value is actually needed (e.g. when rendering a file template).
## Key management ### Multiple keys You can always add more keys to your `.secrets.cfg`, but you should keep the defaults around. Adding more keys makes it possible to give different keys to different teams. **By default, BundleWrap will skip items it can't find the required keys for**. When using `.password_for()`, `.encrypt()` etc., you can provide a `key` argument to select the key: repo.vault.password_for("some database", key="devops") On the command line, `bw pw` also accepts `--key` for this purpose. The encrypted data will be prefixed by `yourkeyname$...` to indicate that the key `yourkeyname` was used for encryption. Thus, during decryption, you can omit the `key=` parameter.
### Rotating keys You can generate a new key by running `bw debug -c "print(repo.vault.random_key())"`. Place the result in your `.secrets.cfg`. Then you need to distribute the new key to your team and run `bw apply` for all your nodes.
Note that `encrypt()` and `decrypt()` (plus their `file_` counterparts) use symmetric encryption and require manually updating the encrypted text after the key has changed.
bundlewrap-4.13.6/docs/content/guide/selectors.md000066400000000000000000000021161417502274300220010ustar00rootroot00000000000000# Node selectors These can be used on the command line to select a number of nodes. See `bw apply --help` for a list of possible uses.
# Item selectors Item selectors provide a way to address multiple items e.g. when specifying dependencies between them.
| Example selector | Meaning |
| --- | --- |
| `file:/etc/motd` | a single item |
| `file:` | all items of that type |
| `bundle:foo` | all items in that bundle |
| `tag:foo` | all items with that tag |
| `tag:` | all items with any tag |
All selectors can be prefixed with `!` to select the inverse (e.g. `!tag:` means "all items without any tag"). Note that when you have a file item and add a dependency to `file:`, BundleWrap will resolve this to all *other* files. Similarly, when you add a dependency on `tag:` to all items with a certain tag through [bundle.py](../repo/bundle.py.md), this will only target *other* tags to avoid an immediate loop.

bundlewrap-4.13.6/docs/content/guide/toml.md000066400000000000000000000051021417502274300207500ustar00rootroot00000000000000# TOML nodes and groups

The primary way to define nodes is in [nodes.py](../repo/nodes.py.md). However, BundleWrap also provides a built-in alternative that you can use to define each node in a [TOML](https://github.com/toml-lang/toml) file. Doing this has pros and cons, which is why you can choose which way is best for you.

*Pros*

* One file per node
* Node files are machine-readable and -writeable
* Easier on the eyes for nodes with simple metadata

*Cons*

* Does not support [Fault objects](../api/#bundlewraputilsfault)
* Does not support [atomic()](../repo/groups.py.md#metadata)
* Does not support `None`
* Does not support sets or tuples
* More difficult to read for long, deeply nested metadata
## Using TOML nodes First, you have to make sure your `nodes.py` doesn't overwrite your TOML nodes. Check if your `nodes.py` overwrites the `nodes` dict: nodes = { # bad "my_node": {...}, } TOML nodes will be added to the `nodes.py` context automatically, so change your `nodes.py` to add to them (or just leave the file empty): nodes["my_node"] = { # good ... } Now you are all set to create your first TOML node. Create a file called `nodes/nodenamegoeshere.toml`: hostname = "tomlnode.example.com" bundles = [ "bundle1", "bundle2", ] [metadata] foo = "bar" [metadata.baz] frob = 47 And that's it. This node will now be added to your other nodes. You may use subdirectories of `nodes/`, but the node name will always just be the filename minus the ".toml" extension.
## Converting existing nodes This is an easy one line operation: bw debug -n nodenamegoeshere -c "node.toml_save()" Don't forget to remove the original node though.
## Editing TOML nodes from Python BundleWrap uses [tomlkit](https://github.com/sdispater/tomlkit) internally and exposes a `TOMLDocument` instance as `node.toml` for you to modify: $ bw debug -n nodenamegoeshere >>> node.file_path nodes/nodenamegoeshere.toml >>> node.toml['bundles'].append("bundle3") >>> node.toml_save() For your convenience, `.toml_set()` is also provided to easily set nested dict values: >>> node.toml_set("metadata/foo/bar/baz", 47) >>> node.toml_save() This should make it pretty straightforward to make changes to lots of nodes without the headaches of using `sed` or something of that nature to edit Python code in `nodes.py`.
## TOML groups They work exactly the same way as nodes, but have their own `groups/` directory. `.toml`, `.toml_set()` and `toml_save()` are also found on `Group` objects. bundlewrap-4.13.6/docs/content/index.md000066400000000000000000000025751417502274300200210ustar00rootroot00000000000000

BundleWrap documentation

If you run into a problem that is not answered in these docs, please find us on [GitHub](https://github.com/bundlewrap/bundlewrap/discussions) or [IRC](irc://irc.libera.chat/bundlewrap). We’re happy to help! Should you already know your way around, just click on the part of your repo that you need help with: bundlewrap-4.13.6/docs/content/items/000077500000000000000000000000001417502274300175005ustar00rootroot00000000000000bundlewrap-4.13.6/docs/content/items/action.md000066400000000000000000000033671417502274300213100ustar00rootroot00000000000000# Actions Actions will be run on every `bw apply`. They differ from regular items in that they cannot be "correct" in the first place. They can only succeed or fail. actions = { 'check_if_its_still_linux': { 'command': "uname", 'expected_return_code': 0, 'expected_stdout': "Linux\n", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## command The only required attribute. This is the command that will be run on the node with root privileges.
## data_stdin You can pipe data directly to the command running on the node. To do so, use this attribute. If it's a string or unicode object, it will always be encoded as UTF-8. Alternatively, you can use raw bytes.
## expected_return_code Defaults to `0`. If the return code of your command is anything else, the action is considered failed. You can also specify a list, set or tuple and the action is considered failed if the command's return code is not contained in that enumeration. You can also set this to `None` and any return code will be accepted.
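To illustrate, here is a sketch combining `data_stdin` with a tolerant `expected_return_code` (the command and values are made up):

    actions = {
        'accept_eula': {
            'command': "some-tool --accept-eula",
            'data_stdin': "yes\n",
            'expected_return_code': [0, 2],
        },
    }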
## expected_stdout If this is given, the stdout output of the command must match the given string or the action is considered failed.
## expected_stderr Same as `expected_stdout`, but with stderr.
## interactive If set to `True`, this action will be skipped in non-interactive mode. If set to `False`, this action will always be executed without asking (even in interactive mode). Defaults to `None`.
Think hard before setting this to False. People might assume that interactive mode won't do anything without their consent.
bundlewrap-4.13.6/docs/content/items/directory.md000066400000000000000000000016511417502274300220310ustar00rootroot00000000000000# Directory items directories = { "/path/to/directory": { "mode": "0755", "owner": "root", "group": "root", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## group Name of the group this directory belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
## mode Directory mode as returned by `stat -c %a <directory>`. Defaults to `755`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
## owner Username of the directory's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
## purge Set this to `True` to remove everything from this directory that is not managed by BundleWrap. Defaults to `False`. bundlewrap-4.13.6/docs/content/items/file.md000066400000000000000000000124601417502274300207440ustar00rootroot00000000000000# File items Manage regular files. files = { "/path/to/file": { "mode": "0644", "owner": "root", "group": "root", "content_type": "mako", "encoding": "utf-8", "source": "my_template", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## content May be used instead of `source` to provide file content without a template file.
## content_type How the file pointed to by `source` or the string given to `content` should be interpreted.
| Value | Effect |
| --- | --- |
| `any` | only cares about file owner, group, and mode |
| `base64` | content is decoded from base64 |
| `download` | file will be downloaded from the URL specified in `source` |
| `binary` | file is uploaded verbatim, no content processing occurs |
| `jinja2` | content is interpreted by the Jinja2 template engine |
| `mako` | content is interpreted by the Mako template engine |
| `text` (default) | will be read and diffed as UTF-8, but offers no template logic |

## context Only used with Mako and Jinja2 templates. The values of this dictionary will be available from within the template as variables named after the respective keys.
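For example (a sketch with a made-up file and variable), the `port` key below would be available as `${port}` inside a Mako template:

    files = {
        "/etc/app.conf": {
            'content_type': 'mako',
            'context': {
                'port': 8080,
            },
        },
    }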
## delete When set to `True`, the path of this file will be removed. It doesn't matter if there is not a file but a directory or something else at this path. When using `delete`, no other attributes are allowed.
## encoding Encoding of the target file. Note that this applies to the remote file only, your template is still conveniently written in UTF-8 and will be converted by BundleWrap. Defaults to "utf-8". Other possible values (e.g. "latin-1") can be found [here](http://docs.python.org/2/library/codecs.html#standard-encodings). Only allowed with `content_type` `jinja2`, `mako`, or `text`.
## group Name of the group this file belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. If `group` is set to `None` and the file does not exist yet, `group` will be the primary group of the ssh user.
## mode File mode as returned by `stat -c %a <file>`. Defaults to `644`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. If `mode` is set to `None` and the file does not exist yet, `mode` will be `0644`.
## owner Username of the file's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. If `owner` is set to `None` and the file does not exist yet, `owner` will be the ssh user.
## source File name of the file template. If this says `my_template`, BundleWrap will look in `data/my_bundle/files/my_template` and then `bundles/my_bundle/files/my_template`. Most of the time, you will want to put config templates into the latter directory. The `data/` subdirectory is meant for files that are very specific to your infrastructure (e.g. DNS zone files). This separation allows you to write your bundles in a generic way so that they could be open-sourced and shared with other people. Defaults to the filename of this item (e.g. `foo.conf` when this item is `/etc/foo.conf`). See also: [Writing file templates](../guide/item_file_templates.md) If using `'content_type': 'download'`, this specifies the URL from which the file will be downloaded. The download is done on the machine running `bw` and then uploaded to the node, so the node doesn't need to have access to the URL.
## content_hash Only valid if `content_type` is set to `download`. Specifies a SHA-1 hash (as produced by `sha1sum`) to compare the downloaded file to. If set, the file will only be downloaded if the hash on the node does not match. The hash will be verified after downloading and after uploading to the node. If not set, BundleWrap will always download the file and then compare the `sha1sum` of the downloaded file to the one currently on the node.
## verify_with This can be used to run external validation commands on a file before it is applied to a node. The file is verified locally on the machine running BundleWrap. Verification is considered successful if the exit code of the verification command is 0. Use `{}` as a placeholder for the shell-quoted path to the temporary file. Here is an example for verifying sudoers files:
visudo -cf {}
Keep in mind that all team members will have to have the verification command installed on their machines.
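As a sketch, embedded in an item definition this might look like (file and source names are hypothetical):

    files = {
        "/etc/sudoers.d/deploy": {
            'source': "sudoers_deploy",  # hypothetical template name
            'mode': "0440",
            'verify_with': "visudo -cf {}",
        },
    }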
## test_with Same as `verify_with`, but called when running `bw test`. You may want to use this if you don't want all your team members to have to have a large suite of tools installed, but still want to verify file integrity with another tool. The file is verified locally on the machine running BundleWrap. Verification is considered successful if the exit code of the verification command is 0. Use `{}` as a placeholder for the shell-quoted path to the temporary file. Please note that `bw test` will call both `verify_with` and `test_with`, so there's no need to set both. bundlewrap-4.13.6/docs/content/items/git_deploy.md000066400000000000000000000062651417502274300221720ustar00rootroot00000000000000# Deploying from git The `git_deploy` item lets you deploy the *contents* of a git repository to a node - without requiring the node to have access to that repository or exposing the `.git/` directory to the node. directories = { # git_deploy will not create this by itself "/var/tmp/example": {}, } git_deploy = { "/var/tmp/example": { 'repo': "example", 'rev': "master", 'use_xattrs': True, }, } `git_deploy` items will only upload a tarball with the data from the git repo, no part of the git history is leaked to the node. Requires git to be installed on the machine running BundleWrap.
# git_deploy_repos Put this in a file called `git_deploy_repos` in your repository root: example: /Users/jdoe/Projects/example This file should also be added to your `.gitignore` if you are sharing that repo with a team. Each team member must provide a mapping of the repo name used in the bundle ("example" in this case) to a local filesystem path with a git repository. It is each user's responsibility to make sure the clone in that location is up to date.
# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## repo The short name of a repo as it appears in `git_deploy_repos`. Alternatively, it can point directly to a git URL: git_deploy = { "/var/tmp/example": { 'repo': "https://github.com/bundlewrap/bundlewrap.git", [...] }, } Note however that this has a performance penalty, as a new clone of that repo has to be made on every run of BundleWrap. (See section "Environment variables" below.)
## rev The `rev` attribute can contain anything `git rev-parse` can resolve into a commit hash (branch names, tags, first few characters of full commit hash). Note that you should probably use tags here. *Never* use HEAD (use a branch name like 'master' instead).
## use_xattrs BundleWrap needs to store the deployed commit hash on the node. The `use_xattrs` attribute controls how this is done. If set to `True`, the `attr` command on the node is used to store the hash as an extended file system attribute. Since `attr` might not be installed on the node, the default is to place a dotfile in the target directory instead (keep that in mind when deploying websites etc.).
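For example, a sketch that pins a deployment to a tag and stores the deployed commit hash via `attr` (repo name and tag are hypothetical):

    git_deploy = {
        "/var/www/example": {
            'repo': "example",   # hypothetical entry in git_deploy_repos
            'rev': "v1.2.3",     # prefer tags over branch names
            'use_xattrs': True,  # requires the attr command on the node
        },
    }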
# Environment variables ## `BW_GIT_DEPLOY_CACHE` This only affects repositories for which a URL has been specified. With this env var unset, BundleWrap will clone repos to a temporary directory. This is done once per BundleWrap process and removed automatically when the process terminates. If you *manually* launch multiple parallel processes of `bw`, each of those will clone the git repo. This can create significant overhead, since they all create redundant copies. You can set `BW_GIT_DEPLOY_CACHE` to an absolute path: All the `bw` processes will use it as a shared cache. Note: It is not wise to use this option on your workstation. BundleWrap will only ever clone repos, not pull or delete them. This variable is meant as a temporary cache, for example in CI builds, and you will have to clean it up yourself. bundlewrap-4.13.6/docs/content/items/group.md000066400000000000000000000007471417502274300211660ustar00rootroot00000000000000# Group items Manages system groups. Group members are managed through the [user item](user.md). groups = { "acme": { "gid": 2342, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## delete When set to `True`, this group will be removed from the system. When using `delete`, no other attributes are allowed.
## gid Numerical ID of the group. bundlewrap-4.13.6/docs/content/items/k8s.md000066400000000000000000000075001417502274300205310ustar00rootroot00000000000000# Kubernetes items
Support for Kubernetes is experimental at this time. Backwards-incompatible changes may happen at any time.
See also: [Guide to Kubernetes](../guide/kubernetes.md)
Manage resources in Kubernetes clusters. k8s_namespaces = { "my-app": { 'manifest': { 'apiVersion': "v1", }, }, "my-previous-app": {'delete': True}, } k8s_deployments = { "my-app/my-deployment": { 'manifest': { ... }, }, } Note that the names of all items in a namespace must be prefixed with the name of their namespace and a forward slash `/`. Resource items will automatically depend on their namespace if you defined it.
## Resource types
| Resource type | Bundle attribute |
|---------------|------------------|
| Cluster Role | `k8s_clusterroles` |
| Cluster Role Binding | `k8s_clusterrolebindings` |
| Config Map | `k8s_configmaps` |
| Cron Job | `k8s_cronjobs` |
| Custom Resource Definition | `k8s_crd` |
| Daemon Set | `k8s_daemonsets` |
| Deployment | `k8s_deployments` |
| Ingress | `k8s_ingresses` |
| Namespace | `k8s_namespaces` |
| Network Policy | `k8s_networkpolicies` |
| Persistent Volume Claim | `k8s_pvc` |
| Role | `k8s_roles` |
| Role Binding | `k8s_rolebindings` |
| Service | `k8s_services` |
| Service Account | `k8s_serviceaccounts` |
| Secret | `k8s_secrets` |
| StatefulSet | `k8s_statefulsets` |
| (any) | `k8s_raw` |
You can define [Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) like this: k8s_crd = { "custom-thing": { 'manifest': { 'apiVersion': "apiextensions.k8s.io/v1beta1", 'spec': { 'names': { 'kind': "CustomThing", }, }, }, }, } k8s_raw = { "foo/CustomThing/baz": { 'manifest': { 'apiVersion': "example.com/v1", }, }, } The special `k8s_raw` items can also be used to create resources that BundleWrap does not support natively: k8s_raw = { "foo/HorizontalPodAutoscaler/baz": { 'manifest': { 'apiVersion': "autoscaling/v2beta1", }, }, } Resources outside any namespace can be created with `k8s_raw` by omitting the namespace in the item name (so that the name starts with `/`).
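As a sketch, such a cluster-wide resource might be defined like this (resource name and values are hypothetical):

    k8s_raw = {
        "/PriorityClass/high-priority": {  # no namespace, so the name starts with "/"
            'manifest': {
                'apiVersion': "scheduling.k8s.io/v1",
                'value': 1000000,
            },
        },
    }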
# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## context Only used with Mako and Jinja2 manifests (see `manifest_processing` below). The values of this dictionary will be available from within the template as variables named after the respective keys.
## delete Set this to `True` to have the resource removed.
## manifest The resource definition (as defined in the [Kubernetes API](https://kubernetes.io/docs/reference/)) formatted as a Python dictionary (will be converted to JSON and passed to `kubectl apply`). Mutually exclusive with `manifest_file`.
## manifest_file Filename of the resource definition relative to the `manifests` subdirectory of your bundle. Filenames must end in `.yaml`, `.yml`, or `.json` to indicate file format. Mutually exclusive with `manifest`.
## manifest_processor Set this to `jinja2` or `mako` if you want to use a template engine to process your `manifest_file`. Defaults to `None`. bundlewrap-4.13.6/docs/content/items/pkg_apk.md000066400000000000000000000007541417502274300214440ustar00rootroot00000000000000# apk package items Handles packages installed by `apk` on Alpine-based systems. pkg_apk = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed. bundlewrap-4.13.6/docs/content/items/pkg_apt.md000066400000000000000000000023171417502274300214520ustar00rootroot00000000000000# APT package items Handles packages installed by `apt-get` on Debian-based systems. pkg_apt = { "foopkg": { "installed": True, # default }, "bar_i386": { # i386 multiarch variant of the "bar" package "installed": False, }, "awesome-daemon": { "when_creating": { "start_service": False, }, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be purged.
## when\_creating These attributes are only enforced during the creation of the item on the node (in this case this means when a package is installed). They are ignored in subsequent runs of `bw apply`.
### start\_service By default, daemons will be auto-started on systems like Debian or Ubuntu. This happens right after the package has been installed. You might want to set `start_service` to `False` to avoid this. This might be necessary if BundleWrap must place some more config files on the node before a daemon can actually be started. bundlewrap-4.13.6/docs/content/items/pkg_dnf.md000066400000000000000000000007511417502274300214350ustar00rootroot00000000000000# dnf package items Handles packages installed by `dnf` on RPM-based systems. pkg_dnf = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed. bundlewrap-4.13.6/docs/content/items/pkg_freebsd.md000066400000000000000000000013201417502274300222710ustar00rootroot00000000000000# FreeBSD package items Handles packages installed by `pkg` on FreeBSD systems. pkg_freebsd = { "foo": { "installed": True, # default }, "bar": { "installed": True, "version": "1.0", }, "baz": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be purged.
## version Optional version string. Can be used to select one specific version of a package. Ignored when `installed` is `False`. bundlewrap-4.13.6/docs/content/items/pkg_openbsd.md000066400000000000000000000016731417502274300223240ustar00rootroot00000000000000# OpenBSD package items Handles packages installed by `pkg_add` on OpenBSD systems. pkg_openbsd = { "foo": { "installed": True, # default }, "bar": { "installed": True, "version": "1.0", }, "baz": { "installed": False, }, "qux": { "flavor": "no_x11", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be purged.
## flavor Optional, defaults to the "normal" flavor. Can be used together with `version`. Ignored when `installed` is `False`.
## version Optional version string. Can be used to select one specific version of a package. Can be used together with `flavor`. Ignored when `installed` is `False`. bundlewrap-4.13.6/docs/content/items/pkg_opkg.md000066400000000000000000000007471417502274300216330ustar00rootroot00000000000000# opkg package items Handles packages installed by `opkg` on OpenWRT/LEDE. pkg_opkg = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed. bundlewrap-4.13.6/docs/content/items/pkg_pacman.md000066400000000000000000000023311417502274300221210ustar00rootroot00000000000000# Pacman package items Handles packages installed by `pacman` (e.g. Arch Linux). pkg_pacman = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, "somethingelse": { "tarball": "something-1.0.pkg.tar.gz", } }
System updates on Arch Linux should always be performed manually and with great care. Thus, this item type installs packages with a simple `pacman -S $pkgname` instead of the commonly recommended `pacman -Syu $pkgname`. You should manually do a full system update before installing new packages via BundleWrap!


# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if this package and all dependencies that are no longer needed should be removed.
## tarball Upload a local file to the node and install it using `pacman -U`. The value of `tarball` must point to a file relative to the `pkg_pacman` subdirectory of the current bundle. bundlewrap-4.13.6/docs/content/items/pkg_pamac.md000066400000000000000000000032231417502274300217440ustar00rootroot00000000000000# Pamac package items Handles packages installed by `pacman` (e.g. Arch Linux) and `pamac`. Uses `pamac` to install, build and remove packages. Needs `pacman` to determine if a package is installed; `pacman` is only used to read information from the node, all actions are handled by `pamac`. pkg_pamac = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, "somethingelse": { "when_creating": { "aur": True, # installs package from AUR instead of official repos. Defaults to `False` }, }, }
System updates on Arch Linux should always be performed manually and with great care. Thus, this item type installs packages with `pamac install --no-upgrade $pkgname`. You should manually do a full system update before installing new packages via BundleWrap!


# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if this package and all dependencies that are no longer needed should be removed.
## aur `True` when the package should be installed from AUR; `False` if the package should be installed from official sources. Defaults to `False`. This attribute will only be read when creating the item on the node, e.g. when the desired package will be installed for the first time. In subsequent runs, this attribute will be ignored. See [when\_creating documentation](../repo/items.py.md#when_creating) bundlewrap-4.13.6/docs/content/items/pkg_pip.md000066400000000000000000000017751417502274300214610ustar00rootroot00000000000000# pip package items Handles Python packages installed by `pip`. Note that you can use the [pip_command node attribute](../repo/nodes.py.md#pip_command) to use `pip3`. pkg_pip = { "foo": { "installed": True, # default "version": "1.0", # optional }, "bar": { "installed": False, }, "/path/to/virtualenv/foo": { # will install foo in the virtualenv at /path/to/virtualenv }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed.
## version Force the given exact version to be installed. You can only specify a single version here, selectors like `>=1.0` are NOT supported. If it's not given, the latest version will be installed initially, but (like the other package items) upgrades will NOT be installed. bundlewrap-4.13.6/docs/content/items/pkg_snap.md000066400000000000000000000007371417502274300216330ustar00rootroot00000000000000# snap package items Handles packages installed by `snap` command. pkg_snap = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed. bundlewrap-4.13.6/docs/content/items/pkg_yum.md000066400000000000000000000007511417502274300215000ustar00rootroot00000000000000# yum package items Handles packages installed by `yum` on RPM-based systems. pkg_yum = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed. bundlewrap-4.13.6/docs/content/items/pkg_zypper.md000066400000000000000000000007631417502274300222220ustar00rootroot00000000000000# zypper package items Handles packages installed by `zypper` on SUSE-based systems. pkg_zypper = { "foopkg": { "installed": True, # default }, "bar": { "installed": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## installed `True` when the package is expected to be present on the system; `False` if it should be removed. bundlewrap-4.13.6/docs/content/items/postgres_db.md000066400000000000000000000021441417502274300223360ustar00rootroot00000000000000# Postgres database items Manages Postgres databases. postgres_dbs = { "mydatabase": { "owner": "me", "when_creating": { "encoding": "LATIN1", "collation": "de_DE.ISO-8859-1", "ctype": "de_DE.ISO-8859-1", }, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## owner Name of the role which owns this database (defaults to `"postgres"`).
## when\_creating ### encoding, collation, and ctype By default, BundleWrap will only create a database using your default PostgreSQL template, which most likely is `template1`. This means it will use the same encoding and collation that `template1` uses. By specifying any of the attributes `encoding`, `collation`, or `ctype`, BundleWrap will instead create a new database from `template0`, thus allowing you to override said database attributes. These options are creation-time only.
## delete `True` if the database should be deleted (defaults to `False`). bundlewrap-4.13.6/docs/content/items/postgres_role.md000066400000000000000000000020451417502274300227120ustar00rootroot00000000000000# Postgres role items Manages Postgres roles. postgres_roles = { "me": { "superuser": True, "password": "itsamemario", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## superuser `True` if the role should be given superuser privileges (defaults to `False`).
## password Plaintext password to set for this role (will be hashed using MD5).
Please do not write any passwords into your bundles. This attribute is intended to be used with an external source of passwords and filled dynamically. If you don't have or want such an elaborate setup, specify passwords using the password_hash attribute instead.

## password_hash As an alternative to `password`, this allows setting the raw hash as it will be stored in Postgres' internal database. Should start with "md5".
## delete `True` if the role should be deleted (defaults to `False`). bundlewrap-4.13.6/docs/content/items/routeros.md000066400000000000000000000052151417502274300217070ustar00rootroot00000000000000# RouterOS items Manages RouterOS configuration through the API (port 8728). You can address every configuration exposed by the API by constructing the item name and attributes accordingly. If you haven't already, familiarize yourself with the CLI over SSH first. Use it as a reference when composing items in your bundles. Don't forget to set the os attribute of your node to routeros and also set the username and password attributes. routeros = { "/ip/dns": { "servers": "8.8.8.8", }, "/interface/vlan?name=vlan6": { "vlan-id": "6", "interface": "bridge", "needs": { "routeros:/interface/bridge?name=bridge", }, }, "/interface/vlan?name=vlan7": { "delete": True, }, "/interface/bridge?name=bridge": {}, "/interface/bridge/port?interface=ether8": { "bridge": "bridge", "needs": { "routeros:/interface/bridge?name=bridge", }, }, "/interface/bridge/vlan?vlan-ids=6": { "bridge": "bridge", "needs": { "routeros:/interface/bridge?name=bridge", }, "tagged": { "ether10", "ether11", "ether12", }, "untagged": { "ether13", "ether14", "ether15", }, }, "/system/logging?action=remote&topics=critical": {}, } Note that when you're dealing with a list of things, item names have two parts, separated by a `?` character. The first part determines which kind of item is addressed, the second part is a simple `key=value` query that MUST return exactly one entry. If a list has no single "primary key" (such as `/system/logging`), use `&` to add more conditions.

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
BundleWrap will accept any attributes for these items and pass them through to the RouterOS API. All attribute values can be passed as strings. If given as integers or booleans, BundleWrap will convert them to strings for you. If given a set, list, or tuple of strings, BundleWrap will join those strings with commas. Since `comment` is an internal attribute for BundleWrap, use `_comment` to apply the `comment` attribute on a RouterOS item.
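For example, a sketch using set-valued attributes and `_comment` (interface names are hypothetical):

    routeros = {
        "/interface/bridge/vlan?vlan-ids=10": {
            'bridge': "bridge",
            'tagged': {"ether1", "ether2"},  # joined into "ether1,ether2"
            '_comment': "managed by BundleWrap",  # sets the RouterOS "comment" attribute
        },
    }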
## delete When set to `True`, this item will be removed from the system. When using `delete`, no other attributes are allowed. bundlewrap-4.13.6/docs/content/items/svc_freebsd.md000066400000000000000000000016541417502274300223150ustar00rootroot00000000000000# FreeBSD service items Handles services on FreeBSD. svc_freebsd = { "bgpd": { "enabled": True, # default "running": True, # default }, "supervisord": { "running": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## enabled `True` if the service shall be automatically started during system bootup; `False` otherwise. Defaults to `True`, which is needed on FreeBSD because starting a disabled service fails.
## running `True` if the service is expected to be running on the system; `False` if it should be stopped.
## Canned actions See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions) ## restart Restarts the service.
## stop Stops the service.
## stopstart Stops and starts the service. bundlewrap-4.13.6/docs/content/items/svc_openbsd.md000066400000000000000000000016531417502274300223340ustar00rootroot00000000000000# OpenBSD service items Handles services on OpenBSD. svc_openbsd = { "bgpd": { "enabled": True, # default "running": True, # default }, "supervisord": { "running": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## enabled `True` if the service shall be automatically started during system bootup; `False` otherwise. Defaults to `True`, which is needed on OpenBSD because starting a disabled service fails.
## running `True` if the service is expected to be running on the system; `False` if it should be stopped.
## Canned actions See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions) ## restart Restarts the service.
## stop Stops the service.
## stopstart Stops and starts the service. bundlewrap-4.13.6/docs/content/items/svc_openrc.md000066400000000000000000000017441417502274300221710ustar00rootroot00000000000000# openrc service items Handles services managed by openrc. svc_openrc = { "sshd": { "enabled": True, # default "running": True, # default }, "nginx": { "enabled": False, "running": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## enabled `True` if the service shall be automatically started during system bootup; `False` otherwise. `None` makes BundleWrap ignore this setting.
## running `True` if the service is expected to be running on the system; `False` if it should be stopped. `None` makes BundleWrap ignore this setting.
## Canned actions See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions) ## reload Reloads the service. Not all services support reloading.
## restart Restarts the service.
## stop Stops the service. bundlewrap-4.13.6/docs/content/items/svc_systemd.md000066400000000000000000000021611417502274300223650ustar00rootroot00000000000000# systemd service items Handles services managed by systemd. svc_systemd = { "fcron.service": { "enabled": True, # default "running": True, # default "masked": False, # default }, "sgopherd.socket": { "running": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## enabled `True` if the service shall be automatically started during system bootup; `False` otherwise. `None` makes BundleWrap ignore this setting.
## running `True` if the service is expected to be running on the system; `False` if it should be stopped. `None` makes BundleWrap ignore this setting.
## masked `True` if the service is expected to be masked; `False` if it should be unmasked. `None` makes BundleWrap ignore this setting.
## Canned actions See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions) ## reload Reloads the service.
## restart Restarts the service.
## stop Stops the service. bundlewrap-4.13.6/docs/content/items/svc_systemv.md000066400000000000000000000013201417502274300224030ustar00rootroot00000000000000# System V service items Handles services managed by traditional System V init scripts. svc_systemv = { "apache2": { "running": True, # default }, "mysql": { "running": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## running `True` if the service is expected to be running on the system; `False` if it should be stopped.
## Canned actions See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions) ## reload Reloads the service.
## restart Restarts the service.
## stop Stops the service. bundlewrap-4.13.6/docs/content/items/svc_upstart.md000066400000000000000000000020171417502274300223770ustar00rootroot00000000000000# Upstart service items Handles services managed by Upstart. svc_upstart = { "gunicorn": { "running": True, # default }, "celery": { "running": False, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## running `True` if the service is expected to be running on the system; `False` if it should be stopped.
## Canned actions See also: [Explanation of how canned actions work](../repo/items.py.md#canned-actions) ## reload Reloads the service.
## restart Restarts the service.
## stop Stops the service.
## stopstart Stops and then starts the service. This is different from `restart` in that Upstart will pick up changes to the `/etc/init/SERVICENAME.conf` file, while `restart` will continue to use the version of that file that the service was originally started with. See [http://askubuntu.com/a/238069](http://askubuntu.com/a/238069). bundlewrap-4.13.6/docs/content/items/symlink.md000066400000000000000000000013171417502274300215120ustar00rootroot00000000000000# Symlink items symlinks = { "/some/symlink": { "group": "root", "owner": "root", "target": "/target/file", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## target File or directory this symlink points to. **This attribute is required.**
## group Name of the group this symlink belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
## owner Username of the symlink's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. bundlewrap-4.13.6/docs/content/items/user.md000066400000000000000000000051211417502274300207770ustar00rootroot00000000000000# User items Manages system user accounts. users = { "jdoe": { "full_name": "Jane Doe", "gid": 2342, "groups": ["admins", "users", "wheel"], "home": "/home/jdoe", "password_hash": "$6$abcdef$ghijklmnopqrstuvwxyz", "shell": "/bin/zsh", "uid": 4747, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes) All attributes are optional.
## delete When set to `True`, this user will be removed from the system. Note that because of how `userdel` works, the primary group of the user will be removed if it contains no other users. When using `delete`, no other attributes are allowed.
## full_name Full name of the user.
## gid Primary group of the user as numerical ID or group name.
Due to how useradd works, this attribute is required whenever you don't want the default behavior of useradd (usually that means automatically creating a group with the same name as the user). If you want to use an unmanaged group already on the node, you need this attribute. If you want to use a group managed by BundleWrap, you need this attribute. This is true even if the groups mentioned are in fact named like the user.

## groups List of groups (names, not GIDs) the user should belong to. Must NOT include the group referenced by `gid`.
## hash_method One of `md5`, `sha256`, or `sha512`. Defaults to `sha512`.
## home Path to home directory. Defaults to `/home/USERNAME`.
## password The user's password in plaintext.
Please do not write any passwords into your bundles. This attribute is intended to be used with an external source of passwords and filled dynamically. If you don't have or want such an elaborate setup, specify passwords using the password_hash attribute instead.
If you don't specify a salt along with the password, BundleWrap will use a static salt. Be aware that this is basically the same as using no salt at all.

## password_hash Hashed password as it would be returned by `crypt()` and written to `/etc/shadow`.
## salt Recommended for use with the `password` attribute. BundleWrap will use 5000 rounds of SHA-512 on this salt and the provided password.
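A sketch combining these attributes (values are hypothetical; in practice the password should come from an external source such as `repo.vault`):

    users = {
        'jdoe': {
            'password': "correct horse battery staple",  # hypothetical, use an external secret source instead
            'salt': "abcdefgh",  # hypothetical salt
            'hash_method': "sha512",
        },
    }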
## shell Path to login shell executable.
## uid Numerical user ID. It's your job to make sure it's unique. bundlewrap-4.13.6/docs/content/items/zfs_dataset.md000066400000000000000000000016141417502274300223330ustar00rootroot00000000000000# ZFS datasets Manages ZFS datasets. zfs_datasets = { "tank/mydataset": { "acltype": "posixacl", "atime": "on", "relatime": "on", "compression": "on", "dedup": "off", "mountpoint": "/mnt/mydataset", "readonly": "off", "quota": "1G", "recordsize": "131072", "logbias": "throughput", }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## mountpoint Controls where the dataset should be mounted. If you specify a mountpoint, the dataset will get mounted. If you set this to `None`, BundleWrap will also automatically unmount the dataset for you.
## Any other attribute Sets the corresponding dataset properties. Make sure the property is available in your pool. bundlewrap-4.13.6/docs/content/items/zfs_pool.md000066400000000000000000000030531417502274300216560ustar00rootroot00000000000000# ZFS pools Manages ZFS pools. zfs_pools = { "tank": { "when_creating": { "config": [ { "type": "mirror", "devices": { "/dev/sda", "/dev/sdb", }, }, ], "ashift": 12, }, "autoexpand": False, "autoreplace": False, "autotrim": True, }, }

# Attribute reference See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
## config A list of dicts. This allows you to create arbitrary pool configurations. Each dict must include a `devices` key, which must contain at least one device to use. `type` is optional; if set, it must be one of:

* `mirror` - creates a mirrored vdev (like RAID1)
* `raidz` - creates a raidz vdev (like RAID5)
* `raidz2` - creates a raidz2 vdev (like RAID6)
* `raidz3` - creates a raidz3 vdev
* `log` - creates a ZIL vdev
* `cache` - creates a L2ARC vdev

When creating a `log` vdev, you may only use one or two devices. BundleWrap will automatically create a `log mirror` if you specify two devices for your `log` vdev.
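As a sketch, a pool with a mirrored data vdev, a mirrored log, and a cache device might look like this (device paths are hypothetical):

    zfs_pools = {
        "tank": {
            "when_creating": {
                "config": [
                    {"type": "mirror", "devices": {"/dev/sda", "/dev/sdb"}},
                    {"type": "log", "devices": {"/dev/nvme0n1", "/dev/nvme1n1"}},  # two devices: becomes a log mirror
                    {"type": "cache", "devices": {"/dev/nvme2n1"}},
                ],
            },
        },
    }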
## ashift Sets the `ashift` attribute for a to-be-created pool. `ashift` gets ignored if the requested pool already exists.
## autoexpand, autoreplace, and autotrim Sets the corresponding zpool options `autoexpand`, `autoreplace` and `autotrim`. bundlewrap-4.13.6/docs/content/misc/000077500000000000000000000000001417502274300173125ustar00rootroot00000000000000bundlewrap-4.13.6/docs/content/misc/about.md000066400000000000000000000016741417502274300207560ustar00rootroot00000000000000 # About Development on BundleWrap started in July 2012, borrowing some ideas from [Bcfg2](http://bcfg2.org/). Some key features that are meant to set BundleWrap apart from other config management systems are: * decentralized architecture * pythonic and easily extendable * easy to get started with * true item-level parallelism (in addition to working on multiple nodes simultaneously, BundleWrap will continue to fix config files while installing a package on the same node) * very customizable item dependencies * collaboration features like [node locking](../guide/locks.md) (to prevent simultaneous applies to the same node) and hooks for chat notifications * built-in testing facility (`bw test`) * can be used as a library BundleWrap is a "pure" free software project licensed under the terms of the [GPLv3](http://www.gnu.org/licenses/gpl.html>), with no *Enterprise Edition* or commercial support. bundlewrap-4.13.6/docs/content/misc/contributing.md000066400000000000000000000041771417502274300223540ustar00rootroot00000000000000# Contributing We welcome all input and contributions to BundleWrap. If you've never done this sort of thing before, maybe check out [contribution-guide.org](http://www.contribution-guide.org). But don't be afraid to make mistakes, nobody expects your first contribution to be perfect. We'll gladly help you out.
## Submitting bug reports Please use the [GitHub issue tracker](https://github.com/bundlewrap/bundlewrap/issues) and take a few minutes to look for existing reports of the same problem (open or closed!).
If you've found a security issue or are not at all sure, just contact trehn@bundlewrap.org.

## Contributing code
Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat.
Here are the steps: 1. Write your code. Awesome! 2. If you haven't already done so, please consider writing tests. Otherwise, someone else will have to do it for you. 3. Same goes for documentation. 4. Set up a [virtualenv](http://virtualenv.readthedocs.org/en/latest/) and run `pip install -r requirements.txt`. 5. Make sure you can connect to your localhost via `ssh` without using a password and that you are able to run `sudo`. 6. Run `py.test tests/`. 7. Review and sign the Copyright Assignment Agreement (CAA) by adding your name and email to the `AUTHORS` file. (This step can be skipped if your contribution is too small to be considered intellectual property, e.g. spelling fixes) 8. Open a pull request on [GitHub](https://github.com/bundlewrap/bundlewrap). 9. Feel great. Thank you.
## Contributing documentation The process is essentially the same as detailed above for code contributions. You will find the docs in `docs/content/` and can preview them using `cd docs && mkdocs serve`.
## Help If at any point you need help or are not sure what to do, just drop by in [#bundlewrap on Libera.Chat](irc://irc.libera.chat/bundlewrap) or open a [discussion on GitHub](https://github.com/bundlewrap/bundlewrap/discussions). bundlewrap-4.13.6/docs/content/misc/deciding.md000066400000000000000000000017661417502274300214140ustar00rootroot00000000000000 ## Is BundleWrap the right tool for you? We think you will enjoy BundleWrap a lot if you: - know some Python - like to write your configuration from scratch and control every bit of it - have lots of unique nodes - are trying to get a lot of existing systems under management - are NOT trying to handle a massive amount of nodes (let’s say more than 1000) - like to start small - don’t want yet more stuff to run on your nodes (or mess with appliances as little as possible) - prefer a simple tool to a fancy one - want as much as possible in git/hg/bzr - have strongly segmented internal networks You might be better served with a different config management system if you: - are already using a config management system and don’t have any major issues - hate Python and/or JSON - like to use community-maintained configuration templates - need unattended bootstrapping of nodes - don’t trust your coworkers bundlewrap-4.13.6/docs/content/misc/faq.md000066400000000000000000000055771417502274300204210ustar00rootroot00000000000000# FAQ ## Technical ### BundleWrap says an item failed to apply, what do I do now? Try running `bw apply -i nodename` to see which attribute of the item could not be fixed. If that doesn't tell you enough, try `bw --debug apply -i nodename` and look for the command BundleWrap is using to fix the item in question. Then try running that command yourself and check for any errors.
### What happens when two people start applying configuration to the same node? BundleWrap uses a [locking mechanism](../guide/locks.md) to prevent collisions like this.
### How can I have BundleWrap reload my services after config changes? See [canned actions](../repo/items.py.md#canned_actions) and [triggers](../repo/items.py.md#triggers).
### Will BundleWrap keep track of package updates? No. BundleWrap will only care about whether a package is installed or not. Updates will have to be installed through a separate mechanism (I like to create an [action](../items/action.md) with the `interactive` attribute set to `True`). Selecting specific versions should be done through your package manager.
### Is there a probing mechanism like Ohai? No. BundleWrap is meant to be very push-focused. The node should not have any say in what configuration it will receive.
### Is BundleWrap secure? BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and code. We also recommend following commit logs to your repos.
## The BundleWrap Project ### Why doesn't BundleWrap provide pre-built community bundles? In our experience, bundles for even the most common pieces of software always contain some opinionated bits specific to local infrastructure. Making bundles truly universal (e.g. in terms of supported Linux distributions) would mean a lot of bloat. And since local modifications are hard to reconcile with an upstream community repository, bundles would have to be very feature-complete to be useful to the majority of users, increasing bloat even more. Maintaining bundles and thus configuration for different pieces of software is therefore out of scope for the BundleWrap project. While it might seem tedious when you're getting started, with some practice, writing your own bundles will become both easy and precise in terms of infrastructure fit.
### Why do contributors have to sign a Copyright Assignment Agreement? While it sounds scary, Copyright assignment is used to improve the enforceability of the GPL. Even the FSF does it, [read their explanation why](http://www.gnu.org/licenses/why-assign.html). The agreement used by BundleWrap is from [harmonyagreements.org](http://harmonyagreements.org). If you're still concerned, please do not hesitate to contact [@trehn](https://twitter.com/trehn). bundlewrap-4.13.6/docs/content/misc/glossary.md000066400000000000000000000024541417502274300215040ustar00rootroot00000000000000# Glossary ## action Actions are a special kind of item used for running shell commands during each `bw apply`. They allow you to do things that aren't persistent in nature.
## apply An "apply" is what we call the process of what's otherwise known as "converging" the state described by your repository and the actual status quo on the node.
## bundle A collection of items. Most of the time, you will create one bundle per application. For example, an Apache bundle will include the httpd service, the virtual host definitions and the apache2 package.
## group Used for organizing your nodes.
## hook [Hooks](../repo/hooks.md) can be used to run your own code automatically during various stages of BundleWrap operations.
## item A single piece of configuration on a node, e.g. a file or an installed package. You might be interested in [this overview of item types](../repo/items.py.md#item_types).
## lib [Libs](../repo/libs.md) are a way to store Python modules in your repository and make them accessible to your bundles and templates.
## node A managed system, no matter if physical or virtual.
## repo A repository is a directory with [some stuff](../repo/layout.md) in it that tells BundleWrap everything it needs to know about your infrastructure. bundlewrap-4.13.6/docs/content/repo/000077500000000000000000000000001417502274300173245ustar00rootroot00000000000000bundlewrap-4.13.6/docs/content/repo/bundle.py.md000066400000000000000000000014501417502274300215460ustar00rootroot00000000000000

bundle.py

Within each bundle, there may be a file called `bundle.py`. It can be used to add dependencies and such to all items with a given tag (see [items.py](items.py.md) for a general introduction to these concepts). Here's an example: tags = { 'foo': { 'needs': { 'svc_systemd:bar', }, 'triggers': { 'action:baz', }, }, } With this, whenever you add the `foo` tag to an item in `items.py`, that item will also depend on `svc_systemd:bar` and trigger `action:baz`. Supported item attributes are: * `needs` * `needed_by` * `precedes` * `preceded_by` * `tags` * `triggers` * `triggered_by` See [Selectors](../guide/selectors.md) for a complete overview of the ways to specify items here. bundlewrap-4.13.6/docs/content/repo/groups.py.md000066400000000000000000000076231417502274300216240ustar00rootroot00000000000000# groups.py This file lets you specify or dynamically build groups of [nodes](nodes.py.md) in your environment. As with `nodes.py`, you define your groups as a dictionary: groups = { 'all': { 'member_patterns': ( r".*", ), }, 'group1': { 'members': ( 'node1', ), }, } All group attributes are optional.
# Group attribute reference This section is a reference for all possible attributes you can define for a group: groups = { 'group1': { # THIS PART IS EXPLAINED HERE 'bundles': ["bundle1", "bundle2"], 'members': ["node1"], 'member_patterns': [r"^cluster1\."], 'metadata': {'foo': "bar"}, 'os': 'linux', 'subgroups': ["group2", "group3"], 'subgroup_patterns': [r"^group.*pattern$"], }, } Note that many attributes from [nodes.py](nodes.py.md) (e.g. `bundles`) may also be set at group level, but aren't explicitly documented here again.
## member_patterns A list of regular expressions. Node names matching these expressions will be added to the group members. Matches are determined using [the search() method](http://docs.python.org/2/library/re.html#re.RegexObject.search).
## members A tuple or list of node names that belong to this group.
## metadata A dictionary that will be accessible from each node's `node.metadata`. For each node, BundleWrap will merge the metadata of all of the node's groups first, then merge in the metadata from the node itself. Metadata is merged recursively by default, meaning nested dicts will overlay each other. Lists will be appended to each other, but not recursed into. In some cases, you want to overwrite instead of merge a piece of metadata. This is accomplished through the use of `bundlewrap.metadata.atomic()` and best illustrated with an example: from bundlewrap.metadata import atomic groups = { 'all': { 'metadata': { 'interfaces': { 'eth0': {}, }, 'nameservers': ["8.8.8.8", "8.8.4.4"], 'ntp_servers': ["pool.ntp.org"], }, }, 'internal': { 'metadata': { 'interfaces': { 'eth1': {}, }, 'nameservers': atomic(["10.0.0.1", "10.0.0.2"]), 'ntp_servers': ["10.0.0.1", "10.0.0.2"], }, }, } A node in both groups will end up with `eth0` *and* `eth1`. The nameservers however are overwritten, so that nodes that are in both the "all" *and* the "internal" group will only have the `10.0.0.x` ones while nodes just in the "all" group will have the `8.8.x.x` nameservers. The NTP servers are appended: a node in both groups will have all three of them.
BundleWrap will consider group hierarchy when merging metadata. For example, it is possible to define a default nameserver for the "eu" group and then override it for the "eu.frankfurt" subgroup. The catch is that this only works for groups that are connected through a subgroup hierarchy. Independent groups will have their metadata merged in an undefined order. `bw test` will report conflicting metadata in independent groups as a metadata collision.
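A minimal sketch of such a hierarchy override (group names and addresses are hypothetical):

    from bundlewrap.metadata import atomic

    groups = {
        'eu': {
            'subgroups': ["eu.frankfurt"],
            'metadata': {'nameservers': atomic(["192.0.2.1"])},  # hypothetical default
        },
        'eu.frankfurt': {
            'metadata': {'nameservers': atomic(["198.51.100.1"])},  # overrides the 'eu' default
        },
    }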
Also see the documentation for node.metadata and metadata.py for more information.

## subgroups A tuple or list of group names whose members should be recursively included in this group.
## subgroup_patterns A list of regular expressions. Nodes in groups with names matching these expressions will be added to the group members. Matches are determined using [the search() method](http://docs.python.org/2/library/re.html#re.RegexObject.search).
## supergroups The inverse of `subgroups`: a tuple or list of group names. Nodes in this group will be added to all of the listed supergroups.
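A short sketch of these attributes (group names are hypothetical):

    groups = {
        'monitored': {},
        'webservers': {
            'subgroups': ["webservers-production"],
            'subgroup_patterns': [r"^webservers-staging"],
        },
        'webservers-production': {
            'supergroups': ["monitored"],  # nodes here are also members of 'monitored'
        },
        'webservers-staging': {},  # picked up via subgroup_patterns
    }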
bundlewrap-4.13.6/docs/content/repo/hooks.md000066400000000000000000000171741417502274300210030ustar00rootroot00000000000000# Hooks Hooks enable you to execute custom code at certain points during a BundleWrap run. This is useful for integrating with other systems e.g. for team notifications, logging or statistics. To use hooks, you need to create a subdirectory in your repo called `hooks`. In that directory you can place an arbitrary number of Python source files. If those source files define certain functions, these functions will be called at the appropriate time. ## Example `hooks/my_awesome_notification.py`: from my_awesome_notification_system import post_message def node_apply_start(repo, node, interactive=False, **kwargs): post_message("Starting apply on {}, everything is gonna be OK!".format(node.name))
Always define your hooks with **kwargs so we can pass in more information in future updates without breaking your hook.

## Functions This is a list of all functions a hook file may implement. --- **`action_run_start(repo, node, item, **kwargs)`** Called each time a `bw apply` command reaches a new action. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current action. --- **`action_run_end(repo, node, item, duration=None, status=None, **kwargs)`** Called each time a `bw apply` command completes processing an action. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current action. `duration` How long the action was running (timedelta). `status`: One of `bundlewrap.items.Item.STATUS_FAILED`, `bundlewrap.items.Item.STATUS_SKIPPED`, or `bundlewrap.items.Item.STATUS_ACTION_SUCCEEDED`. --- **`apply_start(repo, target, nodes, interactive=False, **kwargs)`** Called when you start a `bw apply` command. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `interactive` Indicates whether the apply is interactive or not. To abort the entire apply operation: ``` from bundlewrap.exceptions import GracefulApplyException raise GracefulApplyException("reason goes here") ``` --- **`apply_end(repo, target, nodes, duration=None, **kwargs)`** Called when a `bw apply` command completes. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `duration` How long the apply took (timedelta). --- **`item_apply_start(repo, node, item, **kwargs)`** Called each time a `bw apply` command reaches a new item. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current item. --- **`item_apply_end(repo, node, item, duration=None, status_code=None, status_before=None, status_after=None, **kwargs)`** Called each time a `bw apply` command completes processing an item. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `item` The current item. `duration` How long the apply took (timedelta). `status_code` One of `bundlewrap.items.Item.STATUS_FAILED`, `bundlewrap.items.Item.STATUS_SKIPPED`, `bundlewrap.items.Item.STATUS_OK`, or `bundlewrap.items.Item.STATUS_FIXED`. `status_before` An instance of `bundlewrap.items.ItemStatus`. `status_after` See `status_before`. --- **`lock_add(repo, node, lock_id, items, expiry, comment, **kwargs)`** Called each time a soft lock is added to a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `lock_id` The random ID of the lock. `items` List of item selector strings. `expiry` UNIX timestamp of lock expiry time (int). `comment` As entered by user. --- **`lock_remove(repo, node, lock_id, **kwargs)`** Called each time a soft lock is removed from a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `lock_id` The random ID of the lock. --- **`lock_show(repo, node, lock_info, **kwargs)`** Called each time `bw lock show` finds a lock on a node. 
`repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `lock_info` A dict containing the lock details. --- **`node_apply_start(repo, node, interactive=False, **kwargs)`** Called each time a `bw apply` command reaches a new node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `interactive` `True` if this is an interactive apply run. To skip a node: ``` from bundlewrap.exceptions import SkipNode raise SkipNode("reason goes here") ``` --- **`node_apply_end(repo, node, duration=None, interactive=False, result=None, **kwargs)`** Called each time a `bw apply` command finishes processing a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `duration` How long the apply took (timedelta). `interactive` `True` if this was an interactive apply run. `result` An instance of `bundlewrap.node.ApplyResult`. --- **`node_run_start(repo, node, command, **kwargs)`** Called each time a `bw run` command reaches a new node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `command` The command that will be run on the node. To skip a node: ``` from bundlewrap.exceptions import SkipNode raise SkipNode("reason goes here") ``` --- **`node_run_end(repo, node, command, duration=None, return_code=None, stdout="", stderr="", **kwargs)`** Called each time a `bw run` command finishes on a node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). `command` The command that was run on the node. `duration` How long it took to run the command (timedelta). `return_code` Return code of the remote command. `stdout` The captured stdout stream of the remote command. `stderr` The captured stderr stream of the remote command. --- **`run_start(repo, target, nodes, command, **kwargs)`** Called each time a `bw run` command starts. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `command` The command that will be run on the node. --- **`run_end(repo, target, nodes, command, duration=None, **kwargs)`** Called each time a `bw run` command finishes. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `target` The group or node name you gave on the command line. `nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). `command` The command that was run. `duration` How long it took to run the command on all nodes (timedelta). --- **`test(repo, **kwargs)`** Called at the end of a full `bw test`. `repo` The current repository (instance of `bundlewrap.repo.Repository`). --- **`test_node(repo, node, **kwargs)`** Called during `bw test` for each node. `repo` The current repository (instance of `bundlewrap.repo.Repository`). `node` The current node (instance of `bundlewrap.node.Node`). bundlewrap-4.13.6/docs/content/repo/items.py.md000066400000000000000000000372371417502274300214300ustar00rootroot00000000000000

items.py

Within each bundle, there may be a file called `items.py`. It defines any number of magic attributes that are automatically processed by BundleWrap. Each attribute is a dictionary mapping an item name (such as a file name) to a dictionary of attributes (e.g. file ownership information). A typical `items.py` might look like this: files = { '/etc/hosts': { 'owner': "root", 'group': "root", 'mode': "0664", [...] }, } users = { 'janedoe': { 'home': "/home/janedoe", 'shell': "/bin/zsh", [...] }, 'johndoe': { 'home': "/home/johndoe", 'shell': "/bin/bash", [...] }, } This bundle defines the attributes `files` and `users`. Within the `users` attribute, there are two `user` items. Each item maps its name to a dictionary that is understood by the specific kind of item. Below you will find a reference of all builtin item types and the attributes they understand. You can also [define your own item types](../guide/dev_item.md).
# Item types

This table lists all item types included in BundleWrap along with the bundle attributes they understand.

| Type | Bundle attribute | Description |
|------|------------------|-------------|
| [action](../items/action.md) | actions | Actions allow you to run commands on every `bw apply` |
| [directory](../items/directory.md) | directories | Manages permissions and ownership for directories |
| [file](../items/file.md) | files | Manages contents, permissions, and ownership for files |
| [git_deploy](../items/git_deploy.md) | git_deploy | Deploys the contents of a git repository |
| [group](../items/group.md) | groups | Manages groups by wrapping groupadd, groupmod and groupdel |
| [k8s_*](../items/k8s.md) | k8s_* | Manages resources in Kubernetes clusters by wrapping kubectl |
| [pkg_apt](../items/pkg_apt.md) | pkg_apt | Installs and removes packages with APT |
| [pkg_dnf](../items/pkg_dnf.md) | pkg_dnf | Installs and removes packages with dnf |
| [pkg_opkg](../items/pkg_opkg.md) | pkg_opkg | Installs and removes packages with opkg |
| [pkg_pacman](../items/pkg_pacman.md) | pkg_pacman | Installs and removes packages with pacman |
| [pkg_pamac](../items/pkg_pamac.md) | pkg_pamac | Installs and removes packages with pamac |
| [pkg_pip](../items/pkg_pip.md) | pkg_pip | Installs and removes Python packages with pip |
| [pkg_snap](../items/pkg_snap.md) | pkg_snap | Installs and removes packages with snap |
| [pkg_yum](../items/pkg_yum.md) | pkg_yum | Installs and removes packages with yum |
| [pkg_zypper](../items/pkg_zypper.md) | pkg_zypper | Installs and removes packages with zypper |
| [postgres_db](../items/postgres_db.md) | postgres_dbs | Manages Postgres databases |
| [postgres_role](../items/postgres_role.md) | postgres_roles | Manages Postgres roles |
| [pkg_freebsd](../items/pkg_freebsd.md) | pkg_freebsd | Installs and removes FreeBSD packages with pkg |
| [pkg_openbsd](../items/pkg_openbsd.md) | pkg_openbsd | Installs and removes OpenBSD packages with pkg_add/pkg_delete |
| [routeros](../items/routeros.md) | routeros | Manages RouterOS configuration |
| [svc_freebsd](../items/svc_freebsd.md) | svc_freebsd | Starts and stops services with FreeBSD's rc |
| [svc_openbsd](../items/svc_openbsd.md) | svc_openbsd | Starts and stops services with OpenBSD's rc |
| [svc_systemd](../items/svc_systemd.md) | svc_systemd | Starts and stops services with systemd |
| [svc_systemv](../items/svc_systemv.md) | svc_systemv | Starts and stops services with traditional System V init scripts |
| [svc_upstart](../items/svc_upstart.md) | svc_upstart | Starts and stops services with Upstart |
| [symlink](../items/symlink.md) | symlinks | Manages symbolic links and their ownership |
| [user](../items/user.md) | users | Manages users by wrapping useradd, usermod and userdel |
| [zfs_dataset](../items/zfs_dataset.md) | zfs_datasets | Manages ZFS datasets |
| [zfs_pool](../items/zfs_pool.md) | zfs_pools | Manages ZFS pools |

# Builtin item attributes

There are also attributes that can be applied to any kind of item.
## after

This lets you control execution order of items. This is not something you will have to do very often, because there are already implicit dependencies between item types (e.g. all files automatically depend on the users owning them).

    actions = {
        'a': {
            'command': 'true',
        },
        'b': {
            'command': 'true',
            'after': {'action:a'},
        },
    }

When set up like this, `action:b` will only run after `action:a` has been completed. Note that it doesn't matter whether `action:a` was successful or not; that's what `needs` is for. See [Selectors](../guide/selectors.md) for a complete overview of the ways to specify items here.
## before

Just like `after`, but in the opposite direction.
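For illustration, here is a minimal sketch that mirrors the `after` example above, expressed with `before` instead:

    actions = {
        'a': {
            'command': 'true',
            'before': {'action:b'},  # same effect as putting 'after': {'action:a'} on b
        },
        'b': {
            'command': 'true',
        },
    }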
## comment

This is a string that will be displayed in interactive mode (`bw apply -i`) whenever the item is to be changed in any way. You can use it to warn users before they start disruptive actions.
## error_on_missing_fault

Set this to `True` to make BundleWrap raise an error instead of simply skipping the item when a Fault used for an attribute on the item is unavailable. Faults are special objects used by `repo.vault` to [handle secrets](../guide/secrets.md). A Fault being unavailable can mean you're missing the secret key required to decrypt a secret you're trying to use as an item attribute value. Defaults to `False`, i.e. the item is skipped.
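A minimal sketch (the vault entry and file path are illustrative):

    files = {
        "/etc/app/secret.conf": {
            'content': repo.vault.password_for("app secret"),
            # abort instead of silently skipping when the Fault cannot be resolved:
            'error_on_missing_fault': True,
        },
    }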
## needs

This allows for setting up dependencies between items. Here are two examples:

    my_items = {
        'item1': {
            [...]
            'needs': [
                'file:/etc/foo.conf',
            ],
        },
        'item2': {
            [...]
            'needs': [
                'pkg_apt:',
                'bundle:foo',
            ],
        }
    }

The first item (`item1`, specific attributes have been omitted) depends on a file called `/etc/foo.conf`, while `item2` depends on all APT packages being installed and every item in the foo bundle. Note that unlike `after`, with `needs` the depending item will be skipped if the item it depends on fails or is skipped (unless `cascade_skip` is set to `False` on that item). See [Selectors](../guide/selectors.md) for a complete overview of the ways to specify items here.
## needed_by

This attribute is an alternative way of defining dependencies. It works just like `needs`, but in the other direction. There are only three scenarios where you should use `needed_by` over `needs` (see the sketch after this list):

* if you need all items of a certain type to depend on something or
* if you need all items in a bundle to depend on something or
* if you need an item in a bundle you can't edit to depend on something in your bundles
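As a minimal sketch of the first scenario (the action and its command are illustrative), a single item can make all items of one type depend on it:

    actions = {
        'import_repo_key': {
            'command': "apt-key add /tmp/repo.asc",  # illustrative command
            'needed_by': [
                'pkg_apt:',  # all APT packages now depend on this action
            ],
        },
    }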
## tags

A list of strings to tag an item with. Tagging has no immediate effect in itself, but can be useful in a number of places. For example, you can add dependencies on all items with a given tag:

    pkg_apt = {
        "mysql-server-{}".format(node.metadata.get('mysql_version', "5.5")): {
            'tags': ["provides-mysqld"],
        },
    }

    svc_systemd = {
        "myapp": {
            'needs': ["tag:provides-mysqld"],
        },
    }

In this simplified example we save ourselves from duplicating the logic that gets the current MySQL version from metadata (which is probably overkill here, but you might encounter more complex situations). Tags also allow for optional dependencies, since items can depend on tags that don't exist. So for example if you need to do something after items from another bundle have been completed, but that bundle might not always be there, you can depend on a tag given to the items of the other bundle.
## triggers and triggered

In some scenarios, you may want to execute an [action](../items/action.md) only when an item is fixed (e.g. restart a daemon after a config file has changed or run `postmap` after updating an alias file). To do this, BundleWrap has the builtin attribute `triggers`. You can use it to point to any item that has its `triggered` attribute set to `True`. Such items will only be checked (or in the case of actions: run) if the triggering item is fixed (or a triggering action completes successfully).

    files = {
        '/etc/daemon.conf': {
            [...]
            'triggers': [
                'action:restart_daemon',
            ],
        },
    }

    actions = {
        'restart_daemon': {
            'command': "service daemon restart",
            'triggered': True,
        },
    }

The above example will run `service daemon restart` every time BundleWrap successfully applies a change to `/etc/daemon.conf`. If an action is triggered multiple times, it will only be run once. Similar to `needed_by`, `triggered_by` can be used to define a `triggers` relationship from the opposite direction. See [Selectors](../guide/selectors.md) for a complete overview of the ways to specify items here.
## preceded_by

Operates like `triggers`, but will apply the triggered item *before* the triggering item. Let's look at an example:

    files = {
        '/etc/example.conf': {
            [...]
            'preceded_by': [
                'action:backup_example',
            ],
        },
    }

    actions = {
        'backup_example': {
            'command': "cp /etc/example.conf /etc/example.conf.bak",
            'triggered': True,
        },
    }

In this configuration, the backup action will run right before `/etc/example.conf` is modified, and only if it is about to be changed. You would probably also want to set `cascade_skip` to `False` on the action so you can skip it in interactive mode when you're sure you don't need the backup copy. Similar to `needed_by`, `precedes` can be used to define a `preceded_by` relationship from the opposite direction. See [Selectors](../guide/selectors.md) for a complete overview of the ways to specify items here.
## skip

Set this to `True` to always skip this item. This is useful if you just want to quickly disable this item to try something or if it's sitting somewhere in a dependency chain and it would be too cumbersome to remove entirely under certain conditions. Note that setting this to `True` will also change the default for `cascade_skip` to `False`.
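A minimal sketch:

    files = {
        '/etc/experimental.conf': {
            'content': "not ready yet",
            'skip': True,  # temporarily disable this item without removing it
        },
    }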
## unless

Another builtin item attribute is `unless`. For example, it can be used to construct a one-off file item where BundleWrap will only create the file once, but won't check or modify its contents once it exists.

    files = {
        "/path/to/file": {
            [...]
            "unless": "test -x /path/to/file",
        },
    }

This will run `test -x /path/to/file` before doing anything with the item. If the command returns 0, no action will be taken to "correct" the item. Another common use for `unless` is with actions that perform some sort of install operation. In this case, the `unless` condition makes sure the install operation is only performed when it is needed instead of every time you run `bw apply`. In scenarios like this you will probably want to set `cascade_skip` to `False` so that skipping the installation (because the thing is already installed) will not cause every item that depends on the installed thing to be skipped. Example:

    actions = {
        'download_thing': {
            'command': "wget http://example.com/thing.bin -O /opt/thing.bin && chmod +x /opt/thing.bin",
            'unless': "test -x /opt/thing.bin",
            'cascade_skip': False,
        },
        'run_thing': {
            'command': "/opt/thing.bin",
            'needs': ["action:download_thing"],
        },
    }

If `action:download_thing` would not set `cascade_skip` to `False`, `action:run_thing` would only be executed once: directly after the thing has been downloaded. On subsequent runs, `action:download_thing` will fail the `unless` condition and be skipped. This would also cause all items that depend on it to be skipped, including `action:run_thing`.
The commands you choose for `unless` should not change the state of your node. Otherwise, running `bw verify` might unexpectedly interfere with your nodes.

## cascade_skip

DEPRECATED: Use `before` and `after` instead.

There are some situations where you don't want the default behavior of skipping everything that depends on a skipped item. That's where `cascade_skip` comes in. Set it to `False` and skipping an item won't skip those that depend on it.

Note that items can be skipped

* interactively or
* because of `bw apply --only` or `bw apply --skip` or
* because a Fault was unavailable or
* they were soft-locked on the node or
* because they haven't been triggered or
* because one of their dependencies was skipped or
* because one of their dependencies failed or
* they failed their `unless` condition or
* the `skip` attribute was set or
* because an [action](../items/action.md) had its `interactive` attribute set to `True` during a non-interactive run

The following example will offer to run an `apt-get update` before installing a package, but continue to install the package even if the update is declined interactively.

    actions = {
        'apt_update': {
            'cascade_skip': False,
            'command': "apt-get update",
        },
    }

    pkg_apt = {
        'somepkg': {
            'needs': ["action:apt_update"],
        },
    }

`cascade_skip` defaults to `True`. However, if the item uses the `unless` or `skip` attributes or is triggered, the default changes to `False`. Most of the time, this is what you'll want.
## when\_creating

These attributes are only enforced during the creation of the item on the node (this means the first run of `bw apply` after adding this item to config). They are ignored in subsequent runs of `bw apply`, and when other (non-when\_creating) attributes are changed.

# Canned actions

Some item types have what we call "canned actions". Those are pre-defined actions attached directly to an item. Take a look at this example:

    svc_upstart = {'mysql': {'running': True}}

    files = {
        "/etc/mysql/my.cnf": {
            'source': "my.cnf",
            'triggers': [
                "svc_upstart:mysql:reload",  # this triggers the canned action
            ],
        },
    }

Canned actions always have to be triggered in order to run. In the example above, a change in the file `/etc/mysql/my.cnf` will trigger the `reload` action defined by the [svc_upstart item type](../items/svc_upstart.md) for the mysql service.
bundlewrap-4.13.6/docs/content/repo/layout.md000066400000000000000000000030001417502274300211570ustar00rootroot00000000000000
Repository layout
=================

A BundleWrap repository contains everything you need to construct the configuration for your systems. This page describes the various subdirectories and files that can exist inside a repo.
| File/directory | Description |
|----------------|-------------|
| `nodes.py` | This file tells BundleWrap what nodes (servers, VMs, ...) there are in your environment and lets you configure options such as hostnames. |
| `groups.py` | This file allows you to organize your nodes into groups. |
| `bundles/` | This required subdirectory contains the bulk of your configuration, organized into bundles of related items. Each bundle is a subdirectory of `bundles/` with an `items.py` or `metadata.py` in it (or both). |
| `data/` | This optional subdirectory contains data files that are not generic enough to be included in bundles (which are meant to be shareable). |
| `hooks/` | This optional subdirectory contains hooks you can use to act on certain events when using BundleWrap. |
| `items/` | This optional subdirectory contains the code for your custom item types. |
| `libs/` | This optional subdirectory contains reusable custom code for your bundles. |
bundlewrap-4.13.6/docs/content/repo/libs.md000066400000000000000000000010671417502274300206030ustar00rootroot00000000000000 # Custom code The `libs/` subdirectory of your repository provides a convenient place to put reusable code used throughout your bundles and hooks. A Python module called `example.py` placed in this directory will be available as `repo.libs.example` wherever you have access to a `bundlewrap.repo.Repository` object. In `nodes.py` and `groups.py`, you can do the same thing with just `libs.example`.
Only single files, no subdirectories or packages, are supported at the moment.
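As a minimal sketch (module and function names are illustrative), a file saved as `libs/example.py`:

    # libs/example.py
    def fqdn(node):
        """Return a best-effort FQDN for the given node."""
        return node.hostname or "{}.example.com".format(node.name)

could then be used in a bundle's `items.py` or `metadata.py` as `repo.libs.example.fqdn(node)`.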
bundlewrap-4.13.6/docs/content/repo/metadata.py.md000066400000000000000000000071561417502274300220650ustar00rootroot00000000000000
# metadata.py

Alongside `items.py` you may create another file called `metadata.py`. It can be used to define defaults and do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated.

## Defaults

Let's look at defaults first:

    defaults = {
        "foo": 5,
    }

This will simply ensure that the `"foo"` key in metadata will always be set, but the default value of 5 can be overridden by node or group metadata or metadata reactors.

## Reactors

So let's look at reactors next. Metadata reactors are functions that take the metadata generated for this node so far as their single argument. You must then return a new dictionary with any metadata you wish to have added:

    @metadata_reactor
    def bar(metadata):
        return {
            "bar": metadata.get("foo"),
        }

While this looks simple enough, there are some important caveats. First and foremost: Metadata reactors must assume to be called many times. This is to give you an opportunity to react to metadata provided by other reactors. All reactors will be run again and again until none of them return any changed metadata. Anything you return from a reactor will overwrite defaults, while metadata from `groups.py` and `nodes.py` will still overwrite metadata from reactors. Collection types like sets and dicts will be merged.

The parameter `metadata` is not a dictionary but an instance of `Metastack`. You cannot modify the contents of this object. It provides `.get("some/path", "default")` to query a key path (equivalent to `metadata["some"]["path"]` in a dict) and accepts an optional default value. It will raise a `KeyError` when called for a non-existent path without a default.

While node and group metadata and metadata defaults will always be available to reactors, you should not rely on that for the simple reason that you may one day move some metadata from those static sources into another reactor, which may be run later. Thus you may need to wait for some iterations before that data shows up in `metadata`. Note that BundleWrap will catch any `KeyError`s raised in metadata reactors and only report them if they don't go away after all other relevant reactors are done.

You can also access other nodes' metadata:

    @metadata_reactor
    def baz(metadata):
        frob = set()
        for n in repo.nodes:
            frob.add(n.metadata.get('sizzle'))
        return {'frob': frob}

### DoNotRunAgain

On the other hand, if your reactor only needs to provide new metadata in *some* cases, you can tell BundleWrap to not run it again to save some performance:

    @metadata_reactor
    def foo(metadata):
        if node.has_bundle("bar"):
            return {"bar": metadata.get("foo") + 1}
        else:
            raise DoNotRunAgain
For your convenience, you can access repo, node, metadata_reactors, and DoNotRunAgain in metadata.py without importing them.
## Priority

For atomic ("primitive") data types like `int` or `bool`:

1. Nodes
2. Groups
3. Reactors
4. Defaults

Node metadata wins over group metadata, groups win over reactors, reactors win over defaults. This also applies to type conflicts: For example, specifying a boolean flag in node metadata will win over a list returned by a metadata reactor. (You should probably avoid situations like this entirely.) Set-like data types will be merged recursively.
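To make the layering concrete, here is a minimal sketch (all key names are illustrative) of how a primitive value and a set behave:

    # bundles/demo/metadata.py
    defaults = {
        "port": 80,           # primitive: loses to nodes, groups and reactors
        "admins": {"root"},   # set: merged with values from other layers
    }

    # nodes.py
    nodes = {
        "node-1": {
            'metadata': {
                "port": 8080,         # primitive: wins over all other layers
                "admins": {"jdoe"},   # merged result: {"root", "jdoe"}
            },
        },
    }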
Also see the documentation for node.metadata and group.metadata for more information.
bundlewrap-4.13.6/docs/content/repo/nodes.py.md000066400000000000000000000134571417502274300214130ustar00rootroot00000000000000
# nodes.py

This file lets you specify or dynamically build a list of nodes in your environment. All you have to do here is define a Python dictionary called `nodes`. It should look something like this:

    nodes = {
        "node-1": {
            'hostname': "node-1.example.com",
        },
    }

With BundleWrap, the DNS name and the internal identifier for a node ("node-1" in this case) are two separate things. All fields for a node (including `hostname`) are optional. If you don't give one, BundleWrap will attempt to use the internal identifier to connect to a node:

    nodes = {
        "node-1.example.com": {},
    }
# Dynamic node list

You are not confined to the static way of defining a node list as shown above. You can also assemble the `nodes` dictionary dynamically:

    def get_my_nodes_from_ldap():
        [...]
        return ldap_nodes

    nodes = get_my_nodes_from_ldap()
# One file per node

Especially in larger installations, a single nodes.py can become inconvenient to work with. This example reads nodes from a `nodes/` directory.

    from glob import glob
    from os.path import join

    nodes = {}
    for node in glob(join(repo_path, "nodes", "*.py")):
        with open(node, 'r') as f:
            exec(f.read())

Node files would then add to `nodes`, like this:

    # nodes/node-1.py
    nodes['node-1'] = {
        'hostname': "node-1.example.com",
    }

Alternatively, consider using [TOML nodes](../guide/toml.md).
# Node attribute reference

This section is a reference for all possible attributes you can define for a node:

    nodes = {
        'node-1': {
            # THIS PART IS EXPLAINED HERE
        },
    }

All attributes can also be set at the group level, unless noted otherwise.
## Regular attributes

### bundles

A list of bundle names to be assigned to this node. Bundles set at [group level](groups.py.md) will be added.
### dummy

Set this to `True` to prevent BundleWrap from creating items for and connecting to this node. This is useful for unmanaged nodes because you can still assign them bundles and metadata like regular nodes and access that from managed nodes (e.g. for monitoring).
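A minimal sketch (node name and metadata are illustrative):

    nodes = {
        "appliance-1": {
            'dummy': True,  # never connected to, but still carries bundles/metadata
            'metadata': {
                'ip': "10.0.0.5",
            },
        },
    }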
### groups

A list of group names this node should be added to. Be aware that you can also define group members at the group itself and you probably should not use both methods in parallel to avoid confusion. Cannot be set at group level.

### hostname

A string used as a DNS name when connecting to this node. May also be an IP address.
The username and SSH private key for connecting to the node cannot be configured in BundleWrap. If you need to customize those, BundleWrap will honor your ~/.ssh/config.
Cannot be set at group level.
### metadata

This can be a dictionary of arbitrary data (some type restrictions apply). You can access it from your templates as `node.metadata`. Use this to attach custom data (such as a list of IP addresses that should be configured on the target node) to the node. Note that you can also define metadata at the [group level](groups.py.md#metadata), but node metadata has higher priority. You are restricted to using only the following types in metadata:

* `dict`
* `list`
* `tuple`
* `set`
* `bool`
* `text` / `unicode`
* `bytes` / `str` (only if decodable into text using UTF-8)
* `int`
* `None`
* `bundlewrap.utils.Fault`
Also see the documentation for group.metadata and metadata.py for more information.

### os

Defaults to `"linux"`. A list of supported OSes can be obtained with `bw debug -n ANY_NODE_NAME -c "print(node.OS_KNOWN)"`.
### os_version

Set this to your OS version. Note that it must be a tuple of integers, e.g. if you're running Ubuntu 16.04 LTS, it should be `(16, 4)`. Tuples of integers can be used for easy comparison of versions: `(12, 4) < (16, 4)`
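Since `node.os_version` is available in bundles and templates, here is a minimal sketch of a version gate (the package name is illustrative):

    pkg_apt = {
        'some-legacy-package': {
            # only keep this package installed on pre-16.04 systems
            'installed': node.os_version < (16, 4),
        },
    }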
### password

Which password to use when connecting to the node. Currently only supported for RouterOS.

### username

Which username to use when connecting to the node. Leave this unset to use SSH configuration instead (recommended).
## OS compatibility overrides

### cmd_wrapper_outer

Used whenever a command needs to be run on a node. Defaults to `"sudo sh -c {}"`. `{}` will be replaced by the quoted command to be run (after `cmd_wrapper_inner` has been applied). You will need to override this if you're not using `sudo` to gain root privileges (e.g. `doas`) on the node.
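A minimal sketch for a node using `doas` (assuming it accepts the same `sh -c` form as the `sudo` default):

    nodes = {
        "node-1": {
            # assumption: plain "doas" may run sh -c as root on this node
            'cmd_wrapper_outer': "doas sh -c {}",
        },
    }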
### cmd_wrapper_inner

Used whenever a command needs to be run on a node. Defaults to `"export LANG=C; {}"`. `{}` will be replaced by the command to be run. You will need to override this if the shell on your node sets environment variables differently.
### lock_dir

Directory that will be used for creating [locks](../guide/locks.md) on the node. Defaults to `"/var/lib/bundlewrap"`. Will be created if it does not exist. You will need to override this if `/var/lib` is restricted somehow on your node (SELinux, mounted read-only, etc.).
### pip_command

This setting will affect how [pkg_pip](../items/pkg_pip.md) will behave. By default, it will use whatever `pip` on your system defaults to. You will need to override this if you don't have `pip`, but (for example) only `pip3`. Be aware that this setting has no effect when using virtualenvs.
### use_shadow_passwords
Changing this setting will affect the security of the target system. Only do this for legacy systems that don't support shadow passwords.
This setting will affect how the [user](../items/user.md) item operates. If set to `False`, password hashes will be written directly to `/etc/passwd` and thus be accessible to any user on the system. If the OS of the node is set to "openbsd", this setting has no effect as `master.shadow` is always used.
bundlewrap-4.13.6/docs/content/repo/requirements.txt.md000066400000000000000000000014271417502274300232130ustar00rootroot00000000000000
# requirements.txt

This optional file can be used to ensure minimum required versions of BundleWrap and other Python packages on every machine that uses a repository. `bw repo create` will initially add your current version of BundleWrap:
    bundlewrap>=2.4.0
You can add more packages as you like (you do not have to specify a version for each one), just add each package on a separate line. When someone then tries to use your repo without one of those packages, BundleWrap will exit early with a friendly error message:
    ! Python package 'foo' is listed in requirements.txt, but wasn't found. You probably have to install it with `pip install foo`.
bundlewrap-4.13.6/docs/mkdocs.yml000066400000000000000000000051241417502274300167120ustar00rootroot00000000000000site_name: BundleWrap docs_dir: content site_dir: build extra_css: - bundlewrap.css repo_url: "https://github.com/bundlewrap/bundlewrap" remote_name: github copyright: "BundleWrap is published under the GPL license.

Donations welcome in Bitcoin 13AJYksqncZromPF8HvDUXsmHChAm3Y7W7 or Ethereum 0x5Eb3037e197d3C0d2E014bcfC2e027EB0AD42812." nav: - : index.md - Guides: - Quickstart: guide/quickstart.md - Installation: guide/installation.md - CLI: guide/cli.md - Environment Variables: guide/env.md - File templates: guide/item_file_templates.md - Handling secrets: guide/secrets.md - Locking: guide/locks.md - Kubernetes: guide/kubernetes.md - Custom items: guide/dev_item.md - Python API: guide/api.md - OS compatibility: guide/os_compatibility.md - TOML nodes and groups: guide/toml.md - Selectors: guide/selectors.md - Migrating to 2.0: guide/migrate_12.md - Migrating to 3.0: guide/migrate_23.md - Migrating to 4.0: guide/migrate_34.md - Repository: - Overview: repo/layout.md - nodes.py: repo/nodes.py.md - groups.py: repo/groups.py.md - requirements.txt: repo/requirements.txt.md - bundles/.../bundle.py: repo/bundle.py.md - bundles/.../items.py: repo/items.py.md - bundles/.../metadata.py: repo/metadata.py.md - hooks/: repo/hooks.md - libs/: repo/libs.md - Items: - action: items/action.md - directory: items/directory.md - file: items/file.md - git_deploy: items/git_deploy.md - group: items/group.md - k8s_*: items/k8s.md - pkg_apk: items/pkg_apk.md - pkg_apt: items/pkg_apt.md - pkg_dnf: items/pkg_dnf.md - pkg_freebsd: items/pkg_freebsd.md - pkg_openbsd: items/pkg_openbsd.md - pkg_opkg: items/pkg_opkg.md - pkg_pacman: items/pkg_pacman.md - pkg_pamac: items/pkg_pamac.md - pkg_pip: items/pkg_pip.md - pkg_snap: items/pkg_snap.md - pkg_yum: items/pkg_yum.md - pkg_zypper: items/pkg_zypper.md - postgres_db: items/postgres_db.md - postgres_role: items/postgres_role.md - routeros: items/routeros.md - svc_freebsd: items/svc_freebsd.md - svc_openbsd: items/svc_openbsd.md - svc_openrc: items/svc_openrc.md - svc_systemd: items/svc_systemd.md - svc_systemv: items/svc_systemv.md - svc_upstart: items/svc_upstart.md - symlink: items/symlink.md - user: items/user.md - zfs_dataset: items/zfs_dataset.md - zfs_pool: items/zfs_pool.md - Misc: - About: misc/about.md - Why BundleWrap: misc/deciding.md - Glossary: misc/glossary.md - FAQ: misc/faq.md - Contributing: misc/contributing.md bundlewrap-4.13.6/requirements.txt000066400000000000000000000001101417502274300172310ustar00rootroot00000000000000# deps in this file are for local dev purposes only mkdocs pytest wheel bundlewrap-4.13.6/setup.cfg000066400000000000000000000002001417502274300155660ustar00rootroot00000000000000[flake8] max-line-length = 100 max-complexity = 10 [tool:pytest] python_files=*.py python_classes=Test python_functions=test_* bundlewrap-4.13.6/setup.py000066400000000000000000000036551417502274300155000ustar00rootroot00000000000000from setuptools import find_packages, setup setup( name="bundlewrap", version="4.13.6", description="Config management with Python", long_description=( "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" "While most other config management systems rely on a client-server architecture, BundleWrap works off a repository cloned to your local machine. It then automates the process of SSHing into your servers and making sure everything is configured the way it's supposed to be. You won't have to install anything on managed servers." 
), author="Torsten Rehn", author_email="torsten@rehn.email", license="GPLv3", url="http://bundlewrap.org", packages=find_packages(), entry_points={ 'console_scripts': [ "bw=bundlewrap.cmdline:main", ], }, keywords=["configuration", "config", "management"], classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: System Administrators", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Programming Language :: Python", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Topic :: System :: Installation/Setup", "Topic :: System :: Systems Administration", ], install_requires=[ "cryptography", "Jinja2", "Mako", "passlib", "pyyaml", "requests >= 1.0.0", "librouteros >= 3.0.0", "tomlkit", ], zip_safe=False, ) bundlewrap-4.13.6/tests/000077500000000000000000000000001417502274300151175ustar00rootroot00000000000000bundlewrap-4.13.6/tests/integration/000077500000000000000000000000001417502274300174425ustar00rootroot00000000000000bundlewrap-4.13.6/tests/integration/bw_apply_actions.py000066400000000000000000000070061417502274300233540ustar00rootroot00000000000000from bundlewrap.utils.testing import host_os, make_repo, run def test_action_success(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "success": { 'command': "true", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_fail(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "failure": { 'command': "false", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_pipe_binary(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "pipe": { 'command': "cat", 'data_stdin': b"hello\000world", 'expected_stdout': b"hello\000world", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_pipe_utf8(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "pipe": { 'command': "cat", 'data_stdin': "hello 🐧\n", 'expected_stdout': "hello 🐧\n", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) def test_action_return_codes(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "single-code": { 'command': "true", 'expected_return_code': 0, }, "multi-code-list": { 'command': "false", 'expected_return_code': [1], }, "multi-code-tuple": { 'command': "false", 'expected_return_code': (1,), }, "multi-code-set": { 'command': "false", 'expected_return_code': {1}, } }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) bundlewrap-4.13.6/tests/integration/bw_apply_autoonly.py000066400000000000000000000024151417502274300235650ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_only_bundle_with_dep(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', 'needs': ["file:" + join(str(tmpdir), "bar")], }, }, }, }, "test2": { 'items': { 'files': 
{ join(str(tmpdir), "bar"): { 'content_type': 'any', }, join(str(tmpdir), "baz"): { 'content_type': 'any', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test", "test2"], 'os': host_os(), }, }, ) run("bw apply -o bundle:test -- localhost", path=str(tmpdir)) assert exists(join(str(tmpdir), "foo")) assert exists(join(str(tmpdir), "bar")) assert not exists(join(str(tmpdir), "baz")) bundlewrap-4.13.6/tests/integration/bw_apply_autoskip.py000066400000000000000000000120461417502274300235530ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_skip_bundle(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip bundle:test -- localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_group(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'groups': {"foo"}, 'os': host_os(), }, }, groups={ "foo": {}, }, ) result = run("bw apply --skip group:foo -- localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_id(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip file:{} -- localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_node(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip node:localhost -- localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_tag(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", 'tags': ["nope"], }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip tag:nope -- localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_type(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip file: -- localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) def test_skip_trigger(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", 'tags': ["nope"], 'triggers': ["file:{}".format(join(str(tmpdir), "bar"))], }, join(str(tmpdir), "bar"): { 'content': "nope", 'triggered': True, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) result = run("bw apply --skip tag:nope -- localhost", path=str(tmpdir)) assert result[2] == 0 assert not exists(join(str(tmpdir), "foo")) assert not exists(join(str(tmpdir), "bar")) 
bundlewrap-4.13.6/tests/integration/bw_apply_beforeafter.py000066400000000000000000000174321417502274300242040ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_after(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "echo a >> " + join(str(tmpdir), "foo"), 'after': {"action:b"}, }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 with open(join(str(tmpdir), "foo")) as f: assert f.read() == "b\na\n" def test_before(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "echo a >> " + join(str(tmpdir), "foo"), 'before': {"action:b"}, }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 with open(join(str(tmpdir), "foo")) as f: assert f.read() == "a\nb\n" def test_before_fail_no_skip(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "false", 'before': {"action:b"}, }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 with open(join(str(tmpdir), "foo")) as f: assert f.read() == "b\n" def test_after_fail_no_skip(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "false", }, "b": { 'after': {"action:a"}, 'command': "echo b >> " + join(str(tmpdir), "foo"), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 with open(join(str(tmpdir), "foo")) as f: assert f.read() == "b\n" def test_before_fail_skip_with_needs(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "false", 'before': {"action:b"}, }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), 'needs': {"action:a"}, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 assert b"skipped" in stdout assert not exists(join(str(tmpdir), "foo")) def test_after_fail_skip_with_needed_by(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "false", 'needed_by': {"action:b"}, }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), 'after': {"action:a"}, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 assert b"skipped" in stdout assert not exists(join(str(tmpdir), "foo")) def test_chain_skip(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "false", }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), 'needs': {"action:a"}, }, "c": { 'command': "echo c >> " + join(str(tmpdir), "foo"), 'after': {"action:b"}, }, "d": { 'command': "echo d >> " + join(str(tmpdir), "foo"), 'needs': {"action:b"}, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply 
localhost", path=str(tmpdir)) assert rcode == 1 assert b"skipped" in stdout with open(join(str(tmpdir), "foo")) as f: assert f.read() == "c\n" def test_chain_skip_no_cascade(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "a": { 'command': "false", }, "b": { 'command': "echo b >> " + join(str(tmpdir), "foo"), 'needs': {"action:a"}, 'cascade_skip': False, }, "c": { 'command': "echo c >> " + join(str(tmpdir), "foo"), 'after': {"action:b"}, }, "d": { 'command': "echo d >> " + join(str(tmpdir), "foo"), 'needs': {"action:b"}, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 assert b"skipped" in stdout with open(join(str(tmpdir), "foo")) as f: assert f.read() in ("c\nd\n", "d\nc\n") bundlewrap-4.13.6/tests/integration/bw_apply_directories.py000066400000000000000000000065061417502274300242340ustar00rootroot00000000000000from os import mkdir from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_purge(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "purgedir", "managed_file"): { 'content': "content", }, join(str(tmpdir), "purgedir", "subdir1", "managed_file"): { 'content': "content", }, }, 'directories': { join(str(tmpdir), "purgedir"): { 'purge': True, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) mkdir(join(str(tmpdir), "purgedir")) mkdir(join(str(tmpdir), "purgedir", "subdir2")) mkdir(join(str(tmpdir), "purgedir", "subdir3")) with open(join(str(tmpdir), "purgedir", "unmanaged_file"), 'w') as f: f.write("content") with open(join(str(tmpdir), "purgedir", "subdir3", "unmanaged_file"), 'w') as f: f.write("content") run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "purgedir", "unmanaged_file")) assert not exists(join(str(tmpdir), "purgedir", "subdir3", "unmanaged_file")) assert not exists(join(str(tmpdir), "purgedir", "subdir2")) assert exists(join(str(tmpdir), "purgedir", "subdir1", "managed_file")) assert exists(join(str(tmpdir), "purgedir", "managed_file")) def test_purge_special_chars(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "purgedir", "mänäged_file"): { 'content': "content", }, join(str(tmpdir), "purgedir", "managed_`id`_file"): { 'content': "content", }, }, 'directories': { join(str(tmpdir), "purgedir"): { 'purge': True, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) mkdir(join(str(tmpdir), "purgedir")) with open(join(str(tmpdir), "purgedir", "unmänäged_file"), 'w') as f: f.write("content") with open(join(str(tmpdir), "purgedir", "unmanaged_`uname`_file"), 'w') as f: f.write("content") with open(join(str(tmpdir), "purgedir", "unmanaged_:'_file"), 'w') as f: f.write("content") run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "purgedir", "unmänäged_file")) assert not exists(join(str(tmpdir), "purgedir", "unmanaged_`uname`_file")) assert not exists(join(str(tmpdir), "purgedir", "unmanaged_:'_file")) assert exists(join(str(tmpdir), "purgedir", "mänäged_file")) assert exists(join(str(tmpdir), "purgedir", "managed_`id`_file")) bundlewrap-4.13.6/tests/integration/bw_apply_files.py000066400000000000000000000153121417502274300230150ustar00rootroot00000000000000from base64 import b64encode from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def 
test_any_content_create(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"" def test_any_content_exists(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "foo"), 'wb') as f: f.write(b"existing content") run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"existing content" def test_binary_inline_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo.bin"): { 'content_type': 'base64', 'content': b64encode("ö".encode('latin-1')), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo.bin"), 'rb') as f: content = f.read() assert content.decode('latin-1') == "ö" def test_binary_template_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo.bin"): { 'encoding': 'latin-1', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "files", "foo.bin"), 'wb') as f: f.write("ö".encode('utf-8')) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo.bin"), 'rb') as f: content = f.read() assert content.decode('latin-1') == "ö" def test_delete(tmpdir): with open(join(str(tmpdir), "foo"), 'w') as f: f.write("foo") make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'delete': True, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "foo")) def test_mako_template_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'mako', 'content': "${node.name}", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"localhost" def test_mako_template_content_with_secret(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'mako', 'content': "${repo.vault.password_for('testing')}", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"faCTT76kagtDuZE5wnoiD1CxhGKmbgiX" def test_text_template_content(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'text', 'content': "${node.name}", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "foo"), 'rb') as f: content = f.read() assert content == b"${node.name}" def test_fault_content_unavailable_skipped(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': {}, }, }, nodes={ "localhost": { 
'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: f.write(""" files = { "/tmp/bw_test_faultunavailable": { 'content': repo.vault.password_for("fault", key="missing"), }, } """) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert b"file:/tmp/bw_test_faultunavailable skipped (Fault unavailable)" in stdout assert not exists("/tmp/bw_test_faultunavailable") bundlewrap-4.13.6/tests/integration/bw_apply_git_deploy.py000066400000000000000000000040471417502274300240550ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_deploy_from_url(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'git_deploy': { join(str(tmpdir), "git_deployed_bw"): { 'repo': "https://github.com/bundlewrap/bundlewrap.git", 'rev': "main", }, }, 'directories': { join(str(tmpdir), "git_deployed_bw"): {}, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) assert not exists(join(str(tmpdir), "git_deployed_bw", "LICENSE")) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert exists(join(str(tmpdir), "git_deployed_bw", "LICENSE")) assert not exists(join(str(tmpdir), "git_deployed_bw", ".git")) def test_cannot_deploy_into_purged(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'git_deploy': { join(str(tmpdir), "git_deployed_bw"): { 'repo': "https://github.com/bundlewrap/bundlewrap.git", 'rev': "main", }, }, 'directories': { join(str(tmpdir), "git_deployed_bw"): { 'purge': True, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 assert b"cannot git_deploy into purged directory" in stderr bundlewrap-4.13.6/tests/integration/bw_apply_postgres.py000066400000000000000000000033671417502274300235700ustar00rootroot00000000000000from json import loads from os import environ from bundlewrap.utils.testing import host_os, make_repo, run if environ.get('TRAVIS') == "true": def test_create(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'postgres_dbs': { "bw-test1": { 'owner': "bw-test1", }, }, 'postgres_roles': { "bw-test1": { 'superuser': True, 'password': 'potato', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 stdout, stderr, rcode = run("bw items --state localhost postgres_db:bw-test1", path=str(tmpdir)) assert rcode == 0 assert loads(stdout.decode()) == {'owner': "bw-test1"} stdout, stderr, rcode = run("bw items --state localhost postgres_role:bw-test1", path=str(tmpdir)) assert rcode == 0 assert loads(stdout.decode()) == { 'can_login': True, 'password_hash': "md5ecba3aec62c5aabf6480de6352182004", 'superuser': True, } stdout, stderr, rcode = run("dropdb bw-test1", path=str(tmpdir)) assert rcode == 0 stdout, stderr, rcode = run("dropuser bw-test1", path=str(tmpdir)) assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_apply_precedes.py000066400000000000000000000164521417502274300235130ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_precedes(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], }, }, 'actions': { "action2": { 
'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n2\n3\n" def test_precedes_unless(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'unless': 'true', }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n3\n" def test_precedes_unless2(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'unless': 'true', }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], 'unless': 'true', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) assert not exists(join(str(tmpdir), "file")) def test_precedes_unless3(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["tag:tag1"], 'unless': 'true', }, }, 'actions': { "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'tags': ["tag1"], 'needs': ["action:action2"], }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "2\n3\n" def test_precedes_unless4(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "file"): { 'content': "1\n", 'triggered': True, 'precedes': ["action:action3"], }, }, 'actions': { "action2": { 'command': "false", 'needs': ["file:{}".format(join(str(tmpdir), "file"))], }, "action3": { 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), 'needs': ["action:action2"], }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n" def test_precedes_action(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "action1": { 'command': "echo 1 > {}".format(join(str(tmpdir), "file")), 'precedes': ["action:action2"], 'triggered': True, }, "action2": { 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "file")) as f: content = f.read() assert content == "1\n2\n" 
bundlewrap-4.13.6/tests/integration/bw_apply_secrets.py000066400000000000000000000134471417502274300233720ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_fault_content(tmpdir): make_repo( tmpdir, bundles={ "test": {}, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: f.write(""" files = {{ "{}": {{ 'content': repo.vault.password_for("test"), }}, }} """.format(join(str(tmpdir), "secret"))) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_mako(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "secret"): { 'content': "${repo.vault.password_for('test')}", 'content_type': 'mako', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_mako_metadata(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "secret"): { 'content': "${node.metadata['secret']}", 'content_type': 'mako', }, }, }, }, }, ) with open(join(str(tmpdir), "nodes.py"), 'w') as f: f.write(""" nodes = {{ "localhost": {{ 'bundles': ["test"], 'metadata': {{'secret': vault.password_for("test")}}, 'os': "{}", }}, }} """.format(host_os())) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_jinja2(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "secret"): { 'content': "{{ repo.vault.password_for('test') }}", 'content_type': 'jinja2', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) run("bw apply localhost", path=str(tmpdir)) with open(join(str(tmpdir), "secret")) as f: content = f.read() assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" def test_fault_content_skipped(tmpdir): make_repo( tmpdir, bundles={ "test": {}, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: f.write(""" files = {{ "{}": {{ 'content': repo.vault.password_for("test", key='unavailable'), }}, }} """.format(join(str(tmpdir), "secret"))) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert not exists(join(str(tmpdir), "secret")) def test_fault_content_skipped_mako(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "secret"): { 'content': "${repo.vault.password_for('test', key='unavailable')}", 'content_type': 'mako', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert not exists(join(str(tmpdir), "secret")) def test_fault_content_skipped_jinja2(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "secret"): { 'content': "{{ repo.vault.password_for('test', key='unavailable') }}", 'content_type': 'jinja2', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) def test_fault_content_error(tmpdir): make_repo( tmpdir, bundles={ 
"test": {}, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: f.write(""" files = {{ "{}": {{ 'content': repo.vault.password_for("test", key='unavailable'), 'error_on_missing_fault': True, }}, }} """.format(join(str(tmpdir), "secret"))) stdout, stderr, rcode = run("bw -d apply localhost", path=str(tmpdir)) print(stdout) assert rcode == 1 bundlewrap-4.13.6/tests/integration/bw_apply_skip_attr.py000066400000000000000000000035761417502274300237240ustar00rootroot00000000000000from os.path import exists, join from bundlewrap.utils.testing import host_os, make_repo, run def test_skip_attr_no_cascade(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content': "nope", 'skip': True, }, join(str(tmpdir), "bar"): { 'content': "yes", 'needs': {"file:" + join(str(tmpdir), "foo")}, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert not exists(join(str(tmpdir), "foo")) assert exists(join(str(tmpdir), "bar")) assert b"attribute" in stdout def test_skip_attr_action(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'actions': { "foo": { 'command': "true", 'skip': True, }, "bar": { 'command': "true", 'needs': {"action:foo"}, }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert b"succeeded" in stdout assert b"attribute" in stdout bundlewrap-4.13.6/tests/integration/bw_apply_symlinks.py000066400000000000000000000057271417502274300235750ustar00rootroot00000000000000from os import mkdir, readlink, symlink from os.path import join from bundlewrap.utils.testing import host_os, make_repo, run def test_create(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'symlinks': { join(str(tmpdir), "foo"): { 'target': "/dev/null", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert readlink(join(str(tmpdir), "foo")) == "/dev/null" def test_fix(tmpdir): symlink(join(str(tmpdir), "bar"), join(str(tmpdir), "foo")) make_repo( tmpdir, bundles={ "test": { 'items': { 'symlinks': { join(str(tmpdir), "foo"): { 'target': "/dev/null", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert readlink(join(str(tmpdir), "foo")) == "/dev/null" def test_fix_dir(tmpdir): mkdir(join(str(tmpdir), "foo")) make_repo( tmpdir, bundles={ "test": { 'items': { 'symlinks': { join(str(tmpdir), "foo"): { 'target': "/dev/null", }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert readlink(join(str(tmpdir), "foo")) == "/dev/null" def test_fix_dir_target(tmpdir): mkdir(join(str(tmpdir), "dir1")) mkdir(join(str(tmpdir), "dir2")) symlink(join(str(tmpdir), "dir1"), join(str(tmpdir), "link")) make_repo( tmpdir, bundles={ "test": { 'items': { 'symlinks': { join(str(tmpdir), "link"): { 'target': join(str(tmpdir), "dir2"), }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 assert 
readlink(join(str(tmpdir), "link")) == join(str(tmpdir), "dir2") bundlewrap-4.13.6/tests/integration/bw_apply_tags.py000066400000000000000000000037451417502274300226600ustar00rootroot00000000000000from bundlewrap.utils.testing import host_os, make_repo, run def test_empty_tags(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'bundles': ["bundle1"], 'os': host_os(), }, }, bundles={ "bundle1": { 'attrs': { 'tags': { "empty": { 'needs': {"action:early"}, }, }, }, 'items': { 'actions': { "early": { 'command': "true", }, "late": { 'command': "true", 'needs': {"tag:empty"}, }, }, }, }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 0 def test_empty_tag_loop(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'bundles': ["bundle1"], 'os': host_os(), }, }, bundles={ "bundle1": { 'attrs': { 'tags': { "1": { 'needs': {"tag:2"}, }, "2": { 'tags': {"3"}, }, "3": { 'needs': {"tag:1"}, }, }, }, 'items': { 'actions': { "early": { 'command': "true", }, }, }, }, }, ) stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) assert rcode == 1 bundlewrap-4.13.6/tests/integration/bw_diff.py000066400000000000000000000047571417502274300214310ustar00rootroot00000000000000from bundlewrap.utils.testing import make_repo, run def test_metadata(tmpdir): make_repo( tmpdir, nodes={ "node1": {'metadata': {"key": "value1"}}, "node2": {'metadata': {"key": "value2"}}, }, ) stdout, stderr, rcode = run("bw diff -m node1 node2", path=str(tmpdir)) assert b"value1" in stdout assert b"value2" in stdout assert stderr == b"" assert rcode == 0 def test_file_items(tmpdir): make_repo( tmpdir, nodes={ "node1": {'bundles': ["bundle1"]}, "node2": {'bundles': ["bundle2"]}, }, bundles={ "bundle1": { 'items': { "files": { "/tmp/test": { 'content': "one", }, }, }, }, "bundle2": { 'items': { "files": { "/tmp/test": { 'content': "two", }, }, }, }, }, ) stdout, stderr, rcode = run("bw diff -i file:/tmp/test -- node1 node2", path=str(tmpdir)) assert b"one" in stdout assert b"two" in stdout assert stderr == b"" assert rcode == 0 def test_whole_node(tmpdir): make_repo( tmpdir, nodes={ "node1": {'bundles': ["bundle1", "bundle3"]}, "node2": {'bundles': ["bundle2", "bundle3"]}, }, bundles={ "bundle1": { 'items': { "files": { "/tmp/foo": { 'content': "one", }, }, }, }, "bundle2": { 'items': { "files": { "/tmp/foo": { 'content': "two", }, }, }, }, "bundle3": { 'items': { "files": { "/tmp/bar": { 'content': "common", }, }, }, }, }, ) stdout, stderr, rcode = run("bw diff node1 node2", path=str(tmpdir)) assert b"/tmp/foo" in stdout assert b"/tmp/bar" not in stdout assert stderr == b"" assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_groups.py000066400000000000000000000077311417502274300220330ustar00rootroot00000000000000from bundlewrap.utils.testing import make_repo, run def test_group_members(tmpdir): make_repo( tmpdir, nodes={ "node1": {}, "node2": {}, "node3": {}, }, groups={ "group1": {}, "group2": { 'members': {"node2"}, }, "group3": { 'members': {"node2", "node3"}, }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1 group2 group3 -a nodes", path=str(tmpdir)) assert stdout == b"""group1\t group2\tnode2 group3\tnode2,node3 """ assert stderr == b"" assert rcode == 0 def test_group_members_at_node(tmpdir): make_repo( tmpdir, nodes={ "node1": {'groups': ["group1", "group2"]}, "node2": {'groups': ["group1"]}, "node3": {'groups': []}, }, groups={ "group1": {}, "group2": {}, "group3": {}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1 group2 
group3 -a nodes", path=str(tmpdir)) assert stdout == b"""group1\tnode1,node2 group2\tnode1 group3\t """ assert stderr == b"" assert rcode == 0 def test_supergroups(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {'supergroups': {"group1"}}, "group3": {'supergroups': {"group1"}}, "group4": {}, "group5": {'subgroups': {"group1"}}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group1", path=str(tmpdir)) assert stdout == b"""group1 group2 group3 """ assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group2", path=str(tmpdir)) assert stdout == b"group2\n" assert stderr == b"" assert rcode == 0 def test_supergroups_indirect(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {'supergroups': {"group1"}}, "group3": {'supergroups': {"group2"}}, "group4": {}, "group5": {'subgroups': {"group1"}}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group1", path=str(tmpdir)) assert stdout == b"""group1 group2 group3 """ assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group2", path=str(tmpdir)) assert stdout == b"""group2 group3 """ assert stderr == b"" assert rcode == 0 def test_supergroups_loop(tmpdir): make_repo( tmpdir, groups={ "group1": {'supergroups': {"group2"}}, "group2": {'supergroups': {"group1"}}, "group3": {}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group1", path=str(tmpdir)) assert b"group1" in stderr assert b"group2" in stderr assert b"group3" not in stderr assert rcode == 1 def test_supergroups_loop_thru_subgroup(tmpdir): make_repo( tmpdir, groups={ "group1": { 'subgroups': {"group2"}, 'supergroups': {"group3"}, }, "group2": {'subgroups': {"group3"}}, "group3": {}, "group4": {}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group1", path=str(tmpdir)) assert b"group1" in stderr assert b"group2" in stderr assert b"group3" in stderr assert b"group4" not in stderr assert rcode == 1 def test_supergroups_redundant(tmpdir): make_repo( tmpdir, groups={ "group1": {'subgroups': {"group2"}}, "group2": {'supergroups': {"group1"}}, "group3": {}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups group1", path=str(tmpdir)) assert b"group1" in stderr assert b"group2" in stderr assert b"group3" not in stderr assert rcode == 1 bundlewrap-4.13.6/tests/integration/bw_hash.py000066400000000000000000000210421417502274300214260ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_empty(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n" assert stderr == b"" def test_nondeterministic(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content_type': 'mako', 'content': "<% import random %>${random.randint(1, 9999)}", }, }, }, }, }, ) hashes = set() for i in range(3): stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) hashes.add(stdout.strip()) assert len(hashes) > 1 def test_deterministic(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "${node.name}", 'group': None, # BSD has a different default and we don't want to # deal with that here }, }, }, }, }, ) hashes = set() for i in range(3): stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) hashes.add(stdout.strip()) assert 
len(hashes) == 1 assert hashes.pop() == b"2203e7acc35608bbff471c023b7b7498e5b385d9" def test_dict(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "yes please", 'group': None, # BSD has a different default and we don't want to # deal with that here }, }, }, }, }, ) stdout, stderr, rcode = run("bw hash -d", path=str(tmpdir)) assert rcode == 0 assert stdout == b"93e7a2c6e8cdc71fb4df5426bc0d0bb978d84381 node1\n" stdout, stderr, rcode = run("bw hash -d node1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"59d1a7c79640ccdfd3700ab141698a9389fcd0b7 file:/test\n" stdout, stderr, rcode = run("bw hash -d node1 file:/test", path=str(tmpdir)) assert rcode == 0 assert stdout == ( b"content_hash\tc05a36d547e2b1682472f76985018038d1feebc5\n" b"mode\t0644\n" b"owner\troot\n" b"type\tfile\n" ) def test_metadata_empty(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'metadata': {}, }, }, ) stdout, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n" def test_metadata_fault(tmpdir): make_repo(tmpdir) with open(join(str(tmpdir), "nodes.py"), 'w') as f: f.write(""" nodes = { 'node1': { 'metadata': {'foo': vault.password_for("testing")}, }, 'node2': { 'metadata': {'foo': vault.password_for("testing").value}, }, 'node3': { 'metadata': {'foo': "faCTT76kagtDuZE5wnoiD1CxhGKmbgiX"}, }, 'node4': { 'metadata': {'foo': "something else entirely"}, }, } """) stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) assert stdout1 == b"b60c0959c9c1ff38940d7b6d4121b2162be34fc9\n" assert stderr == b"" assert rcode == 0 stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir)) assert stdout2 == stdout1 assert stderr == b"" assert rcode == 0 stdout3, stderr, rcode = run("bw hash -m node3", path=str(tmpdir)) assert stdout3 == stdout1 assert stderr == b"" assert rcode == 0 stdout4, stderr, rcode = run("bw hash -m node4", path=str(tmpdir)) assert stdout4 != stdout1 assert stderr == b"" assert rcode == 0 def test_metadata_nested_sort(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'metadata': { 'nested': { 'one': True, 'two': False, 'three': 3, 'four': "four", 'five': None, }, }, }, "node2": { 'metadata': { 'nested': { 'five': None, 'four': "four", 'one': True, 'three': 3, 'two': False, }, }, }, }, ) stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) assert rcode == 0 assert stdout1 == b"d96dc8da8948d0da7924954a657ac960ce7194e9\n" stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir)) assert rcode == 0 assert stdout1 == stdout2 def test_metadata_repo(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'metadata': { 'foo': 47, }, }, }, ) stdout, stderr, rcode = run("bw hash -m", path=str(tmpdir)) assert rcode == 0 assert stdout == b"8c4a30eaa521c966c678d6e51070f6b3a34b7322\n" def test_metadata_repo_dict(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'metadata': { 'foo': 47, }, }, }, ) stdout, stderr, rcode = run("bw hash -md", path=str(tmpdir)) assert rcode == 0 assert stdout == b"node1\t223fb72805ecab20f92b463af65896303f997f1c\n" def test_groups_repo(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {}, }, ) stdout, stderr, rcode = run("bw hash -g", path=str(tmpdir)) assert rcode == 0 assert stdout == b"479c737e191339e5fae20ac8a8903a75f6b91f4d\n" def test_groups_repo_dict(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {}, }, ) stdout, stderr, rcode = run("bw hash -dg", 
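        # -d turns the single repo hash into a per-entity listing; with -g
        # that is just the sorted group names (asserted below)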
path=str(tmpdir)) assert rcode == 0 assert stdout == b"group1\ngroup2\n" def test_groups(tmpdir): make_repo( tmpdir, groups={ "group1": {'members': ["node1", "node2"]}, "group2": {'members': ["node3"]}, }, nodes={ "node1": {}, "node2": {}, "node3": {}, }, ) stdout, stderr, rcode = run("bw hash -g group1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"59f5a812acd22592b046b20e9afedc1cfcd37c77\n" def test_groups_dict(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {}, }, nodes={ "node1": {'groups': {"group1"}}, "node2": {'groups': {"group1"}}, "node3": {'groups': {"group2"}}, }, ) stdout, stderr, rcode = run("bw hash -dg group1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"node1\nnode2\n" def test_groups_node(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {}, }, nodes={ "node1": {'groups': {"group1"}}, "node2": {'groups': {"group1"}}, "node3": {'groups': {"group2"}}, }, ) stdout, stderr, rcode = run("bw hash -g node1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"6f4615dc71426549e22df7961bd2b88ba95ad1fc\n" def test_groups_node_dict(tmpdir): make_repo( tmpdir, groups={ "group1": {}, "group2": {}, }, nodes={ "node1": {'groups': {"group1"}}, "node2": {'groups': {"group1"}}, "node3": {'groups': {"group2"}}, }, ) stdout, stderr, rcode = run("bw hash -dg node1", path=str(tmpdir)) assert rcode == 0 assert stdout == b"group1\n" bundlewrap-4.13.6/tests/integration/bw_items.py000066400000000000000000000154421417502274300216330ustar00rootroot00000000000000from bundlewrap.utils.testing import make_repo, run def test_file_preview(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "föö", 'encoding': 'latin-1', }, }, }, }, }, ) stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir)) assert stdout == "föö".encode('utf-8') # our output is always utf-8 assert rcode == 0 def test_multiple_file_preview(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "föö", }, "/testdir/test2": { 'content': "bar", }, }, }, }, }, ) stdout, stderr, rcode = run("bw items -w itemprev node1", path=str(tmpdir)) assert rcode == 0 assert tmpdir.join("itemprev/test").exists() assert tmpdir.join("itemprev/testdir/test2").exists() def test_fault_unavailable(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "${repo.vault.password_for('test', key='404')}", 'content_type': 'mako', }, }, }, }, }, ) stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir)) assert rcode == 1 def test_fault_unavailable_multiple(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "föö", }, "/testdir/test3": { 'content': "${repo.vault.password_for('test', key='404')}", 'content_type': 'mako', }, }, }, }, }, ) stdout, stderr, rcode = run("bw items -w itemprev node1", path=str(tmpdir)) assert rcode == 0 assert tmpdir.join("itemprev/test").exists() assert not tmpdir.join("itemprev/testdir/test3").exists() def test_tag_inheritance(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'attrs': { 'tags': { "directly": { 'tags': {"inherited"}, }, }, }, 'items': { 'actions': { "test": { 'command': "true", 'tags': {"directly"}, }, }, }, }, }, ) stdout, stderr, 
rcode = run("bw items --attrs node1 action:test", path=str(tmpdir)) assert rcode == 0 assert "inherited" in stdout.decode() def test_tag_inheritance_loop(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'attrs': { 'tags': { "directly": { 'tags': {"inherited"}, }, "inherited": { 'tags': {"looped"}, }, "looped": { 'tags': {"inherited"}, 'needs': {"action:dep"}, }, }, }, 'items': { 'actions': { "test": { 'command': "true", 'tags': {"directly"}, }, "dep": { 'command': "true", }, }, }, }, }, ) stdout, stderr, rcode = run("bw items --attrs node1 action:test", path=str(tmpdir)) assert rcode == 0 assert "inherited" in stdout.decode() assert "looped" in stdout.decode() assert "action:dep" in stdout.decode() def test_duplicate_items(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1", "bundle2"], }, }, bundles={ "bundle1": { 'items': { 'actions': { "dupl": { 'command': "true", }, }, }, }, "bundle2": { 'items': { 'actions': { "dupl": { 'command': "true", }, }, }, }, }, ) stdout, stderr, rcode = run("bw items node1", path=str(tmpdir)) assert rcode == 1 assert "action:dupl" in stderr.decode() assert "bundle1" in stderr.decode() assert "bundle2" in stderr.decode() def test_show_auto_needs(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'directories': { "/foo": {}, "/foo/bar": {'needs': {"action:"}}, }, }, }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw items --attr node1 directory:/foo/bar needs", path=str(tmpdir)) assert stdout.decode() == """attribute\tvalue needs\taction: needs\tdirectory:/foo """ assert stderr.decode() == "" assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_lock.py000066400000000000000000000024171417502274300214400ustar00rootroot00000000000000from re import search from bundlewrap.utils.testing import host_os, make_repo, run def get_lock_id(output): return search(r"locked with ID (\w+) ", output).groups()[0] def test_add_lock_apply_remove(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'bundles': ["bundle1"], 'os': host_os(), }, }, bundles={ "bundle1": { 'items': { 'files': { "/tmp/bw_test_lock_add": { 'content': "foo", }, }, }, }, }, ) run("sudo rm -f /tmp/bw_test_lock_add") stdout, stderr, rcode = run("BW_IDENTITY=jdoe bw lock add -c höhöhö -e 1m -i file:/tmp/bw_test_lock_add -- localhost", path=str(tmpdir)) assert rcode == 0 lock_id = get_lock_id(stdout.decode('utf-8')) assert len(lock_id) == 4 stdout, stderr, rcode = run("bw -d apply localhost", path=str(tmpdir)) assert rcode == 0 stdout, stderr, rcode = run("cat /tmp/bw_test_lock_add", path=str(tmpdir)) assert rcode != 0 stdout, stderr, rcode = run("bw lock remove localhost {}".format(lock_id), path=str(tmpdir)) assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_metadata.py000066400000000000000000000350371417502274300222740ustar00rootroot00000000000000from json import loads from os.path import join from bundlewrap.utils.testing import make_repo, run def test_empty(tmpdir): make_repo( tmpdir, nodes={ "node1": {}, }, ) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert stdout == b"{}\n" assert stderr == b"" assert rcode == 0 def test_simple(tmpdir): make_repo( tmpdir, nodes={ "node1": {'metadata': {"foo": "bar"}}, }, ) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == {"foo": "bar"} assert stderr == b"" assert rcode == 0 def test_object(tmpdir): make_repo(tmpdir) with open(join(str(tmpdir), "nodes.py"), 
'w') as f: f.write("nodes = {'node1': {'metadata': {'foo': object}}}") stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert rcode == 1 def test_merge(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': {"group1"}, 'metadata': { "foo": { "bar": "baz", }, }, }, }, groups={ "group1": { 'metadata': { "ding": 5, "foo": { "bar": "ZAB", "baz": "bar", }, }, }, }, ) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "ding": 5, "foo": { "bar": "baz", "baz": "bar", }, } assert stderr == b"" assert rcode == 0 def test_metadatapy(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'metadata': { "foo": { "bar": "shizzle", }, }, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return { "baz": node.name, "frob": metadata.get("foo/bar", "shnozzle") + "ay", "gob": metadata.get("shlop", "mop"), } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "baz": "node1", "foo": { "bar": "shizzle", }, "frob": "shizzleay", "gob": "mop", } assert stderr == b"" assert rcode == 0 def test_metadatapy_defaults(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'metadata': {"foo": "bar"}, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """defaults = { "baz": node.name, "foo": "baz", } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "baz": "node1", "foo": "bar", } assert stderr == b"" assert rcode == 0 def test_metadatapy_defaults_atomic(tmpdir): make_repo( tmpdir, bundles={"test": {}}, ) with open(join(str(tmpdir), "nodes.py"), 'w') as f: f.write( """ from bundlewrap.metadata import atomic nodes = { "node1": { 'bundles': ["test"], 'metadata': {"foo": atomic({"bar": "baz"})}, }, } """) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": { "bar": "frob", "baz": "gobble", }, } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "foo": {"bar": "baz"}, } assert stderr == b"" assert rcode == 0 def test_metadatapy_update(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'metadata': {"foo": "bar"}, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return { "baz": "foo", "foo": "baz", } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "baz": "foo", "foo": "bar", } assert stderr == b"" assert rcode == 0 def test_table(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'metadata': { "foo_dict": { "bar": "baz", }, "foo_list": ["bar", 1], "foo_int": 47, "foo_umlaut": "föö", }, }, "node2": { 'metadata': { "foo_dict": { "baz": "bar", }, "foo_list": [], "foo_int": -3, "foo_umlaut": "füü", }, }, }, groups={"all": {'member_patterns': {r".*"}}}, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw metadata all -k foo_dict/bar foo_list foo_int foo_umlaut", path=str(tmpdir)) assert stdout.decode('utf-8') == """node\tfoo_dict/bar\tfoo_int\tfoo_list\tfoo_umlaut node1\tbaz\t47\tbar, 1\tföö node2\t\t-3\t\tfüü """ assert stderr == b"" assert rcode == 0 def test_metadatapy_merge_order(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'groups': 
{"group1"}, 'metadata': { "four": "node", }, }, }, groups={ "group1": { 'metadata': { "three": "group", "four": "group", }, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """defaults = { "one": "defaults", "two": "defaults", "three": "defaults", "four": "defaults", } @metadata_reactor def foo_reactor(metadata): return { "two": "reactor", "three": "reactor", "four": "reactor", } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "one": "defaults", "two": "reactor", "three": "group", "four": "node", } assert stderr == b"" assert rcode == 0 def test_metadatapy_static_reorder(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'metadata': { "foo": "bar", "frob": "flup", }, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo_reactor(metadata): return { "foo": "overwritten", "baz": metadata.get("frob"), } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "foo": "bar", "frob": "flup", "baz": "flup", } assert stderr == b"" assert rcode == 0 def test_metadatapy_reactor_keyerror_from_metastack(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def foo_reactor(metadata): return {'foo': metadata.get('bar')} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert rcode == 1 assert b"node1" in stderr assert b"foo_reactor" in stderr assert b"'bar'" in stderr def test_metadatapy_reactor_keyerror_from_dict(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def foo_reactor(metadata): x = {}['baz'] return {'x': x} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert rcode == 1 assert b"node1" in stderr assert b"foo_reactor" in stderr assert b"'baz'" in stderr def test_metadatapy_reactor_keyerror_fixed(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def one(metadata): return {'one': True} @metadata_reactor def two(metadata): return {'two': metadata.get('one')} @metadata_reactor def three(metadata): return {'three': metadata.get('two')} @metadata_reactor def four(metadata): return {'four': metadata.get('three')} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "one": True, "two": True, "three": True, "four": True, } assert stderr == b"" assert rcode == 0 def test_metadatapy_infinite_loop(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def plusone(metadata): return {'foo': metadata.get('bar', 0) + 1 } @metadata_reactor def plustwo(metadata): return {'bar': metadata.get('foo', 0) + 2 } """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert rcode == 1 def test_metadatapy_no_self_react(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", 
"test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def reactor1(metadata): assert not metadata.get('broken', False) return {'broken': True} @metadata_reactor def reactor2(metadata): # just to make sure reactor1 runs again return {'again': True} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "broken": True, "again": True, } def test_own_node_metadata(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'metadata': {'number': 47}, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def reactor1(metadata): return {'plusone': node.metadata.get('number') + 1} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "number": 47, "plusone": 48, } def test_other_node_metadata(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], 'metadata': {'number': 47}, }, "node2": { 'bundles': ["test"], 'metadata': {'number': 42}, }, "node3": { 'bundles': ["test"], 'metadata': {'number': 23}, }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor def reactor1(metadata): numbers = set() for n in repo.nodes: if n != node: numbers.add(n.metadata.get('number')) return {'other_numbers': numbers} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert loads(stdout.decode()) == { "number": 47, "other_numbers": [23, 42], } stdout, stderr, rcode = run("bw metadata node2", path=str(tmpdir)) assert loads(stdout.decode()) == { "number": 42, "other_numbers": [23, 47], } stdout, stderr, rcode = run("bw metadata node3", path=str(tmpdir)) assert loads(stdout.decode()) == { "number": 23, "other_numbers": [42, 47], } def test_reactor_provides_not_run(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor.provides('foo') def reactor1(metadata): return {'foo': 1} @metadata_reactor.provides('bar') def reactor2(metadata): assert False """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert rcode == 1 assert "AssertionError" in stderr.decode() stdout, stderr, rcode = run("bw metadata node1 -k foo", path=str(tmpdir)) assert rcode == 0 assert loads(stdout.decode()) == { 'foo': 1, } def test_reactor_provides_chain(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor.provides('foo') def reactor1(metadata): return { 'foo': { 'baz': metadata.get('bar'), }, } @metadata_reactor.provides('bar') def reactor2(metadata): return {'bar': 2} @metadata_reactor.provides('something irrelevant') def reactor3(metadata): assert False @metadata_reactor def reactor4(metadata): return {} """) stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) assert rcode == 1 assert "AssertionError" in stderr.decode() stdout, stderr, rcode = run("bw metadata node1 -k foo", path=str(tmpdir)) assert rcode == 0 assert loads(stdout.decode()) == { 'foo': { 'baz': 2, }, } bundlewrap-4.13.6/tests/integration/bw_nodes.py000066400000000000000000000042231417502274300216150ustar00rootroot00000000000000from bundlewrap.utils.testing import make_repo, run def test_empty(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = 
run("bw nodes", path=str(tmpdir)) assert stdout == b"" assert stderr == b"" assert rcode == 0 def test_single(tmpdir): make_repo(tmpdir, nodes={"node1": {}}) stdout, stderr, rcode = run("bw nodes", path=str(tmpdir)) assert stdout == b"node1\n" assert stderr == b"" assert rcode == 0 def test_hostname(tmpdir): make_repo( tmpdir, groups={"all": {'member_patterns': {r".*"}}}, nodes={"node1": {'hostname': "node1.example.com"}}, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all -a hostname | cut -f 2", path=str(tmpdir)) assert stdout == b"node1.example.com\n" assert stderr == b"" assert rcode == 0 def test_bundles(tmpdir): make_repo( tmpdir, bundles={ "bundle1": {}, "bundle2": {}, }, groups={"all": {'member_patterns': {r".*"}}}, nodes={ "node1": {'bundles': ["bundle1", "bundle2"]}, "node2": {'bundles': ["bundle2"]}, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all -a bundles | grep node1 | cut -f 2", path=str(tmpdir)) assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2"] assert stderr == b"" assert rcode == 0 def test_bundles_via_group(tmpdir): make_repo( tmpdir, bundles={ "bundle1": {}, "bundle2": {}, "bundle3": {}, }, groups={ "group1": { 'bundles': {"bundle2"}, 'subgroups': {"group2"}, }, "group2": { 'bundles': {"bundle3"}, } }, nodes={ "node1": { 'bundles': {"bundle1"}, 'groups': {"group2"}, }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 -a bundles | cut -f 2", path=str(tmpdir)) assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2", "bundle3"] assert stderr == b"" assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_plot.py000066400000000000000000000115601417502274300214650ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_groups_for_node(tmpdir): make_repo( tmpdir, nodes={ "node-foo": {'groups': {"group-foo"}}, "node-bar": {}, "node-baz": {}, "node-pop": {'groups': {"group-baz"}}, }, groups={ "group-foo": { 'member_patterns': [r".*-bar"], }, "group-bar": { 'subgroups': ["group-foo"], }, "group-baz": {}, "group-frob": { 'members': {"node-pop"}, }, "group-pop": { 'subgroup_patterns': [r"ba"], }, }, ) stdout, stderr, rcode = run("bw plot groups-for-node node-foo", path=str(tmpdir)) assert stdout == b"""digraph bundlewrap { rankdir = LR node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] edge [arrowhead=vee] "group-bar" [fontcolor=white,style=filled]; "group-foo" [fontcolor=white,style=filled]; "group-pop" [fontcolor=white,style=filled]; "node-foo" [fontcolor="#303030",shape=box,style=rounded]; "group-bar" -> "group-foo" [color="#6BB753",penwidth=2] "group-foo" -> "node-foo" [color="#D18C57",penwidth=2] "group-pop" -> "group-bar" [color="#6BB753",penwidth=2] } """ assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("bw plot groups-for-node node-pop", path=str(tmpdir)) assert stdout == b"""digraph bundlewrap { rankdir = LR node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] edge [arrowhead=vee] "group-baz" [fontcolor=white,style=filled]; "group-frob" [fontcolor=white,style=filled]; "group-pop" [fontcolor=white,style=filled]; "node-pop" [fontcolor="#303030",shape=box,style=rounded]; "group-baz" -> "node-pop" [color="#D18C57",penwidth=2] "group-frob" -> "node-pop" [color="#D18C57",penwidth=2] "group-pop" -> "group-baz" [color="#6BB753",penwidth=2] } """ assert stderr == b"" assert rcode == 0 def test_empty_tags(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": 
{ 'attrs': { 'tags': { "empty": { 'needs': {"action:early"}, }, }, }, 'items': { 'actions': { "early": { 'command': "true", }, "late": { 'command': "true", 'needs': {"tag:empty"}, }, }, }, }, }, ) stdout, stderr, rcode = run("bw plot node node1", path=str(tmpdir)) assert rcode == 0 assert '"action:late" -> "empty_tag:empty"' in stdout.decode() assert '"empty_tag:empty" -> "action:early"' in stdout.decode() def test_no_empty_tags(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'attrs': { 'tags': { "notempty": { 'needs': {"action:early"}, }, }, }, 'items': { 'actions': { "early": { 'command': "true", }, "middle": { 'command': "true", 'tags': {"notempty"}, }, "late": { 'command': "true", 'needs': {"tag:notempty"}, }, }, }, }, }, ) stdout, stderr, rcode = run("bw plot node node1", path=str(tmpdir)) assert rcode == 0 assert '"action:late" -> "action:middle"' in stdout.decode() assert '"action:middle" -> "action:early"' in stdout.decode() assert "empty_tag" not in stdout.decode() def test_plot_reactors(tmpdir): make_repo( tmpdir, bundles={"test": {}}, nodes={ "node1": { 'bundles': ["test"], }, }, ) with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: f.write( """ @metadata_reactor.provides('foo') def reactor1(metadata): return {'foo': metadata.get('bar')} @metadata_reactor.provides('bar') def reactor2(metadata): return {'bar': 47} """) stdout, stderr, rcode = run("bw plot reactors node1", path=str(tmpdir)) assert rcode == 0 assert "reactor1" in stdout.decode() bundlewrap-4.13.6/tests/integration/bw_pw.py000066400000000000000000000053451417502274300211410ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_encrypt(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw pw -e test", path=str(tmpdir)) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("bw pw -d '{}'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) assert stdout == b"test\n" assert stderr == b"" assert rcode == 0 def test_encrypt_different_key_autodetect(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw pw -e -k generate test", path=str(tmpdir)) assert stderr == b"" assert rcode == 0 print(stdout) stdout, stderr, rcode = run("bw pw -d '{}'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) assert stdout == b"test\n" assert stderr == b"" assert rcode == 0 def test_encrypt_file(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("ohai") stdout, stderr, rcode = run( f"bw pw -e -f encrypted \"{source_file}\"", path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw pw -d -f decrypted encrypted", path=str(tmpdir), ) assert stdout == b"" assert stderr == b"" assert rcode == 0 with open(join(tmpdir, "data", "decrypted")) as f: assert f.read() == "ohai" def test_encrypt_file_different_key_autodetect(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("ohai") stdout, stderr, rcode = run( f"bw pw -e -f encrypted -k generate \"{source_file}\"", path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw pw -d -f decrypted encrypted", path=str(tmpdir), ) assert stdout == b"" assert stderr == b"" assert rcode == 0 with open(join(tmpdir, "data", "decrypted")) as f: assert f.read() == "ohai" def test_human_password(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = 
run("bw pw -H \"hello world\"", path=str(tmpdir)) assert stdout == b"Xaint-Heep-Pier-Tikl-76\n" assert stderr == b"" assert rcode == 0 def test_random_bytes_as_base64(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw pw -b foo", path=str(tmpdir)) assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n" assert stderr == b"" assert rcode == 0 def test_random_bytes_as_base64_length(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw pw -b -l 1 foo", path=str(tmpdir)) assert stdout == b"rg==\n" assert stderr == b"" assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_repo.py000066400000000000000000000006261417502274300214550ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_not_a_repo_test(tmpdir): assert run("bw nodes", path=str(tmpdir))[2] == 1 def test_subdir_invocation(tmpdir): make_repo(tmpdir, nodes={"node1": {}}) stdout, stderr, rcode = run("bw nodes", path=join(str(tmpdir), "bundles")) assert stdout == b"node1\n" assert stderr == b"" assert rcode == 0 bundlewrap-4.13.6/tests/integration/bw_run.py000066400000000000000000000013621417502274300213120ustar00rootroot00000000000000from bundlewrap.utils.testing import host_os, make_repo, run def test_run_ok(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'os': host_os(), }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw run localhost true", path=str(tmpdir)) assert rcode == 0 assert b"localhost\t0" in stdout assert stderr == b"" def test_run_fail(tmpdir): make_repo( tmpdir, nodes={ "localhost": { 'os': host_os(), }, }, ) stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw run localhost false", path=str(tmpdir)) assert rcode == 0 assert b"localhost\t1" in stdout assert stderr == b"" bundlewrap-4.13.6/tests/integration/bw_stats.py000066400000000000000000000025241417502274300216450ustar00rootroot00000000000000from bundlewrap.utils.testing import make_repo, run def test_nondeterministic(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { 'files': { "/test": { 'content': "foo", }, "/test2": { 'content': "foo", }, }, }, }, }, ) stdout, stderr, rcode = run("bw stats", path=str(tmpdir)) assert stdout == """╭───────┬───────────────────╮ │ count │ type │ ├───────┼───────────────────┤ │ 1 │ nodes │ │ 0 │ groups │ │ 1 │ bundles │ │ 0 │ metadata defaults │ │ 0 │ metadata reactors │ │ 2 │ items │ ├───────┼───────────────────┤ │ 2 │ file │ ╰───────┴───────────────────╯ """.encode('utf-8') bundlewrap-4.13.6/tests/integration/bw_test.py000066400000000000000000000637441417502274300215010ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import host_os, make_repo, run def test_empty(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw test", path=str(tmpdir)) assert stderr == b"" assert rcode == 0 def test_bundle_not_found(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, ) assert run("bw test", path=str(tmpdir))[2] == 1 def test_hooks(tmpdir): make_repo( tmpdir, nodes={ "node1": {}, "node2": {}, }, ) with open(join(str(tmpdir), "hooks", "test.py"), 'w') as f: f.write("""from bundlewrap.utils.ui import io def test(repo, **kwargs): io.stdout("AAA") def test_node(repo, node, **kwargs): io.stdout("BBB") """) assert b"AAA" in run("bw test -H", path=str(tmpdir))[0] assert b"BBB" in run("bw test -J", path=str(tmpdir))[0] def test_circular_dep_direct(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ 
"bundle1": { 'items': { "pkg_apt": { "foo": { 'needs': ["pkg_apt:bar"], }, "bar": { 'needs': ["pkg_apt:foo"], }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_circular_dep_indirect(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "pkg_apt": { "foo": { 'needs': ["pkg_apt:bar"], }, "bar": { 'needs': ["pkg_apt:baz"], }, "baz": { 'needs': ["pkg_apt:foo"], }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_circular_dep_self(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "pkg_apt": { "foo": { 'needs': ["pkg_apt:foo"], }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_unknown_tag(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "files": { "/foo": { 'content': "none", 'needs': { "tag:bar", }, }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 0 def test_circular_trigger_self(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "pkg_apt": { "foo": { 'triggers': ["pkg_apt:foo"], }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_file_invalid_attribute(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "files": { "/foo": { "potato": "yes", }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_file_template_error(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "files": { "/foo": { 'content_type': 'mako', 'content': "${broken", }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 def test_group_loop(tmpdir): make_repo( tmpdir, groups={ "group1": { 'subgroups': ["group2"], }, "group2": { 'subgroups': ["group3"], }, "group3": { 'subgroups': ["group1"], }, }, ) assert run("bw test -S", path=str(tmpdir))[2] == 1 def test_group_metadata_collision(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group3", }, }, }, groups={ "group1": { 'metadata': { 'foo': { 'baz': 1, }, 'bar': 2, }, }, "group2": { 'metadata': { 'foo': { 'baz': 3, }, 'snap': 4, }, 'subgroups': ["group3"], }, "group3": {}, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 1 def test_group_metadata_collision_subgroups(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group3", }, }, }, groups={ "group1": { 'metadata': { 'foo': { 'baz': 1, }, 'bar': 2, }, }, "group2": { 'metadata': { 'foo': { 'baz': 3, }, 'snap': 4, }, 'subgroups': ["group1", "group3"], }, "group3": {}, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 0 def test_group_metadata_collision_list(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group2", }, }, }, groups={ "group1": { 'metadata': { 'foo': [1], }, }, "group2": { 'metadata': { 'foo': [2], }, }, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 1 def test_group_metadata_collision_dict(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group2", }, }, }, groups={ "group1": { 'metadata': { 'foo': {'bar': 1}, }, }, "group2": { 'metadata': { 'foo': 2, }, }, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 1 def test_group_metadata_collision_dict_ok(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group2", }, }, }, groups={ "group1": { 'metadata': { 'foo': {'bar': 1}, }, }, "group2": { 
'metadata': { 'foo': {'baz': 2}, }, }, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 0 def test_group_metadata_collision_set(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group2", }, }, }, groups={ "group1": { 'metadata': { 'foo': set([1]), }, }, "group2": { 'metadata': { 'foo': 2, }, }, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 1 def test_group_metadata_collision_set_ok(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'groups': { "group1", "group2", }, }, }, groups={ "group1": { 'metadata': { 'foo': set([1]), }, }, "group2": { 'metadata': { 'foo': set([2]), }, }, }, ) assert run("bw test -M", path=str(tmpdir))[2] == 0 def test_defaults_metadata_collision(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1", "bundle2"}, }, }, bundles={ "bundle1": {}, "bundle2": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": "bar", } """) with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": "baz", } """) stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) assert rcode == 1 assert b"foo" in stderr def test_defaults_metadata_collision_nested(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1", "bundle2"}, }, }, bundles={ "bundle1": {}, "bundle2": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": {"bar": "baz"}, } """) with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": {"bar": "frob"}, } """) stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) assert rcode == 1 assert b"foo/bar" in stderr def test_defaults_metadata_collision_ok(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1", "bundle2"}, }, }, bundles={ "bundle1": {}, "bundle2": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": {"bar"}, } """) with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: f.write( """defaults = { "foo": {"baz"}, } """) assert run("bw test -M", path=str(tmpdir))[2] == 0 def test_reactor_metadata_collision(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1", "bundle2"}, }, }, bundles={ "bundle1": {}, "bundle2": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return {"foo": 1} """) with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return {"foo": 2} """) stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) assert rcode == 1 assert b"foo" in stderr def test_reactor_metadata_collision_nested(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1", "bundle2"}, }, }, bundles={ "bundle1": {}, "bundle2": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return {"foo": {"bar": "1"}} """) with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return {"foo": {"bar": "2"}} """) stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) assert rcode == 1 assert b"foo/bar" in stderr def test_reactor_metadata_collision_nested_mixed(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1", "bundle2"}, }, }, bundles={ "bundle1": {}, "bundle2": 
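            # the metadata.py files written below return a set in one bundle
            # and a list in the other for the same path, which must still be
            # reported as a collision at foo/bar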
{}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return {"foo": {"bar": {True}}} """) with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: f.write( """@metadata_reactor def foo(metadata): return {"foo": {"bar": [False]}} """) stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) assert rcode == 1 assert b"foo/bar" in stderr def test_reactor_provides_ok(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1"}, }, }, bundles={ "bundle1": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """@metadata_reactor.provides("foo") def foo(metadata): return {"foo": 1} """) stdout, stderr, rcode = run("bw test -p", path=str(tmpdir)) assert rcode == 0 def test_reactor_provides_violated(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': {"bundle1"}, }, }, bundles={ "bundle1": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write( """@metadata_reactor.provides("foo") def foo(metadata): return {"bar": 1} """) stdout, stderr, rcode = run("bw test -p", path=str(tmpdir)) assert rcode == 1 assert "foo" in stderr.decode() assert "bar" in stderr.decode() def test_fault_missing(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "files": { "/foo": { 'content_type': 'mako', 'content': "${repo.vault.decrypt('bzzt', key='unavailable')}", }, }, }, }, }, ) assert run("bw test -I", path=str(tmpdir))[2] == 1 assert run("bw test -iI", path=str(tmpdir))[2] == 0 def test_fault_missing_content(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": {} }, ) with open(join(str(tmpdir), "bundles", "bundle1", "items.py"), 'w') as f: f.write(""" files = { "/foo": { 'content': repo.vault.decrypt("bzzt", key="unavailable"), }, } """) assert run("bw test -I", path=str(tmpdir))[2] == 1 assert run("bw test -iI", path=str(tmpdir))[2] == 0 def test_metadata_determinism_ok(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write("""@metadata_reactor def test(metadata): return {'test': 1} """) assert run("bw test -m 3", path=str(tmpdir))[2] == 0 def test_metadata_determinism_broken(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": {}, }, ) with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: f.write("""from random import randint n = randint(1, 99999) @metadata_reactor def test(metadata): return {'findme': n} """) stdout, stderr, rcode = run("bw test -m 3", path=str(tmpdir)) assert rcode == 1 assert b"findme" in stderr def test_config_determinism_ok(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "files": { "/test": { 'content': "1", 'content_type': 'mako', }, }, }, }, }, ) assert run("bw test -d 3", path=str(tmpdir))[2] == 0 def test_config_determinism_broken(tmpdir): make_repo( tmpdir, nodes={ "node1": { 'bundles': ["bundle1"], }, }, bundles={ "bundle1": { 'items': { "files": { "/test": { 'content': "<% from random import randint %>\nfindme${randint(1, 99999)\n}", 'content_type': 'mako', }, }, }, }, }, ) stdout, stderr, rcode = run("bw test -d 3", path=str(tmpdir)) assert rcode == 1 assert b"findme" in stderr def test_unknown_subgroup(tmpdir): make_repo( 
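        # "group1" references a subgroup that is never defined, so
        # "bw test" has to fail regardless of the given target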
        tmpdir,
        nodes={
            "node1": {'groups': {"group2"}},
        },
        groups={
            "group1": {'subgroups': ["missing-group"]},
            "group2": {},
        },
    )
    assert run("bw test", path=str(tmpdir))[2] == 1
    assert run("bw test group1", path=str(tmpdir))[2] == 1
    assert run("bw test group2", path=str(tmpdir))[2] == 1


def test_empty_group(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {'groups': {"group2"}},
        },
        groups={
            "group1": {},
            "group2": {},
        },
    )
    assert run("bw test", path=str(tmpdir))[2] == 0
    assert run("bw test -e", path=str(tmpdir))[2] == 1


def test_group_user_dep_deleted(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'items': {
                    "users": {
                        "user1": {
                            'groups': ["group1"],
                        },
                    },
                    "groups": {
                        "group1": {
                            'delete': True,
                        },
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_group_user_dep_ok(tmpdir):
    # regression test for #341
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'items': {
                    "users": {
                        "user1": {},
                    },
                    "groups": {
                        "group1": {'delete': True},
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 0


def test_group_user_dep_deleted_gid(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'items': {
                    "users": {
                        "user1": {
                            'gid': "group1",
                        },
                    },
                    "groups": {
                        "group1": {
                            'delete': True,
                        },
                    },
                },
            },
        },
    )
    assert run("bw test -I", path=str(tmpdir))[2] == 1


def test_reverse_dummy_dep(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1", "bundle2"],
            },
        },
        bundles={
            "bundle1": {
                'items': {
                    'files': {
                        "/test": {
                            'content': "test",
                        },
                    },
                },
            },
            "bundle2": {
                'items': {
                    'files': {
                        "/test2": {
                            'content': "test",
                            'needed_by': ["bundle:bundle1"],
                        },
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw test", path=str(tmpdir))
    assert rcode == 0


def test_bundlepy_tag_loop(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'attrs': {
                    'tags': {
                        "a": {
                            'needs': {"tag:b"},
                        },
                        "b": {
                            'needs': {"tag:a"},
                        },
                    },
                },
                'items': {
                    'actions': {
                        "one": {
                            'command': "true",
                            'tags': {"a"},
                        },
                        "two": {
                            'command': "true",
                            'tags': {"b"},
                        },
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw test -I", path=str(tmpdir))
    assert rcode == 1
    assert "action:one" in stderr.decode()
    assert "action:two" in stderr.decode()


def test_bundlepy_tag_loop2(tmpdir):
    make_repo(
        tmpdir,
        nodes={
            "node1": {
                'bundles': ["bundle1"],
            },
        },
        bundles={
            "bundle1": {
                'attrs': {
                    'tags': {
                        "one": {
                            'needed_by': {"tag:two"},
                        },
                        "two": {
                            'needed_by': {"action:late"},
                        },
                    },
                },
                'items': {
                    'actions': {
                        "early": {
                            'command': "true",
                            'needed_by': {"tag:one"},
                        },
                        "fill_tag_one": {
                            'command': "true",
                            'tags': {"one"},
                        },
                        "fill_tag_two": {
                            'command': "true",
                            'tags': {"two"},
                        },
                        "late": {
                            'command': "true",
                            'needed_by': {"action:early"},  # this makes the loop
                        },
                    },
                },
            },
        },
    )
    stdout, stderr, rcode = run("bw test -I", path=str(tmpdir))
    assert rcode == 1
    assert "action:late" in stderr.decode()


def test_file_test_with_fails(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'items': {
                    'files': {
                        join(str(tmpdir), "foo"): {
                            'content': "does not matter",
                            'test_with': 'false'
                        },
                    },
                },
            },
        },
        nodes={
            "localhost": {
                'bundles': ["test"],
                'os': host_os(),
            },
        },
    )
    stdout, stderr, rcode = run("bw test localhost", path=str(tmpdir))
    assert rcode != 0
    assert b'failed local validation using: false' in stderr


def test_file_test_with_succeeds(tmpdir):
    make_repo(
        tmpdir,
        bundles={
            "test": {
                'items': {
                    'files': {
                        join(str(tmpdir), "foo"): {
                            'content': "does not matter",
                            'test_with': 'true'
                        },
                    },
                },
            },
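        # 'test_with' runs the given command against the rendered file
        # content during "bw test"; `true` always passes, while the
        # `false` variant above must fail local validation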
}, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) stdout, stderr, rcode = run("bw test localhost", path=str(tmpdir)) assert rcode == 0 assert b'failed local validation using: true' not in stderr bundlewrap-4.13.6/tests/integration/bw_verify.py000066400000000000000000000013741417502274300220150ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import host_os, make_repo, run def test_empty_verify(tmpdir): make_repo( tmpdir, bundles={ "test": { 'items': { 'files': { join(str(tmpdir), "foo"): { 'content_type': 'any', }, }, }, }, }, nodes={ "localhost": { 'bundles': ["test"], 'os': host_os(), }, }, ) with open(join(str(tmpdir), "foo"), 'w') as f: f.write("test") stdout, stderr, rcode = run("bw verify localhost", path=str(tmpdir)) assert rcode == 0 bundlewrap-4.13.6/tests/integration/repolibs.py000066400000000000000000000011631417502274300216340ustar00rootroot00000000000000from os.path import join from bundlewrap.utils.testing import make_repo, run def test_metadatapy(tmpdir): make_repo( tmpdir, ) with open(join(str(tmpdir), "libs", "libstest.py"), 'w') as f: f.write( """ivar = 47 def func(): return 48 """) stdout, stderr, rcode = run("bw debug -c 'print(repo.libs.libstest.ivar)'", path=str(tmpdir)) assert stdout == b"47\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run("bw debug -c 'print(repo.libs.libstest.func())'", path=str(tmpdir)) assert stdout == b"48\n" assert stderr == b"" assert rcode == 0 bundlewrap-4.13.6/tests/integration/secret.py000066400000000000000000000230571417502274300213100ustar00rootroot00000000000000from base64 import b64decode from os.path import join from bundlewrap.utils.testing import make_repo, run def test_b64encode_fault(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir)) assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n" assert stderr == b"" assert rcode == 0 def test_format_password(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir)) assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n" assert stderr == b"" assert rcode == 0 def test_human_password_digits(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir)) assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n" assert stderr == b"" assert rcode == 0 def test_human_password_per_word(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir)) assert stdout == b"X-D-F-H-42\n" assert stderr == b"" assert rcode == 0 def test_human_password_words(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir)) assert stdout == b"Xaint-Heep-13\n" assert stderr == b"" assert rcode == 0 def test_encrypt_file_base64(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'wb') as f: f.write("öhai".encode('latin-1')) stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "encrypted", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format( "encrypted", ), 
path=str(tmpdir), ) assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1') assert stderr == b"" assert rcode == 0 def test_faults_equality_decrypt(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"foo\"))'", path=str(tmpdir)) assert stderr == b"" assert rcode == 0 enc_foo = stdout.decode('utf-8').strip() stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.encrypt(\"bar\"))'", path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 enc_bar = stdout.decode('utf-8').strip() stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt(\"{0}\") == repo.vault.decrypt(\"{0}\"))'".format( enc_foo, ), path=str(tmpdir), ) assert stdout == b"True\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt(\"{}\") == repo.vault.decrypt(\"{}\"))'".format( enc_foo, enc_bar, ), path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_decrypt_file(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("foo") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "enc_foo", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("bar") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "enc_bar", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format( "enc_foo", "enc_foo", ), path=str(tmpdir), ) assert stdout == b"True\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format( "enc_foo", "enc_bar", ), path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_decrypt_file_as_base64(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("foo") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "enc_foo", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("bar") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "enc_bar", ), path=str(tmpdir), ) assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format( "enc_foo", "enc_foo", ), path=str(tmpdir), ) assert stdout == b"True\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format( "enc_foo", "enc_bar", ), path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_decrypt_file_mixed(tmpdir): make_repo(tmpdir) source_file = join(str(tmpdir), "data", "source") with open(source_file, 'w') as f: f.write("foo") stdout, stderr, rcode = run( "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( source_file, "enc_foo", ), path=str(tmpdir), ) assert 
stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format( "enc_foo", "enc_foo", ), path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_human_password_for(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"a\"))'", path=str(tmpdir), ) assert stdout == b"True\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"b\"))'", path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_password_for(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"a\"))'", path=str(tmpdir), ) assert stdout == b"True\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"b\"))'", path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_password_for_mixed(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.human_password_for(\"a\"))'", path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_faults_equality_random_bytes_as_base64(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"a\"))'", path=str(tmpdir), ) assert stdout == b"True\n" assert stderr == b"" assert rcode == 0 stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"b\"))'", path=str(tmpdir), ) assert stdout == b"False\n" assert stderr == b"" assert rcode == 0 def test_cmd(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.cmd(\"echo hi\"))'", path=str(tmpdir), ) assert stdout == b"hi\n" assert stderr == b"" assert rcode == 0 def test_cmd_binary_nostrip(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.cmd(\"echo hi\", as_text=False, strip=False))'", path=str(tmpdir), ) assert stdout == b"b'hi\\n'\n" assert stderr == b"" assert rcode == 0 def test_cmd_fail(tmpdir): make_repo(tmpdir) stdout, stderr, rcode = run( "bw debug -c 'print(repo.vault.cmd(\"false\"))'", path=str(tmpdir), ) assert b"CalledProcessError" in stderr assert rcode == 1 bundlewrap-4.13.6/tests/integration/tomlfiles.py000066400000000000000000000012531417502274300220130ustar00rootroot00000000000000from os.path import join from bundlewrap.repo import Repository from bundlewrap.utils import get_file_contents from bundlewrap.utils.testing import make_repo def test_toml_conversion(tmpdir): make_repo( tmpdir, nodes={ 'node1': { 'os': 'ubuntu', 'metadata': { "foo": { "bar": "baz", }, }, }, }, ) repo = Repository(tmpdir) node = repo.get_node("node1") node.toml_save() assert get_file_contents(join(tmpdir, "nodes", "node1.toml")) == \ b"""os = "ubuntu" [metadata.foo] bar = "baz" """ 
bundlewrap-4.13.6/tests/unit/faults.py
from bundlewrap.utils import Fault
from pytest import raises


def test_basic_resolve():
    def callback():
        return 4  # Chosen by fair dice roll. Guaranteed to be random.

    f = Fault('id', callback)
    assert f.value == 4


def test_add_fault():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'bar'

    a = Fault('id foo', callback_a)
    b = Fault('id bar', callback_b)
    c = a + b
    assert c.value == 'foobar'


def test_add_fault_nonstring():
    def callback_a():
        return 4

    def callback_b():
        return 8

    a = Fault('id foo', callback_a)
    b = Fault('id bar', callback_b)
    c = a + b
    assert c.value == 12


def test_add_plain_nonstring():
    def callback():
        return 4

    a = Fault('id foo', callback)
    b = a + 8
    assert b.value == 12


def test_add_plain():
    def callback_a():
        return 'foo'

    a = Fault('id foo', callback_a)
    c = a + 'bar'
    assert c.value == 'foobar'


def test_order():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'bar'

    def callback_c():
        return '0first'

    a = Fault('id foo', callback_a)
    b = Fault('id bar', callback_b)
    c = Fault('id 0first', callback_c)
    lst = sorted([a, b, c])
    assert lst[0].value == '0first'
    assert lst[1].value == 'bar'
    assert lst[2].value == 'foo'


def test_b64encode():
    def callback():
        return 'foo'

    a = Fault('id foo', callback).b64encode()
    assert a.value == 'Zm9v'


def test_format_into():
    def callback():
        return 'foo'

    a = Fault('id foo', callback).format_into('This is my secret: "{}"')
    assert a.value == 'This is my secret: "foo"'


# XXX Other methods missing. This basically tests if
# _make_method_callback() is working.
def test_generic_method_lower():
    def callback():
        return 'FOO'

    a = Fault('id FOO', callback)
    assert a.lower().value == 'foo'
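

# Editorial sketch following up on the XXX note above, not part of the
# original suite: assuming _make_method_callback() proxies str.upper() the
# same way it proxies str.lower(), an analogous test could look like this.
def test_generic_method_upper_sketch():
    def callback():
        return 'foo'

    a = Fault('id foo', callback)
    assert a.upper().value == 'FOO'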


def test_equal_no_operators():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo, but here you see the problem'

    a = Fault('id foo', callback_a)
    b = Fault('id foo', callback_b)
    assert id(a) != id(b)
    assert a == b


def test_not_equal_no_operators():
    def callback_a():
        return 'this interface is not fool proof'

    def callback_b():
        return 'this interface is not fool proof'

    a = Fault('id foo', callback_a)
    b = Fault('id bar', callback_b)
    assert id(a) != id(b)
    assert a != b


def test_equal_lower():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).lower()
    b = Fault('id foo', callback_b).lower()
    assert id(a) != id(b)
    assert a == b


def test_not_equal_lower():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).lower()
    b = Fault('id bar', callback_b).lower()
    assert id(a) != id(b)
    assert a != b


def test_equal_b64encode():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).b64encode()
    b = Fault('id foo', callback_b).b64encode()
    assert id(a) != id(b)
    assert a == b


def test_not_equal_b64encode():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).b64encode()
    b = Fault('id bar', callback_b).b64encode()
    assert id(a) != id(b)
    assert a != b


def test_equal_format_into():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).format_into('bar {}')
    b = Fault('id foo', callback_b).format_into('bar {}')
    assert id(a) != id(b)
    assert a == b


def test_not_equal_format_into():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).format_into('bar {}')
    b = Fault('id foo', callback_b).format_into('baz {}')
    assert id(a) != id(b)
    assert a != b


def test_nested_equal():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).lower().b64encode()
    b = Fault('id foo', callback_b).lower().b64encode()
    assert id(a) != id(b)
    assert a == b


def test_nested_not_equal_because_of_id():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).lower().b64encode()
    b = Fault('id bar', callback_b).lower().b64encode()
    assert id(a) != id(b)
    assert a != b


def test_nested_not_equal_because_of_operators():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'foo'

    a = Fault('id foo', callback_a).lower().b64encode()
    b = Fault('id foo', callback_b).lower()
    assert id(a) != id(b)
    assert a != b


def test_can_be_used_in_set():
    def callback_a():
        return 'foo'

    def callback_b():
        return 'bar'

    a = Fault('id foo', callback_a)
    b = Fault('id bar', callback_b)
    s = {a, a, b}
    assert len(s) == 2
    assert 'foo' in [i.value for i in s]
    assert 'bar' in [i.value for i in s]


def test_kwargs_add_to_idlist():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz='bam', frob='glob')
    b = Fault('id foo', callback, different='kwargs')
    assert a != b
    assert hash(a) != hash(b)


def test_unhashable_dict_kwargs_add_to_idlist():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz={1: {2: {3: 4}}})
    b = Fault('id foo', callback, foo='bar', baz={1: {3: {3: 4}}})
    assert a != b
    assert hash(a) != hash(b)


def test_unhashable_list_kwargs_add_to_idlist():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz=[1, 2, [3, 4]])
    b = Fault('id foo', callback, foo='bar', baz=[1, [3, 4], 2])
    assert a != b
    assert hash(a) != hash(b)


def test_unhashable_set_kwargs_add_to_idlist():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz={1, 2, 3})
    b = Fault('id foo', callback, foo='bar', baz={1, 2, 4})
    assert a != b
    assert hash(a) != hash(b)


def test_unhashable_dict_kwargs_add_to_idlist_equal():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz={1: {2: {3: 4, 5: 6}}})
    b = Fault('id foo', callback, foo='bar', baz={1: {2: {5: 6, 3: 4}}})
    assert a == b
    assert hash(a) == hash(b)


def test_unhashable_list_kwargs_add_to_idlist_equal():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz=[1, 2, 3])
    b = Fault('id foo', callback, foo='bar', baz=[1, 2, 3])
    assert id(a) != id(b)
    assert a == b


def test_unhashable_set_kwargs_add_to_idlist_equal():
    def callback():
        return 'foo'

    a = Fault('id foo', callback, foo='bar', baz={1, 2, 3})
    b = Fault('id foo', callback, foo='bar', baz={1, 3, 2})
    assert a == b
    assert hash(a) == hash(b)


def test_eq_and_hash_do_not_resolve_fault():
    def callback():
        raise Exception('Fault resolved, this should not happen')

    a = Fault('id foo', callback)
    b = Fault('id foo', callback)
    assert a == b
    s = {a, b}  # hashing for set membership must not resolve the Faults either
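

# Editorial sketch, not part of the original suite: hashability without
# resolution also means Faults can serve as dict keys; equal Faults (same id,
# no operators) collapse to a single entry, mirroring test_can_be_used_in_set()
# above. The test name is an assumption for illustration.
def test_can_be_used_as_dict_key_sketch():
    def callback():
        return 'foo'

    a = Fault('id foo', callback)
    b = Fault('id foo', callback)
    d = {a: 1}
    d[b] = 2  # a == b and hash(a) == hash(b), so this overwrites the entry
    assert len(d) == 1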


def test_kwargs_changed_after_creation():
    def callback():
        return 'foo'

    data = {
        'foo': 0,
    }
    a = Fault('id foo', callback, data=data)
    data['foo'] = 1
    b = Fault('id foo', callback, data=data)
    # Even though both Faults reference the same dict, hashes are built
    # on Fault creation based on the actual values in mutable
    # parameters.
    assert a != b
    assert hash(a) != hash(b)


def test_kwargs_not_changed_after_creation():
    def callback():
        return 'foo'

    data = {
        'foo': 0,
    }
    a = Fault('id foo', callback, data=data)
    b = Fault('id foo', callback, data=data)
    assert a == b
    assert hash(a) == hash(b)


def test_hash_does_not_change():
    def callback():
        return 'foo'

    data = {
        'foo': 0,
    }
    a = Fault('id foo', callback, data=data)
    hash1 = hash(a)
    data['foo'] = 1
    hash2 = hash(a)
    assert hash1 == hash2


def test_sort():
    def one():
        return 1

    def three():
        return 3

    f1 = Fault("1", one)
    f3 = Fault("3", three)
    assert sorted([2, f3, f1]) == [f1, 2, f3]


def test_sort_typeerror():
    def one():
        return 1

    def three():
        return 3

    f1 = Fault("1", one)
    f3 = Fault("3", three)
    with raises(TypeError):
        sorted(["2", f3, f1])


def test_sort_typeerror_from_fault():
    def one():
        return 1

    def three():
        return "3"

    f1 = Fault("1", one)
    f3 = Fault("3", three)
    with raises(TypeError):
        sorted([2, f3, f1])
bundlewrap-4.13.6/tests/unit/metadata.py
from bundlewrap.utils.dicts import merge_dict
from bundlewrap.metadata import atomic


def test_atomic_no_merge_base():
    assert merge_dict(
        {1: atomic([5])},
        {1: [6, 7]},
    ) == {1: [6, 7]}


def test_atomic_no_merge_update():
    assert merge_dict(
        {1: [5]},
        {1: atomic([6, 7])},
    ) == {1: [6, 7]}
bundlewrap-4.13.6/tests/unit/metastack.py
from bundlewrap.metadata import atomic
from bundlewrap.utils.metastack import Metastack
from pytest import raises


def test_has_no_top():
    stack = Metastack()
    with raises(KeyError):
        stack.get(('something',))


def test_has_no_subpath():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'in': {}}})
    with raises(KeyError):
        stack.get(('something', 'in', 'a', 'path'))


def test_get_top():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': 123})
    assert stack.get(('something',)) == 123


def test_get_subpath():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'in': {'a': 'subpath'}}})
    assert stack.get(('something', 'in', 'a')) == 'subpath'


def test_overlay_value():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_value': 10}})
    assert stack.get(('something', 'a_value')) == 10


def test_merge_lists():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_list': [3]}})
    assert sorted(stack.get(('something', 'a_list'))) == sorted([1, 2, 3])


def test_merge_sets():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_set': {3}}})
    assert stack.get(('something', 'a_set')) == {1, 2, 3}


def test_overlay_value_multi_layers():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_value': 10}})
    stack.set_layer(0, 'unrelated', {'something': {'another_value': 10}})
    assert stack.get(('something', 'a_value')) == 10
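

# Editorial sketch, not part of the original suite: a single overlay layer can
# merge a list and replace a scalar at the same time; this merely combines
# test_merge_lists() and test_overlay_value() above. The test name is an
# assumption for illustration.
def test_merge_and_overlay_same_layer_sketch():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_list': [3], 'a_value': 10}})
    assert sorted(stack.get(('something', 'a_list'))) == [1, 2, 3]
    assert stack.get(('something', 'a_value')) == 10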


def test_merge_lists_multi_layers():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_list': [3]}})
    stack.set_layer(0, 'unrelated', {'something': {'another_value': 10}})

    # Objects in Metastacks are frozen. This converts lists to tuples.
    # Unlike sets and frozensets, lists and tuples don't naturally
    # compare equal to each other.
    #
    # This is acceptable: in metaprocs, people are expected to check
    # whether something is in a list and to access individual items.
    # All of that works. Operations like .append() do not work, and
    # they are not supposed to.
    assert len(stack.get(('something', 'a_list'))) == 3
    assert stack.get(('something', 'a_list'))[0] == 1
    assert stack.get(('something', 'a_list'))[1] == 2
    assert stack.get(('something', 'a_list'))[2] == 3


def test_merge_sets_multi_layers():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_set': {3}}})
    stack.set_layer(0, 'unrelated', {'something': {'another_value': 10}})
    assert stack.get(('something', 'a_set')) == {1, 2, 3}


def test_merge_lists_with_empty_layer():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay1', {'something': {'a_list': []}})
    stack.set_layer(0, 'overlay2', {'something': {'a_list': [3]}})
    assert sorted(stack.get(('something', 'a_list'))) == sorted([1, 2, 3])


def test_merge_sets_with_empty_layer():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
    stack.set_layer(0, 'overlay1', {'something': {'a_set': set()}})
    stack.set_layer(0, 'overlay2', {'something': {'a_set': {3}}})
    assert stack.get(('something', 'a_set')) == {1, 2, 3}


def test_merge_lists_with_multiple_used_layers():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay1', {'something': {'a_list': [3]}})
    stack.set_layer(0, 'overlay2', {'something': {'a_list': [4]}})
    stack.set_layer(0, 'overlay3', {'something': {'a_list': [6, 5]}})
    assert sorted(stack.get(('something', 'a_list'))) == sorted([1, 2, 3, 4, 5, 6])


def test_merge_sets_with_multiple_used_layers():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
    stack.set_layer(0, 'overlay1', {'something': {'a_set': {3}}})
    stack.set_layer(0, 'overlay2', {'something': {'a_set': {4}}})
    stack.set_layer(0, 'overlay3', {'something': {'a_set': {6, 5}}})
    assert stack.get(('something', 'a_set')) == {1, 2, 3, 4, 5, 6}


def test_merge_dicts():
    stack = Metastack()
    stack.set_layer(0, 'overlay1', {'something': {'a_value': 3}})
    stack.set_layer(0, 'overlay2', {'something': {'another_value': 5}})
    stack.set_layer(0, 'overlay3', {'something': {'this': {'and': 'that'}}})
    stack.set_layer(0, 'overlay4', {'something': {'a_set': {1, 2}}})
    stack.set_layer(0, 'overlay5', {'something': {'a_set': {3, 4}}})
    assert stack.get(('something',)) == {
        'a_set': {1, 2, 3, 4},
        'a_value': 3,
        'another_value': 5,
        'this': {
            'and': 'that',
        },
    }


def test_requesting_empty_path():
    stack = Metastack()
    stack.set_layer(0, 'base', {'foo': {'bar': 'baz'}})
    assert stack.get(()) == {'foo': {'bar': 'baz'}}


def test_update_layer_for_new_value():
    stack = Metastack()
    stack.set_layer(0, 'base', {'foo': 'bar'})
    stack.set_layer(0, 'overlay', {'something': 123})
    assert stack.get(('foo',)) == 'bar'
    with raises(KeyError):
        assert stack.get(('boing',))
    assert stack.get(('something',)) == 123
    stack.set_layer(0, 'overlay', {'something': 456})
    assert stack.get(('foo',)) == 'bar'
    with raises(KeyError):
        assert stack.get(('boing',))
    assert stack.get(('something',)) == 456


def test_deepcopy():
    stack = Metastack()
    stack.set_layer(0, 'base', {'foo': {'bar': {1, 2, 3}}})
    foo = stack.get(('foo',))
    foo['bar'].add(4)
    assert stack.get(('foo', 'bar')) == {1, 2, 3}
    del foo['bar']
    assert stack.get(('foo', 'bar'))


def test_atomic_in_base():
    stack = Metastack()
    stack.set_layer(0, 'base', {'list': atomic([1, 2, 3])})
    stack.set_layer(0, 'overlay', {'list': [4]})
    assert list(stack.get(('list',))) == [4]


def test_atomic_in_layer():
    stack = Metastack()
    stack.set_layer(0, 'base', {'list': [1, 2, 3]})
    stack.set_layer(0, 'overlay', {'list': atomic([4])})
    assert list(stack.get(('list',))) == [4]


def test_pop_layer():
    stack = Metastack()
    stack.set_layer(0, 'overlay', {'foo': 'bar'})
    stack.set_layer(0, 'overlay', {'foo': 'baz'})
    assert stack.pop_layer(0, 'overlay') == {'foo': 'baz'}
    with raises(KeyError):
        stack.get(('foo',))
    assert stack.pop_layer(0, 'overlay') == {}
    assert stack.pop_layer(0, 'unknown') == {}
    assert stack.pop_layer(47, 'unknown') == {}


def test_as_dict():
    stack = Metastack()
    stack.set_layer(0, 'base', {
        'bool': True,
        'bytes': b'howdy',
        'dict': {'1': 2},
        'int': 1,
        'list': [1],
        'none': None,
        'set': {1},
        'str': 'howdy',
        'tuple': (1, 2),
    })
    stack.set_layer(0, 'overlay1', {'int': 1000})
    stack.set_layer(0, 'overlay2', {'list': [2]})
    stack.set_layer(0, 'overlay3', {'new_element': True})
    assert stack.as_dict() == {
        'bool': True,
        'bytes': b'howdy',
        'dict': {'1': 2},
        'int': 1000,
        'list': [1, 2],
        'new_element': True,
        'none': None,
        'set': {1},
        'str': 'howdy',
        'tuple': (1, 2),
    }


def test_as_blame():
    stack = Metastack()
    stack.set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
    stack.set_layer(0, 'overlay', {'something': {'a_list': [3]}})
    stack.set_layer(0, 'unrelated', {'something': {'another_value': 10}})
    assert stack.as_blame() == {
        ('something',): ['base', 'overlay', 'unrelated'],
        ('something', 'a_list'): ['base', 'overlay'],
        ('something', 'a_value'): ['base'],
        ('something', 'another_value'): ['unrelated'],
    }
bundlewrap-4.13.6/tests/unit/pkg_freebsd.py
from bundlewrap.items.pkg_freebsd import parse_pkg_name
from pytest import raises


def test_not_found():
    found, version = parse_pkg_name("tree", "zsh-5.8")
    assert found is False


def test_version():
    found, version = parse_pkg_name("tree", "tree-1.8.0")
    assert found is True
    assert version == "1.8.0"


def test_version_with_epoch():
    found, version = parse_pkg_name(
        "zsh-syntax-highlighting", "zsh-syntax-highlighting-0.7.1,1")
    assert found is True
    assert version == "0.7.1,1"


def test_illegal_no_version():
    with raises(AssertionError):
        parse_pkg_name("tree", "tree")
bundlewrap-4.13.6/tests/unit/pkg_openbsd.py
from bundlewrap.items.pkg_openbsd import parse_pkg_name
from pytest import raises


def test_not_found():
    found, version, flavor = parse_pkg_name("rsync", "irssi-1.0.4p0-socks")
    assert found is False


def test_only_version():
    found, version, flavor = parse_pkg_name("irssi", "irssi-1.0.4p0")
    assert found is True
    assert version == "1.0.4p0"
    assert flavor == ""


def test_version_and_flavor():
    found, version, flavor = parse_pkg_name("irssi", "irssi-1.0.4p0-socks")
    assert found is True
    assert version == "1.0.4p0"
    assert flavor == "socks"


def test_dashname_not_found():
    found, version, flavor = parse_pkg_name("rsync", "cyrus-sasl-2.1.26p24-pgsql")
    assert found is False


def test_dashname_only_version():
    found, version, flavor = parse_pkg_name("cyrus-sasl", "cyrus-sasl-2.1.26p24")
    assert found is True
    assert version == "2.1.26p24"
    assert flavor == ""
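

# Editorial sketch, not part of the original suite: the not-found cases in
# this file could also be table-driven. pytest is already a test dependency,
# so parametrize is available; the test name and case selection are
# assumptions for illustration.
from pytest import mark


@mark.parametrize("pkgline", [
    "irssi-1.0.4p0-socks",
    "cyrus-sasl-2.1.26p24-pgsql",
    "vim-8.0.0987p0-gtk2-lua",
])
def test_not_found_parametrized_sketch(pkgline):
    # none of these package lines belong to "rsync"
    found, version, flavor = parse_pkg_name("rsync", pkgline)
    assert found is False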


def test_dashname_version_and_flavor():
    found, version, flavor = parse_pkg_name("cyrus-sasl", "cyrus-sasl-2.1.26p24-pgsql")
    assert found is True
    assert version == "2.1.26p24"
    assert flavor == "pgsql"


def test_dashflavor_not_found():
    found, version, flavor = parse_pkg_name("rsync", "vim-8.0.0987p0-gtk2-lua")
    assert found is False


def test_dashflavor_version_and_flavor():
    found, version, flavor = parse_pkg_name("vim", "vim-8.0.0987p0-gtk2-lua")
    assert found is True
    assert version == "8.0.0987p0"
    assert flavor == "gtk2-lua"


def test_dashall_not_found():
    found, version, flavor = parse_pkg_name("rsync", "graphical-vim-8.0.0987p0-gtk2-lua")
    assert found is False


def test_dashall_found_dash_in_pkgname():
    found, version, flavor = parse_pkg_name("graphical-vim", "graphical-vim-8.0.0987p0-gtk2-lua")
    assert found is True
    assert version == "8.0.0987p0"
    assert flavor == "gtk2-lua"


def test_illegal_version_ends_with_dash():
    with raises(AssertionError):
        parse_pkg_name("dummy", "foo-1.0-")


def test_illegal_flavor_ends_with_dash():
    with raises(AssertionError):
        parse_pkg_name("dummy", "foo-1.0-bar-")


def test_illegal_no_version():
    with raises(AssertionError):
        parse_pkg_name("dummy", "foo-bar")


def test_illegal_no_name():
    with raises(AssertionError):
        parse_pkg_name("dummy", "1.0-flavor")


def test_illegal_only_version():
    with raises(AssertionError):
        parse_pkg_name("dummy", "1.0")


def test_illegal_empty_line():
    with raises(AssertionError):
        parse_pkg_name("dummy", "")
bundlewrap-4.13.6/tests/unit/utils_dicts.py
from bundlewrap.metadata import atomic
from bundlewrap.utils.dicts import (
    extra_paths_in_dict,
    map_dict_keys,
    reduce_dict,
    validate_dict,
    COLLECTION_OF_STRINGS,
    TUPLE_OF_INTS,
)
from pytest import raises


def test_dictmap():
    assert set(map_dict_keys({
        'key1': 1,
        'key2': {
            'key3': [3, 3, 3],
            'key4': atomic([4, 4, 4]),
            'key5': {
                'key6': "6",
            },
            'key7': set((7, 7, 7)),
        },
    })) == set([
        ("key1",),
        ("key2",),
        ("key2", "key3"),
        ("key2", "key4"),
        ("key2", "key5"),
        ("key2", "key5", "key6"),
        ("key2", "key7"),
    ])


def test_dictmap_leaves():
    assert set(map_dict_keys({
        'key1': 1,
        'key2': {
            'key3': [3, 3, 3],
            'key4': atomic([4, 4, 4]),
            'key5': {
                'key6': "6",
            },
            'key7': set((7, 7, 7)),
        },
    }, leaves_only=True)) == set([
        ("key1",),
        ("key2", "key3"),
        ("key2", "key4"),
        ("key2", "key5", "key6"),
        ("key2", "key7"),
    ])


def test_reduce_dict_two_lists():
    assert reduce_dict(
        [1, 2, 3],
        [1, 2],
    ) == [1, 2, 3]


def test_reduce_dict_list_and_dict():
    assert reduce_dict(
        [1, 2, 3],
        {'a': 4},
    ) == [1, 2, 3]


def test_reduce_dict_simple():
    assert reduce_dict(
        {'a': 1, 'b': 2},
        {'a': 3},
    ) == {'a': 1}


def test_reduce_dict_nested():
    full_dict = {
        'a': [{
            'b': 1,
            'c': 2,
        }],
        'd': 3,
    }
    template_dict = {
        'a': [{
            'b': None,
        }],
        'd': None,
        'e': None,
    }
    assert reduce_dict(full_dict, template_dict) == {
        'a': [{
            'b': 1,
        }],
        'd': 3,
    }


def test_validate_ok():
    validate_dict(
        {
            'a': 5,
            'b': "bee",
            'c': None,
            'd': ("t", "u", "p", "l", "e"),
            'e': ["l", "i", "s", "t"],
            'f': {"s", "e", "t"},
            'g': (1, "2"),
            'h': [1, "2"],
            'i': {1, "2"},
            'j': True,
            'k': False,
            'l': (1, 2, 3),
        },
        {
            'a': int,
            'b': str,
            'c': type(None),
            'd': COLLECTION_OF_STRINGS,
            'e': COLLECTION_OF_STRINGS,
            'f': COLLECTION_OF_STRINGS,
            'g': tuple,
            'h': list,
            'i': set,
            'j': bool,
            'k': (int, bool),
            'l': TUPLE_OF_INTS,
        },
    )


def test_validate_single_type_error():
    with raises(ValueError):
        validate_dict(
            {
                'a': 5,
            },
            {
                'a': str,
            },
        )


def test_validate_multi_type_error():
    with raises(ValueError):
        validate_dict(
            {
                'a': 5,
            },
            {
                'a': (str, list),
            },
        )


def test_validate_inner_type_error():
    with raises(ValueError):
        validate_dict(
            {
                'd': ("t", "u", "p", "l", "e", 47),
            },
            {
                'd': COLLECTION_OF_STRINGS,
            },
        )


def test_validate_inner_type_error2():
    with raises(ValueError):
        validate_dict(
            {
                'l': (1, 2, "3"),
            },
            {
                'l': TUPLE_OF_INTS,
            },
        )


def test_validate_missing_key():
    with raises(ValueError):
        validate_dict(
            {
                'a': 5,
            },
            {
                'a': int,
                'b': str,
            },
            required_keys=['a', 'b'],
        )


def test_validate_required_key():
    validate_dict(
        {
            'a': 5,
            'b': "bee",
        },
        {
            'a': int,
            'b': str,
        },
        required_keys=['a', 'b'],
    )


def test_extra_paths():
    assert set(extra_paths_in_dict(
        {
            'a': 1,
            'b': 1,
        },
        {
            ('a',),
        },
    )) == {
        ('b',),
    }


def test_extra_paths_nested():
    assert set(extra_paths_in_dict(
        {
            'a': 1,
            'b': {
                'c': 1
            },
            'd': {
                'e': 1
            },
        },
        {
            ('b', 'c'),
        },
    )) == {
        ('a',),
        ('d', 'e'),
    }


def test_extra_paths_ok():
    assert set(extra_paths_in_dict(
        {
            'a': 1,
            'b': {
                'c': 1
            },
            'd': {
                'e': 1
            },
        },
        {
            ('a',),
            ('b', 'c'),
            ('d',),
        },
    )) == set()
bundlewrap-4.13.6/tests/unit/utils_table.py
from bundlewrap.utils.table import ROW_SEPARATOR, render_table


def test_render_table():
    assert "\n".join(render_table([
        ["head1", "h2"],
        ROW_SEPARATOR,
        ["1", "2"]
    ], alignments={0: 'right'})) == """
╭───────┬────╮
│ head1 │ h2 │
├───────┼────┤
│     1 │ 2  │
╰───────┴────╯
""".strip()
bundlewrap-4.13.6/tests/unit/utils_text.py
from datetime import timedelta

from bundlewrap.utils.text import (
    ansi_clean,
    bold,
    format_duration,
    red,
    parse_duration,
    trim_visible_len_to,
)


def test_ansi_clean():
    assert red("test") != "test"
    assert len(red("test")) != len("test")
    assert ansi_clean(red("test")) == "test"
    assert ansi_clean(bold(red("test"))) == "test"


def test_format_duration():
    assert format_duration(timedelta()) == "0s"
    assert format_duration(timedelta(seconds=10)) == "10s"
    assert format_duration(timedelta(minutes=10)) == "10m"
    assert format_duration(timedelta(hours=10)) == "10h"
    assert format_duration(timedelta(days=10)) == "10d"
    assert format_duration(timedelta(days=1, hours=2, minutes=3, seconds=4)) == "1d 2h 3m 4s"


def test_parse_duration():
    assert parse_duration("0s") == timedelta()
    assert parse_duration("10s") == timedelta(seconds=10)
    assert parse_duration("10m") == timedelta(minutes=10)
    assert parse_duration("10h") == timedelta(hours=10)
    assert parse_duration("10d") == timedelta(days=10)
    assert parse_duration("1d 2h 3m 4s") == timedelta(days=1, hours=2, minutes=3, seconds=4)


def test_parse_format_inverse():
    for duration in (
        "0s",
        "1s",
        "1m",
        "1h",
        "1d",
        "1d 4h",
        "1d 4h 7s",
    ):
        assert format_duration(parse_duration(duration)) == duration


def test_trim_visible_len_to():
    assert trim_visible_len_to("foo bar", 10) == "foo bar"
    assert trim_visible_len_to("foo bar", 3) == "foo"
    assert trim_visible_len_to("\033[1mfoo bar", 3) == "\033[1mfoo"
    assert trim_visible_len_to("foo \033[1mbar\033[0m", 4) == "foo "
    assert trim_visible_len_to("foo \033[1mbar\033[0m", 5) == "foo \033[1mb"
    assert trim_visible_len_to("föö \033[1mbär\033[0m", 7) == "föö \033[1mbär"
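

# Editorial sketch, not part of the original suite: ties the helpers above
# together -- trimming a string to its full visible length must preserve its
# ANSI-cleaned text. The test name is an assumption for illustration.
def test_trim_to_full_visible_len_sketch():
    text = bold(red("test"))
    # ansi_clean() strips the escape sequences, so its length is the
    # visible length of the styled string
    trimmed = trim_visible_len_to(text, len(ansi_clean(text)))
    assert ansi_clean(trimmed) == "test"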