pcs-0.10.4 source tree (git commit 0534ba0829dd7c0f10cf5fb4333a7e23602538bf)

==> pcs-0.10.4/.eslintrc <==
{
    "parser": "babel-eslint",
    "rules": {
        "semi": [2, "always", { "omitLastInOneLineBlock": true, }],
    }
}

==> pcs-0.10.4/.gitignore <==
*.pyc
*.swp
pcs.spec
/MANIFEST
/dist/
/pcs/bash_completion.d.pcs
/pcs/bundled/
/pcsd/.bundle
/pcsd/vendor
/pcsd/public/ui
/rpm_build/
pcs-*.tar.gz
.mypy_cache/

==> pcs-0.10.4/.gitlab-ci.yml <==
image: fedora:latest

stages:
  - stage1
  - stage2

rpm_build:
  stage: stage1
  script:
    - "dnf install -y dnf-plugins-core git make rpm-build rpmdevtools sed tar wget which"
    - make pcs.spec GIT_TAG=ci
    - dnf builddep -y pcs.spec
    - make rpm GIT_TAG=ci
    - mkdir -p rpms
    - cp $(find rpm_build -type f -name '*.rpm' -not -name '*.src.rpm') rpms
  artifacts:
    expire_in: 1 week
    paths:
      - rpms

pylint:
  stage: stage1
  script:
    - "dnf install -y python3 python3-lxml python3-pip python3-pycurl python3-pyOpenSSL findutils make time which"
    - make pylint_requirements
    - make pylint

mypy:
  stage: stage1
  script:
    - "dnf install -y python3 python3-lxml python3-pip python3-pycurl python3-pyOpenSSL git make tar time wget which"
    - make mypy_requirements
    - make mypy

ruby_tests:
  stage: stage1
  script:
    - "dnf install -y ruby ruby-devel rubygem-bundler rubygem-backports rubygem-ethon rubygem-ffi rubygem-io-console rubygem-json rubygem-open4 rubygem-rack rubygem-rack-protection rubygem-rack-test rubygem-sinatra rubygem-tilt rubygem-test-unit"
    - /usr/bin/ruby -I ./pcsd -I ./pcsd/test pcsd/test/test_all_suite.rb

python_tier0_tests:
  stage: stage2
  needs:
    - rpm_build
  script:
    - "dnf install -y python3-mock python3-pip fence-agents-scsi fence-agents-apc fence-agents-ipmilan fence-virt booth-site rpms/pcs-ci-*.rpm"
    - python3 -m pip install concurrencytest
    - rm -rf pcs pcsd  # make sure we are testing the installed package
    - pcs_test/suite.py -v --installed

python_smoke_tests:
  stage: stage2
  needs:
    - rpm_build
  script:
    - "dnf install -y cracklib-dicts passwd procps-ng rpms/pcs-ci-*.rpm"
    - /usr/sbin/pcsd &  # start pcsd
    - sleep 10  # wait for pcsd to start up properly
    - pcs_test/smoke.sh
  artifacts:
    paths:
      - /var/log/pcsd/
    when: on_failure
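The pylint and mypy stage1 jobs above are plain make targets, so they can be reproduced outside GitLab CI; a minimal local sketch, assuming a Fedora host with the repository root as the working directory:

```sh
# Install the same dependencies the CI jobs pull in, then run the
# same make targets the jobs call.
sudo dnf install -y python3 python3-lxml python3-pip python3-pycurl \
    python3-pyOpenSSL findutils git make tar time wget which
make pylint_requirements && make pylint   # lint stage
make mypy_requirements && make mypy       # static type-check stage
```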
==> pcs-0.10.4/.travis.yml <==
language: generic
sudo: required
env:
  global:
    - SRC_PATH=/src/pcs
  matrix:
    - DIST=fedora29 RUBY_BIN=/usr/bin/ruby PYTHON_TEST_ARGS=""
    - DIST=fedora30 RUBY_BIN=/usr/bin/ruby PYTHON_TEST_ARGS=""
    - DIST=fedora31 RUBY_BIN=/usr/bin/ruby PYTHON_TEST_ARGS=""
services:
  - docker
before_install:
  - docker build --build-arg src_path=$SRC_PATH -t $DIST -f test/$DIST/Dockerfile .
script:
  # python test suite
  - docker run $DIST $SRC_PATH/pcs_test/suite.py -v --vanilla $PYTHON_TEST_ARGS
  # python static types check
  - docker run $DIST make -C $SRC_PATH mypy
  # python linter
  - docker run $DIST make -C $SRC_PATH pylint
  # ruby test suite
  - docker run $DIST $RUBY_BIN -I $SRC_PATH/pcsd -I $SRC_PATH/pcsd/test $SRC_PATH/pcsd/test/test_all_suite.rb

==> pcs-0.10.4/CHANGELOG.md <==
# Change Log

## [0.10.4] - 2019-11-28

### Added
- New section in pcs man page summarizing changes in pcs-0.10. Commands removed or changed in pcs-0.10 print errors pointing to that section. ([rhbz#1728890])
- `pcs resource disable` can show effects of disabling resources and prevent disabling resources if any other resources would be affected ([rhbz#1631519])
- `pcs resource relations` command shows relations between resources such as ordering constraints, ordering set constraints and relations defined by resource hierarchy ([rhbz#1631514])
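A quick sketch of the new relations view described in the last entry; `my-resource` is a hypothetical resource id, not something from this repository:

```sh
# Print ordering constraints, ordering set constraints and the
# group/clone/bundle hierarchy involving one resource as a tree.
pcs resource relations my-resource
```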
### Changed
- Expired location constraints are now hidden by default when listing constraints in any way. Using `--all` will list and denote them with `(expired)`. All expired rules are then marked the same way. ([rhbz#1442116])

### Fixed
- All node names and scores are validated when running `pcs constraint location avoids/prefers` before writing configuration to cib ([rhbz#1673835])
- Fixed crash when an invalid port is given in an address to the `pcs host auth` command ([rhbz#1698763])
- Command `pcs cluster verify` suggests `--full` option instead of `-V` option which is not recognized by pcs ([rhbz#1712347])
- It is now possible to authenticate remote clusters in web UI even if the local cluster is not authenticated ([rhbz#1743735])
- Documentation of `pcs constraint colocation add` ([rhbz#1734361])
- Empty constraint options are not allowed in `pcs constraint order` and `pcs constraint colocation add` commands ([rhbz#1734361])
- More fixes for the case when the PATH environment variable is not set
- Fixed crashes and other issues when UTF-8 characters are present in the corosync.conf file ([rhbz#1741586])

[rhbz#1442116]: https://bugzilla.redhat.com/show_bug.cgi?id=1442116
[rhbz#1631514]: https://bugzilla.redhat.com/show_bug.cgi?id=1631514
[rhbz#1631519]: https://bugzilla.redhat.com/show_bug.cgi?id=1631519
[rhbz#1673835]: https://bugzilla.redhat.com/show_bug.cgi?id=1673835
[rhbz#1698763]: https://bugzilla.redhat.com/show_bug.cgi?id=1698763
[rhbz#1712347]: https://bugzilla.redhat.com/show_bug.cgi?id=1712347
[rhbz#1728890]: https://bugzilla.redhat.com/show_bug.cgi?id=1728890
[rhbz#1734361]: https://bugzilla.redhat.com/show_bug.cgi?id=1734361
[rhbz#1741586]: https://bugzilla.redhat.com/show_bug.cgi?id=1741586
[rhbz#1743735]: https://bugzilla.redhat.com/show_bug.cgi?id=1743735

## [0.10.3] - 2019-08-23

### Fixed
- Fixed crashes in the `pcs host auth` command ([rhbz#1676957])
- Fixed id conflict with current bundle configuration in `pcs resource bundle reset` ([rhbz#1657166])
- Options starting with - and -- are no longer ignored for non-root users (broken since pcs-0.10.2) ([rhbz#1725183])
- Fixed crashes when pcs is configured so that no rubygems are bundled in the pcs package ([ghissue#208])
- Standby nodes running resources are listed separately in `pcs status nodes`
- Parsing arguments in the `pcs constraint order` and `pcs constraint colocation add` commands has been improved; errors which were previously silent are now reported ([rhbz#1734361])
- Fixed shebang correction in Makefile ([ghissue#206])
- Generate a 256-byte corosync authkey, as longer keys are not supported when FIPS is enabled ([rhbz#1740218])
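A quick way to confirm the key length mentioned in the last fix, assuming the default authkey location on a cluster node:

```sh
# Newly generated keys should report 256 (bytes).
wc -c < /etc/corosync/authkey
```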
### Changed
- Command `pcs resource bundle reset` no longer accepts the container type ([rhbz#1657166])

[ghissue#206]: https://github.com/ClusterLabs/pcs/issues/206
[ghissue#208]: https://github.com/ClusterLabs/pcs/issues/208
[rhbz#1657166]: https://bugzilla.redhat.com/show_bug.cgi?id=1657166
[rhbz#1676957]: https://bugzilla.redhat.com/show_bug.cgi?id=1676957
[rhbz#1725183]: https://bugzilla.redhat.com/show_bug.cgi?id=1725183
[rhbz#1734361]: https://bugzilla.redhat.com/show_bug.cgi?id=1734361
[rhbz#1740218]: https://bugzilla.redhat.com/show_bug.cgi?id=1740218

## [0.10.2] - 2019-06-12

### Added
- Command `pcs config checkpoint diff` for displaying differences between two specified checkpoints ([rhbz#1655055]) (see the sketch after this list)
- Support for resource instance attributes uniqueness check according to resource agent metadata ([rhbz#1665404])
- Command `pcs resource bundle reset` for resetting a bundle configuration ([rhbz#1657166])
- `pcs cluster setup` now checks if nodes' addresses match the value of `ip_version` ([rhbz#1667053])
- Support for sbd option SBD\_TIMEOUT\_ACTION ([rhbz#1664828])
- Support for clearing expired moves and bans of resources ([rhbz#1625386])
- Commands for adding, changing and removing corosync links ([rhbz#1667058])
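An illustrative session for the checkpoint diff command referenced above; the checkpoint numbers are placeholders:

```sh
pcs config checkpoint            # list saved configuration checkpoints
pcs config checkpoint diff 1 2   # show what changed between checkpoints 1 and 2
```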
### Fixed
- Corosync config file parser updated and made more strict to match changes in corosync
- Allow non-root users to read quorum status (commands `pcs status corosync`, `pcs status quorum`, `pcs quorum device status`, `pcs quorum status`) ([rhbz#1653316])
- Removed command `pcs resource show` dropped from usage and man page ([rhbz#1656953])
- Put proper link options' names into corosync.conf ([rhbz#1659051])
- Fixed issues in configuring links in the 'create cluster' form in web UI ([rhbz#1664057])
- Pcs no longer removes empty `meta_attributes`, `instance_attributes` and other nvsets and similar elements from CIB. Such behavior was causing problems when pacemaker ACLs were in effect, leading to inability of pushing modified CIBs to pacemaker. ([rhbz#1659144])
- `ipv4-6` and `ipv6-4` are now valid values of `ip_version` in cluster setup ([rhbz#1667040])
- Crash when using unsupported options in commands `pcs status` and `pcs config` ([rhbz#1668422])
- `pcs resource group add` now fails gracefully instead of dumping an invalid CIB when a group ID is already occupied by a non-resource element ([rhbz#1668223])
- pcs no longer spawns unnecessary processes for reading known hosts ([rhbz#1676945])
- Lower load caused by periodical config files syncing in pcsd by making it sync less frequently ([rhbz#1676957])
- Improve logging of periodical config files syncing in pcsd
- Knet link option `ip_version` has been removed, it was never supported by corosync. Transport option `ip_version` is still in place. ([rhbz#1674005])
- Several bugs in linklist validation in `pcs cluster setup` ([rhbz#1667090])
- Fixed a typo in documentation (regardles -> regardless) ([rhbz#1660702])
- Fixed pcsd crashes when non-ASCII characters are present in systemd journal
- Pcs works even when the PATH environment variable is not set ([rhbz#1673825])
- Fixed several "Unknown report" error messages
- Pcsd SSL certificates are no longer synced across cluster nodes when creating a new cluster or adding a new node to an existing cluster. To enable the syncing, set `PCSD_SSL_CERT_SYNC_ENABLED` to `true` in pcsd config. ([rhbz#1673822])
- Pcs now reports missing node names in corosync.conf instead of failing silently
- Fixed an issue where some pcs commands could not connect to cluster nodes over IPv6
- Fixed cluster setup problem in web UI when full domain names are used ([rhbz#1687965])
- Fixed inability to set up a cluster in web UI when knet links are not specified ([rhbz#1687562])
- `--force` works correctly in `pcs quorum unblock` (broken since pcs-0.10.1)
- Removed `3des` from allowed knet crypto ciphers since it is actually not supported by corosync
- Improved validation of corosync options and their values ([rhbz#1679196], [rhbz#1679197])

### Changed
- Do not check whether the watchdog is defined as an absolute path when enabling SBD. This check is not needed anymore as we are validating the watchdog against a list provided by SBD itself.

### Deprecated
- Command `pcs resource show`, removed in pcs-0.10.1, has been re-added as deprecated to ease transition to its replacements. It will be removed again in the future. [rhbz#1661059]

[rhbz#1625386]: https://bugzilla.redhat.com/show_bug.cgi?id=1625386
[rhbz#1653316]: https://bugzilla.redhat.com/show_bug.cgi?id=1653316
[rhbz#1655055]: https://bugzilla.redhat.com/show_bug.cgi?id=1655055
[rhbz#1656953]: https://bugzilla.redhat.com/show_bug.cgi?id=1656953
[rhbz#1657166]: https://bugzilla.redhat.com/show_bug.cgi?id=1657166
[rhbz#1659051]: https://bugzilla.redhat.com/show_bug.cgi?id=1659051
[rhbz#1659144]: https://bugzilla.redhat.com/show_bug.cgi?id=1659144
[rhbz#1660702]: https://bugzilla.redhat.com/show_bug.cgi?id=1660702
[rhbz#1661059]: https://bugzilla.redhat.com/show_bug.cgi?id=1661059
[rhbz#1664057]: https://bugzilla.redhat.com/show_bug.cgi?id=1664057
[rhbz#1664828]: https://bugzilla.redhat.com/show_bug.cgi?id=1664828
[rhbz#1665404]: https://bugzilla.redhat.com/show_bug.cgi?id=1665404
[rhbz#1667040]: https://bugzilla.redhat.com/show_bug.cgi?id=1667040
[rhbz#1667053]: https://bugzilla.redhat.com/show_bug.cgi?id=1667053
[rhbz#1667058]: https://bugzilla.redhat.com/show_bug.cgi?id=1667058
[rhbz#1667090]: https://bugzilla.redhat.com/show_bug.cgi?id=1667090
[rhbz#1668223]: https://bugzilla.redhat.com/show_bug.cgi?id=1668223
[rhbz#1668422]: https://bugzilla.redhat.com/show_bug.cgi?id=1668422
[rhbz#1673822]: https://bugzilla.redhat.com/show_bug.cgi?id=1673822
[rhbz#1673825]: https://bugzilla.redhat.com/show_bug.cgi?id=1673825
[rhbz#1674005]: https://bugzilla.redhat.com/show_bug.cgi?id=1674005
[rhbz#1676945]: https://bugzilla.redhat.com/show_bug.cgi?id=1676945
[rhbz#1676957]: https://bugzilla.redhat.com/show_bug.cgi?id=1676957
[rhbz#1679196]: https://bugzilla.redhat.com/show_bug.cgi?id=1679196
[rhbz#1679197]: https://bugzilla.redhat.com/show_bug.cgi?id=1679197
[rhbz#1687562]: https://bugzilla.redhat.com/show_bug.cgi?id=1687562
[rhbz#1687965]: https://bugzilla.redhat.com/show_bug.cgi?id=1687965

## [0.10.1] - 2018-11-23

### Removed
- Pcs-0.10 removes support for CMAN, Corosync 1.x, Corosync 2.x and Pacemaker 1.x based clusters. For managing those clusters use pcs-0.9.x.
- Pcs-0.10 requires Python 3.6 and Ruby 2.2; support for older Python and Ruby versions has been removed.
- `pcs resource failcount reset` command has been removed as `pcs resource cleanup` is doing exactly the same job. ([rhbz#1427273])
- Deprecated commands `pcs cluster remote-node add | remove` have been removed as they were replaced with `pcs cluster node add-guest | remove-guest`
- Ability to create master resources has been removed as they are deprecated in Pacemaker 2.x ([rhbz#1542288]) (see the sketch after this list)
  - Instead of `pcs resource create ... master` use `pcs resource create ... promotable` or `pcs resource create ... clone promotable=true`
  - Instead of `pcs resource master` use `pcs resource promotable` or `pcs resource clone ... promotable=true`
- Deprecated --clone option from `pcs resource create` command
- Ability to manage node attributes with `pcs property set|unset|show` commands (using the `--node` option). The same functionality is still available using the `pcs node attribute` command.
- Undocumented version of the `pcs constraint colocation add` command, its syntax was `pcs constraint colocation add [score] [options]`
- Deprecated commands `pcs cluster standby | unstandby`, use `pcs node standby | unstandby` instead
- Deprecated command `pcs cluster quorum unblock` which was replaced by `pcs quorum unblock`
- Subcommand `pcs status groups` as it was not showing a cluster status but cluster configuration. The same functionality is still available using the command `pcs resource group list`
- Undocumented command `pcs acl target`, use `pcs acl user` instead
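To illustrate the master-to-promotable transition flagged in the list above, the two replacement forms side by side; the agent and resource ids are only examples:

```sh
# Create a promotable clone in one step...
pcs resource create stateful-a ocf:pacemaker:Stateful promotable
# ...or turn an existing primitive into a promotable clone.
pcs resource promotable stateful-b
```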
### Added
- Validation for an inaccessible resource inside a bundle ([rhbz#1462248])
- Options to filter failures by an operation and its interval in `pcs resource cleanup` and `pcs resource failcount show` commands ([rhbz#1427273])
- Commands for listing and testing watchdog devices ([rhbz#1578891])
- Commands for creating promotable clone resources `pcs resource promotable` and `pcs resource create ... promotable` ([rhbz#1542288])
- `pcs resource update` and `pcs resource meta` commands change master resources to promotable clone resources because master resources are deprecated in Pacemaker 2.x ([rhbz#1542288])
- Support for the `promoted-max` bundle option replacing the `masters` option in Pacemaker 2.x ([rhbz#1542288])
- Support for OP\_NO\_RENEGOTIATION option when OpenSSL supports it (even with Python 3.6) ([rhbz#1566430])
- Support for container types `rkt` and `podman` in bundle commands ([rhbz#1619620])
- Support for promotable clone resources in pcsd and web UI ([rhbz#1542288])
- Obsoleting parameters of resource and fence agents are now supported and preferred over deprecated parameters ([rhbz#1436217])
- `pcs status` now shows failed and pending fencing actions and `pcs status --full` shows the whole fencing history. Pacemaker supporting fencing history is required. ([rhbz#1615891])
- `pcs stonith history` commands for displaying, synchronizing and cleaning up fencing history. Pacemaker supporting fencing history is required. ([rhbz#1620190])
- Validation of node existence in a cluster when creating location constraints ([rhbz#1553718])
- Command `pcs client local-auth` for authentication of the pcs client against local pcsd. This is required when a non-root user wants to execute a command which requires root permissions (e.g. `pcs cluster start`). ([rhbz#1554302])
- Command `pcs resource group list` which has the same functionality as the removed command `pcs resource show --groups`

### Fixed
- Fixed encoding of the CIB\_user\_groups cookie in communication between nodes.
- `pcs cluster cib-push diff-against=` does not consider an empty diff as an error ([ghpull#166])
- `pcs cluster cib-push diff-against=` exits gracefully with an error message if crm\_feature\_set < 3.0.9 ([rhbz#1488044])
- `pcs resource update` does not create an empty meta\_attributes element any more ([rhbz#1568353])
- `pcs resource debug-*` commands provide debug messages even with pacemaker-1.1.18 and newer ([rhbz#1574898])
- Improve `pcs quorum device add` usage and man page ([rhbz#1476862])
- Removing resources using web UI when the operation takes longer than expected ([rhbz#1579911])
- Removing a cluster node no longer leaves the node in the CIB and therefore cluster status even if the removal is run on the node which is being removed ([rhbz#1595829])
- Possible race condition causing an HTTP 408 error when sending larger files via pcs ([rhbz#1600169])
- Configuring QDevice works even if NSS with the new db format (cert9.db, key4.db, pkcs11.txt) is used ([rhbz#1596721])
- Options starting with '-' and '--' are no longer accepted by commands for which those options have no effect ([rhbz#1533866])
- When a user makes an error in a pcs command, usage for that specific command is printed instead of printing the whole usage
- Show a more user friendly error message when testing a watchdog device and multiple devices are present ([rhbz#1578891])
- Do not distinguish between supported and unsupported watchdog devices as SBD cannot reliably provide such information ([rhbz#1578891])
- `pcs config` no longer crashes when `crm_mon` prints something to stderr ([rhbz#1578955])
- `pcs resource bundle update` command for bundles which are using an unsupported container backend ([rhbz#1619620])
- Do not crash if unable to load SSL certificate or key, log errors and exit gracefully instead ([rhbz#1638852])
- Fixed several issues in parsing the `pcs constraint colocation add` command.
- All `remove` subcommands now have `delete` aliases and vice versa. Previously, only some of them did and it was mostly undocumented.
- The `pcs acl role delete` command no longer deletes ACL users and groups with no ACL roles assigned

### Changed
- Authentication has been overhauled ([rhbz#1549535]) (see the combined sketch after this section):
  - The `pcs cluster auth` command only authenticates nodes in a local cluster and does not accept a node list.
  - The new command for authentication is `pcs host auth`. It allows specifying host names, addresses and pcsd ports.
  - Previously, running `pcs cluster auth A B C` caused A, B and C to be all authenticated against each other. Now, `pcs host auth A B C` makes the local host authenticated against A, B and C. This allows better control of what is authenticated against what.
  - The `pcs pcsd clear-auth` command has been replaced by `pcs pcsd deauth` and `pcs host deauth` commands. The new commands allow deauthenticating a single host / token as well as all hosts / tokens.
  - These changes are not backward compatible. You should use the `pcs host auth` command to re-authenticate your hosts.
- The `pcs cluster setup` command has been overhauled ([rhbz#1158816], [rhbz#1183103]) (see the combined sketch after this section):
  - It works with Corosync 3.x only and supports knet as well as udp/udpu.
  - Node names are now supported.
  - The number of Corosync options configurable by the command has been significantly increased.
  - The syntax of the command has been completely changed to accommodate the changes and new features.
  - Corosync encryption is enabled by default when knet is used ([rhbz#1648942])
- The `pcs cluster node add` command has been overhauled ([rhbz#1158816], [rhbz#1183103]):
  - It works with Corosync 3.x only and supports knet as well as udp/udpu.
  - Node names are now supported.
  - The syntax of the command has been changed to accommodate new features and to be consistent with other pcs commands.
- The `pcs cluster node remove` command has been overhauled ([rhbz#1158816], [rhbz#1595829]):
  - It works with Corosync 3.x only and supports knet as well as udp/udpu.
  - It is now possible to remove more than one node at once.
  - Removing a cluster node no longer leaves the node in the CIB and therefore cluster status even if the removal is run on the node which is being removed
- Node names are fully supported now and are no longer coupled with node addresses. It is possible to set up a cluster where Corosync communicates over different addresses than pcs/pcsd. ([rhbz#1158816], [rhbz#1183103])
- Node names are now required while node addresses are optional in the `pcs cluster node add-guest` and `pcs cluster node add-remote` commands. Previously, it was the other way around.
- Web UI has been updated following changes in authentication and support for Corosync 3.x ([rhbz#1158816], [rhbz#1183103], [rhbz#1549535])
- Commands related to resource failures have been overhauled to support changes in pacemaker. Failures are now tracked per resource operations on top of resources and nodes. ([rhbz#1427273], [rhbz#1588667])
- `--watchdog` and `--device` options of `pcs stonith sbd enable` and `pcs stonith sbd device setup` commands have been replaced with `watchdog` and `device` options respectively
- Update pacemaker daemon names to match changes in pacemaker-2.0 ([rhbz#1573344])
- Watchdog devices are validated against a list provided by sbd ([rhbz#1578891])
- Resource operation option `requires` is no longer accepted to match changes in pacemaker-2.0 ([rhbz#1605185])
- Update pacemaker exit codes to match changes in pacemaker-2.0 ([rhbz#1536121])
- `pcs cluster cib-upgrade` no longer exits with an error if the CIB schema is already the latest available (this has been changed in pacemaker-2.0)
- Pcs now configures corosync to put timestamps in its log ([rhbz#1615420])
- Option `-V` has been replaced with `--full` and a CIB file can be specified only using option `-f` in `pcs cluster verify`
- Master resources are now called promotable clone resources to match changes in pacemaker-2.0 ([rhbz#1542288])
- Key size of default pcsd self-generated certificates increased from 2048b to 3072b ([rhbz#1638852])
- pcsd.service now depends on network-online.target ([rhbz#1640477])
- Split command `pcs resource [show]` into two new commands:
  - `pcs resource [status]` - same as `pcs resource [show]`
  - `pcs resource config` - same as `pcs resource [show] --full` or with a resource id specified instead of --full
  Respective changes have been made to the `pcs stonith [show]` command.
- Previously, `pcs cluster sync` synchronized only corosync configuration across all nodes configured in the cluster. This command will be changed in the future to sync all cluster configuration. New subcommand `pcs cluster sync corosync` has been introduced to sync only corosync configuration. For now, both commands have the same functionality.
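A combined sketch of the overhauled authentication and cluster setup flows described in this section; host names and addresses are placeholders, and knet with encryption is the default transport per the entries above:

```sh
# Authenticate the local host against the future cluster nodes
# (prompts for the hacluster password).
pcs host auth node1 addr=192.0.2.11 node2 addr=192.0.2.12

# Create the cluster using node names, with addresses given per node.
pcs cluster setup newcluster node1 addr=192.0.2.11 node2 addr=192.0.2.12

# Forget stored tokens for a host that is being retired.
pcs host deauth node2
```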
### Security
- CVE-2018-1086: Debug parameter removal bypass, allowing information disclosure ([rhbz#1557366])
- CVE-2018-1079: Privilege escalation via authorized user malicious REST call ([rhbz#1550243])

### Deprecated
- The `masters` bundle option is obsoleted by the `promoted-max` option in Pacemaker 2.x and therefore in pcs ([rhbz#1542288])
- `pcs cluster uidgid rm`, use `pcs cluster uidgid delete` or `pcs cluster uidgid remove` instead

[ghpull#166]: https://github.com/ClusterLabs/pcs/pull/166
[rhbz#1158816]: https://bugzilla.redhat.com/show_bug.cgi?id=1158816
[rhbz#1183103]: https://bugzilla.redhat.com/show_bug.cgi?id=1183103
[rhbz#1427273]: https://bugzilla.redhat.com/show_bug.cgi?id=1427273
[rhbz#1436217]: https://bugzilla.redhat.com/show_bug.cgi?id=1436217
[rhbz#1462248]: https://bugzilla.redhat.com/show_bug.cgi?id=1462248
[rhbz#1476862]: https://bugzilla.redhat.com/show_bug.cgi?id=1476862
[rhbz#1488044]: https://bugzilla.redhat.com/show_bug.cgi?id=1488044
[rhbz#1533866]: https://bugzilla.redhat.com/show_bug.cgi?id=1533866
[rhbz#1536121]: https://bugzilla.redhat.com/show_bug.cgi?id=1536121
[rhbz#1542288]: https://bugzilla.redhat.com/show_bug.cgi?id=1542288
[rhbz#1549535]: https://bugzilla.redhat.com/show_bug.cgi?id=1549535
[rhbz#1550243]: https://bugzilla.redhat.com/show_bug.cgi?id=1550243
[rhbz#1553718]: https://bugzilla.redhat.com/show_bug.cgi?id=1553718
[rhbz#1554302]: https://bugzilla.redhat.com/show_bug.cgi?id=1554302
[rhbz#1557366]: https://bugzilla.redhat.com/show_bug.cgi?id=1557366
[rhbz#1566430]: https://bugzilla.redhat.com/show_bug.cgi?id=1566430
[rhbz#1568353]: https://bugzilla.redhat.com/show_bug.cgi?id=1568353
[rhbz#1573344]: https://bugzilla.redhat.com/show_bug.cgi?id=1573344
[rhbz#1574898]: https://bugzilla.redhat.com/show_bug.cgi?id=1574898
[rhbz#1578891]: https://bugzilla.redhat.com/show_bug.cgi?id=1578891
[rhbz#1578955]: https://bugzilla.redhat.com/show_bug.cgi?id=1578955
[rhbz#1579911]: https://bugzilla.redhat.com/show_bug.cgi?id=1579911
[rhbz#1588667]: https://bugzilla.redhat.com/show_bug.cgi?id=1588667
[rhbz#1595829]: https://bugzilla.redhat.com/show_bug.cgi?id=1595829
[rhbz#1596721]: https://bugzilla.redhat.com/show_bug.cgi?id=1596721
[rhbz#1600169]: https://bugzilla.redhat.com/show_bug.cgi?id=1600169
[rhbz#1605185]: https://bugzilla.redhat.com/show_bug.cgi?id=1605185
[rhbz#1615420]: https://bugzilla.redhat.com/show_bug.cgi?id=1615420
[rhbz#1615891]: https://bugzilla.redhat.com/show_bug.cgi?id=1615891
[rhbz#1619620]: https://bugzilla.redhat.com/show_bug.cgi?id=1619620
[rhbz#1620190]: https://bugzilla.redhat.com/show_bug.cgi?id=1620190
[rhbz#1638852]: https://bugzilla.redhat.com/show_bug.cgi?id=1638852
[rhbz#1640477]: https://bugzilla.redhat.com/show_bug.cgi?id=1640477
[rhbz#1648942]: https://bugzilla.redhat.com/show_bug.cgi?id=1648942

## [0.9.163] - 2018-02-20

### Added
- Added `pcs status booth` as an alias to `pcs booth status`
- A warning is displayed in `pcs status` and a stonith device detail in web UI when a stonith device has its `method` option set to `cycle` ([rhbz#1523378])

### Fixed
- `--skip-offline` is no longer ignored in the `pcs quorum device remove` command
- pcs now waits up to 5 minutes (previously 10 seconds) for pcsd restart when synchronizing pcsd certificates
- Usage and man page now correctly state it is possible to enable or disable several stonith devices at once
- It is now possible to set the `action` option of stonith devices in web UI by using force ([rhbz#1421702])
- Do not crash when `--wait` is used in `pcs stonith create` ([rhbz#1522813])
- Nodes are now authenticated after running `pcs cluster auth` even if an existing corosync.conf defines no nodes ([ghissue#153], [rhbz#1517333])
- Pcs now properly exits with code 1 when an error occurs in `pcs cluster node add-remote` and `pcs cluster node add-guest` commands ([rhbz#1464781])
- Fixed a crash in the `pcs booth sync` command ([rhbz#1527530])
- Always replace the whole CIB instead of applying a diff when crm\_feature\_set <= 3.0.8 ([rhbz#1488044])
- Fixed `pcs cluster auth` in a cluster when not authenticated and using a non-default port ([rhbz#1415197])
- Fixed `pcs cluster auth` in a cluster when previously authenticated using a non-default port and reauthenticating using an implicit default port ([rhbz#1415197])

[ghissue#153]: https://github.com/ClusterLabs/pcs/issues/153
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
[rhbz#1464781]: https://bugzilla.redhat.com/show_bug.cgi?id=1464781
[rhbz#1488044]: https://bugzilla.redhat.com/show_bug.cgi?id=1488044
[rhbz#1517333]: https://bugzilla.redhat.com/show_bug.cgi?id=1517333
[rhbz#1522813]: https://bugzilla.redhat.com/show_bug.cgi?id=1522813
[rhbz#1523378]: https://bugzilla.redhat.com/show_bug.cgi?id=1523378
[rhbz#1527530]: https://bugzilla.redhat.com/show_bug.cgi?id=1527530

## [0.9.162] - 2017-11-15

### Added
- `pcs status --full` now displays information about tickets ([rhbz#1389943])
- Support for managing qdevice heuristics ([rhbz#1389209])
- SNMP agent providing information about cluster to the master agent. It supports only python 2.7 for now ([rhbz#1367808]).

### Fixed
- Fixed crash when loading a huge xml ([rhbz#1506864])
- Fixed adding an existing cluster into the web UI ([rhbz#1415197])
- False warnings about failed actions when a resource is master/unmastered from the web UI ([rhbz#1506220])

### Changed
- `pcs resource|stonith cleanup` no longer deletes the whole operation history of resources. Instead, it only deletes failed operations from the history. The original functionality is available in the `pcs resource|stonith refresh` command. ([rhbz#1508351], [rhbz#1508350])
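The cleanup/refresh split can be summarized in two commands; the resource id is a placeholder:

```sh
pcs resource cleanup my-resource   # forget only failed operations
pcs resource refresh my-resource   # forget the whole operation history (old cleanup behavior)
```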
[rhbz#1367808]: https://bugzilla.redhat.com/show_bug.cgi?id=1367808
[rhbz#1389209]: https://bugzilla.redhat.com/show_bug.cgi?id=1389209
[rhbz#1389943]: https://bugzilla.redhat.com/show_bug.cgi?id=1389943
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1506220]: https://bugzilla.redhat.com/show_bug.cgi?id=1506220
[rhbz#1506864]: https://bugzilla.redhat.com/show_bug.cgi?id=1506864
[rhbz#1508350]: https://bugzilla.redhat.com/show_bug.cgi?id=1508350
[rhbz#1508351]: https://bugzilla.redhat.com/show_bug.cgi?id=1508351

## [0.9.161] - 2017-11-02

### Added
- List of pcs and pcsd capabilities ([rhbz#1230919])

### Fixed
- Fixed `pcs cluster auth` when already authenticated and using a different port ([rhbz#1415197])
- It is now possible to restart a bundle resource on one node ([rhbz#1501274])
- `resource update` no longer exits with an error when the `remote-node` meta attribute is set to the same value that it already has ([rhbz#1502715], [ghissue#145])
- Listing and describing resource and stonith agents no longer crashes when agents' metadata contain non-ascii characters ([rhbz#1503110], [ghissue#151])

[ghissue#145]: https://github.com/ClusterLabs/pcs/issues/145
[ghissue#151]: https://github.com/ClusterLabs/pcs/issues/151
[rhbz#1230919]: https://bugzilla.redhat.com/show_bug.cgi?id=1230919
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1501274]: https://bugzilla.redhat.com/show_bug.cgi?id=1501274
[rhbz#1502715]: https://bugzilla.redhat.com/show_bug.cgi?id=1502715
[rhbz#1503110]: https://bugzilla.redhat.com/show_bug.cgi?id=1503110

## [0.9.160] - 2017-10-09

### Added
- Configurable pcsd port ([rhbz#1415197])
- Description of the `--force` option added to man page and help ([rhbz#1491631])

### Fixed
- Fixed some crashes when pcs encounters a non-ascii character in environment variables, command line arguments and so on ([rhbz#1435697])
- Fixed detecting if systemd is in use ([ghissue#118])
- Upgrade CIB schema version when the `resource-discovery` option is used in location constraints ([rhbz#1420437])
- Fixed error messages in `pcs cluster report` ([rhbz#1388783])
- Increase request timeout when starting a cluster with a large number of nodes to prevent timeouts ([rhbz#1463327])
- Fixed "Unable to update cib" error caused by invalid resource operation IDs
- `pcs resource op defaults` now fails on an invalid option ([rhbz#1341582])
- Fixed behaviour of the `pcs cluster verify` command when entered with the filename argument ([rhbz#1213946])

### Changed
- CIB changes are now pushed to pacemaker as a diff in commands overhauled to the new architecture (previously the whole CIB was pushed). This resolves race conditions and ACL-related errors when pushing CIB. ([rhbz#1441673])
- All actions / operations defined in resource agent's metadata (except meta-data, status and validate-all) are now copied to the CIB when creating a resource. ([rhbz#1418199], [ghissue#132])
- Improve documentation of the `pcs stonith confirm` command ([rhbz#1489682])

### Deprecated
- This is the last version fully supporting CMAN clusters and python 2.6. Support for these will be gradually dropped.
[ghissue#118]: https://github.com/ClusterLabs/pcs/issues/118
[ghissue#132]: https://github.com/ClusterLabs/pcs/issues/132
[rhbz#1213946]: https://bugzilla.redhat.com/show_bug.cgi?id=1213946
[rhbz#1341582]: https://bugzilla.redhat.com/show_bug.cgi?id=1341582
[rhbz#1388783]: https://bugzilla.redhat.com/show_bug.cgi?id=1388783
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1418199]: https://bugzilla.redhat.com/show_bug.cgi?id=1418199
[rhbz#1420437]: https://bugzilla.redhat.com/show_bug.cgi?id=1420437
[rhbz#1435697]: https://bugzilla.redhat.com/show_bug.cgi?id=1435697
[rhbz#1441673]: https://bugzilla.redhat.com/show_bug.cgi?id=1441673
[rhbz#1463327]: https://bugzilla.redhat.com/show_bug.cgi?id=1463327
[rhbz#1489682]: https://bugzilla.redhat.com/show_bug.cgi?id=1489682
[rhbz#1491631]: https://bugzilla.redhat.com/show_bug.cgi?id=1491631

## [0.9.159] - 2017-06-30

### Added
- Option to create a cluster with or without corosync encryption enabled; by default the encryption is disabled ([rhbz#1165821])
- It is now possible to disable, enable, unmanage and manage bundle resources and set their meta attributes ([rhbz#1447910]) (see the sketch after this list)
- Pcs now warns against using the `action` option of stonith devices ([rhbz#1421702])
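An illustrative session for the newly manageable bundle resources; `my-bundle` is a hypothetical bundle id:

```sh
pcs resource disable my-bundle    # stop the bundle
pcs resource unmanage my-bundle   # tell the cluster to stop managing it
pcs resource manage my-bundle     # hand control back to the cluster
pcs resource enable my-bundle
```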
### Fixed
- Fixed crash of the `pcs cluster setup` command when the `--force` flag was used ([rhbz#1176018])
- Fixed crash of the `pcs cluster destroy --all` command when the cluster was not running ([rhbz#1176018])
- Fixed crash of the `pcs config restore` command when restoring pacemaker authkey ([rhbz#1176018])
- Fixed "Error: unable to get cib" when adding a node to a stopped cluster ([rhbz#1176018])
- Fixed a crash in the `pcs cluster node add-remote` command when an id conflict occurs ([rhbz#1386114])
- Fixed creating a new cluster from the web UI ([rhbz#1284404])
- `pcs cluster node add-guest` now works with the flag `--skip-offline` ([rhbz#1176018])
- `pcs cluster node remove-guest` can be run again when the guest node was unreachable the first time ([rhbz#1176018])
- Fixed "Error: Unable to read /etc/corosync/corosync.conf" when running `pcs resource create` ([rhbz#1386114])
- It is now possible to set `debug` and `verbose` parameters of stonith devices ([rhbz#1432283])
- Resource operation ids are now properly validated and no longer ignored in `pcs resource create`, `pcs resource update` and `pcs resource op add` commands ([rhbz#1443418])
- Flag `--force` works correctly when an operation is not successful on some nodes during `pcs cluster node add-remote` or `pcs cluster node add-guest` ([rhbz#1464781])

### Changed
- Binary data are stored in corosync authkey ([rhbz#1165821])
- It is now mandatory to specify the container type in the `resource bundle create` command
- When creating a new cluster, corosync communication encryption is disabled by default (in 0.9.158 it was enabled by default, in 0.9.157 and older it was disabled)

[rhbz#1165821]: https://bugzilla.redhat.com/show_bug.cgi?id=1165821
[rhbz#1176018]: https://bugzilla.redhat.com/show_bug.cgi?id=1176018
[rhbz#1284404]: https://bugzilla.redhat.com/show_bug.cgi?id=1284404
[rhbz#1386114]: https://bugzilla.redhat.com/show_bug.cgi?id=1386114
[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
[rhbz#1432283]: https://bugzilla.redhat.com/show_bug.cgi?id=1432283
[rhbz#1443418]: https://bugzilla.redhat.com/show_bug.cgi?id=1443418
[rhbz#1447910]: https://bugzilla.redhat.com/show_bug.cgi?id=1447910
[rhbz#1464781]: https://bugzilla.redhat.com/show_bug.cgi?id=1464781

## [0.9.158] - 2017-05-23

### Added
- Support for bundle resources (CLI only) ([rhbz#1433016])
- Commands for adding and removing guest and remote nodes including handling pacemaker authkey (CLI only) ([rhbz#1176018], [rhbz#1254984], [rhbz#1386114], [rhbz#1386512])
- Command `pcs cluster node clear` to remove a node from pacemaker's configuration and caches (see the sketch after this list)
- Backing up and restoring cluster configuration by `pcs config backup` and `pcs config restore` commands now supports corosync and pacemaker authkeys ([rhbz#1165821], [rhbz#1176018])
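A one-line illustration of the new clear subcommand; the node name is a placeholder:

```sh
# Remove a stale node from pacemaker's configuration and caches.
pcs cluster node clear old-node
```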
### Deprecated
- `pcs cluster remote-node add` and `pcs cluster remote-node remove` commands have been deprecated in favor of `pcs cluster node add-guest` and `pcs cluster node remove-guest` commands ([rhbz#1386512])

### Fixed
- Fixed a bug which under specific conditions caused pcsd to crash on start when running under systemd ([ghissue#134])
- `pcs resource unmanage` now sets the unmanaged flag to primitive resources even if a clone or master/slave resource is specified. Thus the primitive resources will not become managed just by uncloning. This also prevents some discrepancies between disabled monitor operations and the unmanaged flag. ([rhbz#1303969])
- `pcs resource unmanage --monitor` now properly disables monitor operations even if a clone or master/slave resource is specified. ([rhbz#1303969])
- `--help` option now shows help just for the specified command. Previously the usage for a whole group of commands was shown.
- Fixed a crash when `pcs cluster cib-push` is called with an explicit value of the `--wait` flag ([rhbz#1422667])
- Handle pcsd crash when an unusable address is set in `PCSD_BIND_ADDR` ([rhbz#1373614])
- Removal of a pacemaker remote resource no longer causes the respective remote node to be fenced ([rhbz#1390609])

### Changed
- Newly created clusters are set up to encrypt corosync communication ([rhbz#1165821], [ghissue#98])

[ghissue#98]: https://github.com/ClusterLabs/pcs/issues/98
[ghissue#134]: https://github.com/ClusterLabs/pcs/issues/134
[rhbz#1176018]: https://bugzilla.redhat.com/show_bug.cgi?id=1176018
[rhbz#1254984]: https://bugzilla.redhat.com/show_bug.cgi?id=1254984
[rhbz#1303969]: https://bugzilla.redhat.com/show_bug.cgi?id=1303969
[rhbz#1373614]: https://bugzilla.redhat.com/show_bug.cgi?id=1373614
[rhbz#1386114]: https://bugzilla.redhat.com/show_bug.cgi?id=1386114
[rhbz#1386512]: https://bugzilla.redhat.com/show_bug.cgi?id=1386512
[rhbz#1390609]: https://bugzilla.redhat.com/show_bug.cgi?id=1390609
[rhbz#1422667]: https://bugzilla.redhat.com/show_bug.cgi?id=1422667
[rhbz#1433016]: https://bugzilla.redhat.com/show_bug.cgi?id=1433016
[rhbz#1165821]: https://bugzilla.redhat.com/show_bug.cgi?id=1165821

## [0.9.157] - 2017-04-10

### Added
- Resources in location constraints now may be specified by resource name patterns in addition to resource names ([rhbz#1362493])
- Proxy settings description in pcsd configuration file ([rhbz#1315627])
- Man page for pcsd ([rhbz#1378742])
- Pcs now allows setting the `trace_ra` and `trace_file` options of `ocf:heartbeat` and `ocf:pacemaker` resources ([rhbz#1421702]) (see the sketch after this list)
- `pcs resource describe` and `pcs stonith describe` commands now show all information about the specified agent if the `--full` flag is used
- `pcs resource manage | unmanage` enables respectively disables monitor operations when the `--monitor` flag is specified ([rhbz#1303969])
- Support for shared storage in SBD. Currently, there is very limited support in web UI ([rhbz#1413958])
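Resource agent tracing mentioned above can be switched on per resource; the resource id and the value `1` are illustrative assumptions about how the option is typically set:

```sh
# Ask the OCF agent to write a trace log for every action it runs.
pcs resource update my-ip trace_ra=1
```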
### Changed
- It is now possible to specify more than one resource in the `pcs resource enable` and `pcs resource disable` commands.

### Fixed
- Python 3: pcs no longer spams stderr with error messages when communicating with another node
- Stopping a cluster does not time out too early and it generally works better even if the cluster is running Virtual IP resources ([rhbz#1334429])
- `pcs booth remove` now works correctly even if the booth resource group is disabled (another fix) ([rhbz#1389941])
- Fixed Cross-site scripting (XSS) vulnerability in web UI ([CVE-2017-2661], [rhbz#1434111])
- Pcs no longer allows creating a stonith resource based on an agent whose name contains a colon ([rhbz#1415080])
- Pcs command now launches Python interpreter with "sane" options (python -Es) ([rhbz#1328882])
- Clufter is now supported on both Python 2 and Python 3 ([rhbz#1428350])
- Do not colorize clufter output if saved to a file

[CVE-2017-2661]: https://access.redhat.com/security/cve/CVE-2017-2661
[rhbz#1303969]: https://bugzilla.redhat.com/show_bug.cgi?id=1303969
[rhbz#1315627]: https://bugzilla.redhat.com/show_bug.cgi?id=1315627
[rhbz#1328882]: https://bugzilla.redhat.com/show_bug.cgi?id=1328882
[rhbz#1334429]: https://bugzilla.redhat.com/show_bug.cgi?id=1334429
[rhbz#1362493]: https://bugzilla.redhat.com/show_bug.cgi?id=1362493
[rhbz#1378742]: https://bugzilla.redhat.com/show_bug.cgi?id=1378742
[rhbz#1389941]: https://bugzilla.redhat.com/show_bug.cgi?id=1389941
[rhbz#1413958]: https://bugzilla.redhat.com/show_bug.cgi?id=1413958
[rhbz#1415080]: https://bugzilla.redhat.com/show_bug.cgi?id=1415080
[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
[rhbz#1428350]: https://bugzilla.redhat.com/show_bug.cgi?id=1428350
[rhbz#1434111]: https://bugzilla.redhat.com/show_bug.cgi?id=1434111

## [0.9.156] - 2017-02-10

### Added
- Fencing levels now may be targeted in CLI by a node name pattern or a node attribute in addition to a node name ([rhbz#1261116])
- `pcs cluster cib-push` allows pushing a diff obtained internally by comparing CIBs in specified files ([rhbz#1404233], [rhbz#1419903]) (see the sketch after this list)
- Added flags `--wait`, `--disabled`, `--group`, `--after`, `--before` into the command `pcs stonith create`
- Added commands `pcs stonith enable` and `pcs stonith disable`
- Command line option --request-timeout ([rhbz#1292858])
- Check whether a proxy is set when unable to connect to a node ([rhbz#1315627])
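A sketch of the diff-based push workflow referenced above; the file names are placeholders (`pcs cluster cib <file>` saves the live CIB):

```sh
pcs cluster cib original.xml      # save the current CIB to a file
cp original.xml modified.xml      # ...edit modified.xml offline...
pcs cluster cib-push modified.xml diff-against=original.xml
```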
### Changed
- `pcs node [un]standby` and `pcs node [un]maintenance` are now atomic even if more than one node is specified ([rhbz#1315992])
- Restarting pcsd initiated from pcs is now a synchronous operation ([rhbz#1284404])
- Stopped bundling fonts used in pcsd web UI ([ghissue#125])
- In `pcs resource create` flags `--master` and `--clone` changed to keywords `master` and `clone`
- libcurl is now used for node to node communication

### Fixed
- When upgrading CIB to the latest schema version, check for the minimal common version across the cluster ([rhbz#1389443])
- `pcs booth remove` now works correctly even if the booth resource group is disabled ([rhbz#1389941])
- Adding a node in a CMAN cluster does not cause the new node to be fenced immediately ([rhbz#1394846])
- Show a proper error message when there is an HTTP communication failure ([rhbz#1394273])
- Fixed searching for files to remove in the `/var/lib` directory ([ghpull#119], [ghpull#120])
- Fixed messages when managing services (start, stop, enable, disable...)
- Fixed disabling services on systemd systems when using instances ([rhbz#1389501])
- Fixed parsing commandline options ([rhbz#1404229])
- Pcs does not exit with a false error message anymore when pcsd-cli.rb outputs to stderr ([ghissue#124])
- Pcs now exits with an error when both `--all` and a list of nodes is specified in the `pcs cluster start | stop | enable | disable` commands ([rhbz#1339355])
- Built-in help and man page fixes and improvements ([rhbz#1347335])
- In `pcs resource create` the flag `--clone` no longer steals arguments from the keywords `meta` and `op` ([rhbz#1395226])
- `pcs resource create` does not produce an invalid cib when a group id is already occupied with a non-resource element ([rhbz#1382004])
- Fixed misbehavior of the flag `--master` in `pcs resource create` command ([rhbz#1378107])
- Fixed tacit acceptance of an invalid resource operation in `pcs resource create` ([rhbz#1398562])
- Fixed misplacing metadata for disabling when running `pcs resource create` with flags `--clone` and `--disabled` ([rhbz#1402475])
- Fixed incorrect acceptance of an invalid attribute of a resource operation in `pcs resource create` ([rhbz#1382597])
- Fixed validation of options of resource operations in `pcs resource create` ([rhbz#1390071])
- Fixed silent omission of duplicate options ([rhbz#1390066])
- Added more validation for resource agent names ([rhbz#1387670])
- Fixed network communication issues in pcsd when a node was specified by an IPv6 address
- Fixed JS error in web UI when empty cluster status is received ([rhbz#1396462])
- Fixed sending user group in cookies from Python 3
- Fixed pcsd restart in Python 3
- Fixed parsing XML in Python 3 (caused crashes when reading resource agents metadata) ([rhbz#1419639])
- Fixed the recognition of the structure of a resource agent name that contains a systemd instance ([rhbz#1419661])

### Removed
- Ruby 1.8 and 1.9 are no longer supported due to bad libcurl support

[ghissue#124]: https://github.com/ClusterLabs/pcs/issues/124
[ghissue#125]: https://github.com/ClusterLabs/pcs/issues/125
[ghpull#119]: https://github.com/ClusterLabs/pcs/pull/119
[ghpull#120]: https://github.com/ClusterLabs/pcs/pull/120
[rhbz#1261116]: https://bugzilla.redhat.com/show_bug.cgi?id=1261116
[rhbz#1284404]: https://bugzilla.redhat.com/show_bug.cgi?id=1284404
[rhbz#1292858]: https://bugzilla.redhat.com/show_bug.cgi?id=1292858
[rhbz#1315627]: https://bugzilla.redhat.com/show_bug.cgi?id=1315627
[rhbz#1315992]: https://bugzilla.redhat.com/show_bug.cgi?id=1315992
[rhbz#1339355]: https://bugzilla.redhat.com/show_bug.cgi?id=1339355
[rhbz#1347335]: https://bugzilla.redhat.com/show_bug.cgi?id=1347335
[rhbz#1378107]: https://bugzilla.redhat.com/show_bug.cgi?id=1378107
[rhbz#1382004]: https://bugzilla.redhat.com/show_bug.cgi?id=1382004
[rhbz#1382597]: https://bugzilla.redhat.com/show_bug.cgi?id=1382597
[rhbz#1387670]: https://bugzilla.redhat.com/show_bug.cgi?id=1387670
[rhbz#1389443]: https://bugzilla.redhat.com/show_bug.cgi?id=1389443
[rhbz#1389501]: https://bugzilla.redhat.com/show_bug.cgi?id=1389501
[rhbz#1389941]: https://bugzilla.redhat.com/show_bug.cgi?id=1389941
[rhbz#1390066]: https://bugzilla.redhat.com/show_bug.cgi?id=1390066
[rhbz#1390071]: https://bugzilla.redhat.com/show_bug.cgi?id=1390071
[rhbz#1394273]: https://bugzilla.redhat.com/show_bug.cgi?id=1394273
[rhbz#1394846]: https://bugzilla.redhat.com/show_bug.cgi?id=1394846
[rhbz#1395226]: https://bugzilla.redhat.com/show_bug.cgi?id=1395226
[rhbz#1396462]: https://bugzilla.redhat.com/show_bug.cgi?id=1396462
[rhbz#1398562]: https://bugzilla.redhat.com/show_bug.cgi?id=1398562
[rhbz#1402475]: https://bugzilla.redhat.com/show_bug.cgi?id=1402475
[rhbz#1404229]: https://bugzilla.redhat.com/show_bug.cgi?id=1404229
[rhbz#1404233]: https://bugzilla.redhat.com/show_bug.cgi?id=1404233
[rhbz#1419639]: https://bugzilla.redhat.com/show_bug.cgi?id=1419639
[rhbz#1419661]: https://bugzilla.redhat.com/show_bug.cgi?id=1419661
[rhbz#1419903]: https://bugzilla.redhat.com/show_bug.cgi?id=1419903

## [0.9.155] - 2016-11-03

### Added
- Show daemon status in `pcs status` on non-systemd machines
- SBD support for cman clusters ([rhbz#1380352])
- Alerts management in pcsd ([rhbz#1376480])

### Changed
- Get all information about resource and stonith agents from pacemaker. Pcs now supports the same set of agents as pacemaker does. ([rhbz#1262001], [ghissue#81])
- `pcs resource create` now exits with an error if more than one resource agent matches the specified short agent name instead of randomly selecting one of the agents
- Allow removing multiple alerts and alert recipients at once

### Fixed
- When stopping a cluster with some of the nodes unreachable, stop the cluster completely on all reachable nodes ([rhbz#1380372])
- Fixed pcsd crash when the rpam rubygem is installed ([ghissue#109])
- Fixed occasional crashes / failures when using a locale other than en\_US.UTF8 ([rhbz#1387106])
- Fixed starting and stopping cluster services on systemd machines without the `service` executable ([ghissue#115])

[ghissue#81]: https://github.com/ClusterLabs/pcs/issues/81
[ghissue#109]: https://github.com/ClusterLabs/pcs/issues/109
[ghissue#115]: https://github.com/ClusterLabs/pcs/issues/115
[rhbz#1262001]: https://bugzilla.redhat.com/show_bug.cgi?id=1262001
[rhbz#1376480]: https://bugzilla.redhat.com/show_bug.cgi?id=1376480
[rhbz#1380352]: https://bugzilla.redhat.com/show_bug.cgi?id=1380352
[rhbz#1380372]: https://bugzilla.redhat.com/show_bug.cgi?id=1380372
[rhbz#1387106]: https://bugzilla.redhat.com/show_bug.cgi?id=1387106

## [0.9.154] - 2016-09-21
- There is no change log for this and previous releases. We are sorry.
- Take a look at git history if you are interested.

==> pcs-0.10.4/COPYING <==
                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 [Full verbatim text of the GNU General Public License, version 2.]
END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this when it starts in an interactive mode:

Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:

Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.

<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice

This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.
pcs-0.10.4/MANIFEST.in000066400000000000000000000003311356771603100141250ustar00rootroot00000000000000include Makefile
include COPYING
include pcs/pcs.8
include pcs/bash_completion
include pcsd/.bundle/config
graft pcsd
graft pcsd/vendor/cache
prune pcsd/vendor/bundle
prune pcsd/test
recursive-exclude pcsd .gitignore
pcs-0.10.4/Makefile000066400000000000000000000313541356771603100140370ustar00rootroot00000000000000# Compatibility with GNU/Linux [i.e. Debian] based distros
UNAME_OS_GNU := $(shell if uname -o | grep -q "GNU/Linux" ; then echo true; else echo false; fi)
DISTRO_DEBIAN := $(shell if [ -e /etc/debian_version ] ; then echo true; else echo false; fi)
IS_DEBIAN=false
DISTRO_DEBIAN_VER_8=false

ifeq ($(UNAME_OS_GNU),true)
ifeq ($(DISTRO_DEBIAN),true)
IS_DEBIAN=true
DISTRO_DEBIAN_VER_8 := $(shell if grep -q -i "^8\|jessie" /etc/debian_version ; then echo true; else echo false; fi)
endif
endif

ifeq ($(IS_DEBIAN),true)
EXTRA_SETUP_OPTS="--install-layout=deb"
endif

# Check for systemd presence
ifeq ($(SYSTEMCTL_OVERRIDE),true)
IS_SYSTEMCTL=true
else
ifeq ($(SYSTEMCTL_OVERRIDE),false)
IS_SYSTEMCTL=false
else
IS_SYSTEMCTL = $(shell if [ -d /run/systemd/system ] || [ -d /var/run/systemd/system ] ; then echo true ; else echo false; fi)
endif
endif

# VARIABLES OVERRIDABLE FROM OUTSIDE
# ==================================

ifndef PYTHON
# some distros ship python3 as python
PYTHON := $(shell which python3 || which python)
endif

# PYTHON_SITELIB is a path (relative to DESTDIR, e.g.
# /usr/local/lib/python3.7/site-packages) where the command
# `python setup.py install` puts pcs python files. The reasons to know the path
# in the makefile are:
# 1) There is a need to modify .../pcs/settings.py after installation (for
# debian) and regenerate the .pyc file afterwards.
# 2) The pcs directory needs to be removed from PYTHON_SITELIB when
# uninstalling.
ifndef PYTHON_SITELIB
# USE_PYTHON_PLATLIB is a flag that instructs the installation process to use
# platlib (e.g. /usr/local/lib64/python3.7/site-packages) instead of purelib
# (e.g. /usr/local/lib/python3.7/site-packages) as the default value for
# PYTHON_SITELIB. .../lib is preferred over .../lib64 because of the hardcoded
# path in pcs/settings.py (more in the rhel specfile).
ifeq ($(USE_PYTHON_PLATLIB), true)
PYTHON_SITELIB=$(shell $(PYTHON) setup.py platlib | tail --lines=1)
else
PYTHON_SITELIB=$(shell $(PYTHON) setup.py purelib | tail --lines=1)
endif
endif

# Check for an override for building gems
ifndef BUILD_GEMS
BUILD_GEMS=true
endif

ifndef PREFIX
PREFIX=$(shell prefix=`$(PYTHON) -c "import sys; print(sys.prefix)"` || prefix="/usr"; echo $$prefix)
endif

ifndef SYSTEMD_DIR
SYSTEMD_DIR=/usr/lib/systemd
endif

ifndef SYSTEMD_UNIT_DIR
SYSTEMD_UNIT_DIR=${SYSTEMD_DIR}/system
endif

ifndef INIT_DIR
INIT_DIR=/etc/init.d
endif

ifndef BASH_COMPLETION_DIR
BASH_COMPLETION_DIR=/etc/bash_completion.d
endif

ifndef CONF_DIR
ifeq ($(IS_DEBIAN),true)
CONF_DIR = /etc/default
else
CONF_DIR = /etc/sysconfig
endif
endif

ifndef SYSTEMD_SERVICE_FILE
ifeq ($(IS_DEBIAN),true)
SYSTEMD_SERVICE_FILE = pcsd/pcsd.service.debian
else
SYSTEMD_SERVICE_FILE = pcsd/pcsd.service
endif
endif

ifndef LIB_DIR
ifeq ($(IS_DEBIAN),true)
LIB_DIR = /usr/share
else
LIB_DIR = ${PREFIX}/lib
endif
endif

ifndef BUNDLE_LOCAL_DIR
BUNDLE_LOCAL_DIR=./pcs/bundled/
endif

ifndef SNMP_MIB_DIR
SNMP_MIB_DIR=/share/snmp/mibs/
endif

# INSTALLATION FINE DETAIL CONTROL
# ================================
# `BUNDLE_INSTALL_PYAGENTX=false`
# to disable the default automatic pyagentx installation
# `BUNDLE_PYAGENTX_SRC_DIR=/path/to/pyagentx/sources`
# to install pyagentx from the given location instead of using the default
# location for downloading the sources and installing them
# `BUNDLE_TORNADO_SRC_DIR=/path/to/tornado/sources`
# to install tornado from the specified location (tornado is not installed by
# default)
BUNDLE_PYAGENTX_VERSION="0.4.pcs.2"
BUNDLE_PYAGENTX_URI="https://github.com/ondrejmular/pyagentx/archive/v${BUNDLE_PYAGENTX_VERSION}.tar.gz"

ifndef BUNDLE_INSTALL_PYAGENTX
BUNDLE_INSTALL_PYAGENTX=true endif BUNDLE_PYAGENTX_SRC_DOWNLOAD=false ifndef BUNDLE_PYAGENTX_SRC_DIR BUNDLE_PYAGENTX_SRC_DOWNLOAD=true endif ifneq ($(BUNDLE_INSTALL_PYAGENTX),true) BUNDLE_PYAGENTX_SRC_DOWNLOAD=false endif # There is BUNDLE_TO_INSTALL when BUNDLE_INSTALL_PYAGENTX is true or # BUNDLE_TORNADO_SRC_DIR is specified BUNDLE_TO_INSTALL=false ifeq ($(BUNDLE_INSTALL_PYAGENTX), true) BUNDLE_TO_INSTALL=true endif ifdef BUNDLE_TORNADO_SRC_DIR BUNDLE_TO_INSTALL=true endif # DESTINATION DIRS # ================ DEST_PYTHON_SITELIB = ${DESTDIR}${PYTHON_SITELIB} DEST_PYTHON_SCRIPT_DIR=${DESTDIR}$(shell $(PYTHON) setup.py scriptdir | tail --lines=1) DEST_MAN=${DESTDIR}/usr/share/man/man8 DEST_SYSTEMD_SYSTEM = ${DESTDIR}${SYSTEMD_UNIT_DIR} DEST_INIT = ${DESTDIR}${INIT_DIR} DEST_BASH_COMPLETION = ${DESTDIR}${BASH_COMPLETION_DIR} DEST_CONF = ${DESTDIR}${CONF_DIR} DEST_LIB = ${DESTDIR}${LIB_DIR} DEST_PREFIX = ${DESTDIR}${PREFIX} DEST_BUNDLE_LIB=${DEST_LIB}/pcs/bundled DEST_BUNDLE_LOCAL=$(shell readlink -f ${BUNDLE_LOCAL_DIR}) DEST_SNMP_MIB=${DEST_PREFIX}${SNMP_MIB_DIR} # OTHER # ===== pcsd_fonts = \ LiberationSans-Regular.ttf;LiberationSans:style=Regular \ LiberationSans-Bold.ttf;LiberationSans:style=Bold \ LiberationSans-BoldItalic.ttf;LiberationSans:style=BoldItalic \ LiberationSans-Italic.ttf;LiberationSans:style=Italic \ Overpass-Regular.ttf;Overpass:style=Regular \ Overpass-Bold.ttf;Overpass:style=Bold # 1 - an alternative file # 2 - a file which will be replaced by the alternative file define use-alternative-file rm -f $(2) install -m644 $(1) $(2) endef # 1 - sources directory - with python package sources # 2 - destination directory - python package will be installed into the # `packages` subdirectory of this destination directory define build_python_bundle cd $(1) && \ PYTHONPATH=$(2)/packages/ \ $(PYTHON) setup.py install --install-lib /packages/ --root $(2) endef # TARGETS # ======= bundle_pyagentx: ifeq ($(BUNDLE_PYAGENTX_SRC_DOWNLOAD),true) mkdir -p ${DEST_BUNDLE_LOCAL}/src $(eval BUNDLE_PYAGENTX_SRC_DIR=${DEST_BUNDLE_LOCAL}/src/pyagentx-${BUNDLE_PYAGENTX_VERSION}) rm -rf ${BUNDLE_PYAGENTX_SRC_DIR} wget -qO- ${BUNDLE_PYAGENTX_URI} | tar xvz -C ${DEST_BUNDLE_LOCAL}/src endif ifeq ($(BUNDLE_INSTALL_PYAGENTX),true) $(call build_python_bundle,${BUNDLE_PYAGENTX_SRC_DIR},$(PYAGENTX_LIB_DIR)) endif ifeq ($(BUNDLE_PYAGENTX_SRC_DOWNLOAD),true) rm -rf ${BUNDLE_PYAGENTX_SRC_DIR} endif install_bundled_libs: ifeq ($(BUNDLE_TO_INSTALL),true) install -d ${DEST_BUNDLE_LIB} endif ifdef BUNDLE_TORNADO_SRC_DIR $(call build_python_bundle,${BUNDLE_TORNADO_SRC_DIR},${DEST_BUNDLE_LIB}) endif $(MAKE) PYAGENTX_LIB_DIR=$(DEST_BUNDLE_LIB) bundle_pyagentx install_python_part: install_bundled_libs # make Python interpreter execution sane (via -Es flags) printf "[build]\nexecutable = $(PYTHON) -Es\n" > setup.cfg $(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS} # fix excessive script interpreting "executable" quoting with old setuptools: # https://github.com/pypa/setuptools/issues/188 # https://bugzilla.redhat.com/1353934 sed -i '1s|^\(#!\)"\(.*\)"$$|\1\2|' ${DEST_PYTHON_SCRIPT_DIR}/pcs sed -i '1s|^\(#!\)"\(.*\)"$$|\1\2|' ${DEST_PYTHON_SCRIPT_DIR}/pcs_snmp_agent sed -i '1s|^\(#!\)"\(.*\)"$$|\1\2|' ${DEST_PYTHON_SCRIPT_DIR}/pcs_internal rm setup.cfg mkdir -p ${DEST_PREFIX}/sbin/ mv ${DEST_PYTHON_SCRIPT_DIR}/pcs ${DEST_PREFIX}/sbin/pcs mv ${DEST_PYTHON_SCRIPT_DIR}/pcsd ${DEST_PREFIX}/sbin/pcsd install -D -m644 pcs/bash_completion ${DEST_BASH_COMPLETION}/pcs install -m644 -D pcs/pcs.8 
${DEST_MAN}/pcs.8 # pcs_internal mkdir -p ${DEST_LIB}/pcs/ mv ${DEST_PYTHON_SCRIPT_DIR}/pcs_internal ${DEST_LIB}/pcs/pcs_internal # pcs SNMP install mv ${DEST_PYTHON_SCRIPT_DIR}/pcs_snmp_agent ${DEST_LIB}/pcs/pcs_snmp_agent install -d ${DEST_SNMP_MIB} install -m 644 pcs/snmp/mibs/PCMK-PCS*-MIB.txt ${DEST_SNMP_MIB} install -m 644 -D pcs/snmp/pcs_snmp_agent.conf ${DEST_CONF}/pcs_snmp_agent install -m 644 -D pcs/snmp/pcs_snmp_agent.8 ${DEST_MAN}/pcs_snmp_agent.8 ifeq ($(IS_DEBIAN),true) $(call use-alternative-file,pcs/settings.py.debian,${DEST_PYTHON_SITELIB}/pcs/settings.py) endif $(PYTHON) -m compileall -fl ${DEST_PYTHON_SITELIB}/pcs/settings.py ifeq ($(IS_SYSTEMCTL),true) install -d ${DEST_SYSTEMD_SYSTEM} install -m 644 pcs/snmp/pcs_snmp_agent.service ${DEST_SYSTEMD_SYSTEM} endif install: install_python_part ifeq ($(BUILD_GEMS),true) make -C pcsd build_gems endif install -d -m 700 ${DESTDIR}/var/log/pcsd mkdir -p ${DEST_LIB}/pcsd/ cp -r pcsd ${DEST_LIB} install -m 644 -D pcsd/pcsd.conf ${DEST_CONF}/pcsd install -d ${DESTDIR}/etc/pam.d install -m 644 pcsd/pcsd.pam ${DESTDIR}/etc/pam.d/pcsd ifeq ($(IS_DEBIAN),true) $(call use-alternative-file,pcsd/settings.rb.debian,${DEST_LIB}/pcsd/settings.rb) endif ifeq ($(IS_DEBIAN)$(IS_SYSTEMCTL),truefalse) install -m 755 -D pcsd/pcsd.debian ${DEST_INIT}/pcsd else install -d ${DEST_SYSTEMD_SYSTEM} install -m 644 ${SYSTEMD_SERVICE_FILE} ${DEST_SYSTEMD_SYSTEM}/pcsd.service endif install -m 700 -d ${DESTDIR}/var/lib/pcsd install -m 644 -D pcsd/pcsd.logrotate ${DESTDIR}/etc/logrotate.d/pcsd install -m644 -D pcsd/pcsd.8 ${DEST_MAN}/pcsd.8 $(foreach font,$(pcsd_fonts),\ $(eval font_file = $(word 1,$(subst ;, ,$(font)))) \ $(eval font_def = $(word 2,$(subst ;, ,$(font)))) \ $(eval font_path = $(shell fc-match '--format=%{file}' '$(font_def)')) \ $(if $(font_path),ln -s -f $(font_path) ${DEST_LIB}/pcsd/public/css/$(font_file);,$(error Font $(font_def) not found)) \ ) # For running pcs_snmp_agent from a local (git clone) directory (without full # pcs installation) it is necessary to have pyagentx installed in expected # location inside the local directory. 
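# Example (a sketch, not a supported workflow; assumes network access so the
# pyagentx tarball can be downloaded from BUNDLE_PYAGENTX_URI defined above):
#   make bundle_pyagentx_local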
bundle_pyagentx_local: $(MAKE) PYAGENTX_LIB_DIR=$(DEST_BUNDLE_LOCAL) bundle_pyagentx uninstall: rm -f ${DEST_PREFIX}/sbin/pcs rm -rf ${DEST_PYTHON_SITELIB}/pcs rm -rf ${DEST_LIB}/pcsd rm -rf ${DEST_LIB}/pcs ifeq ($(IS_DEBIAN)$(IS_SYSTEMCTL),truefalse) rm -f ${DEST_INIT}/pcsd else rm -f ${DEST_SYSTEMD_SYSTEM}/pcsd.service rm -f ${DEST_SYSTEMD_SYSTEM}/pcs_snmp_agent.service endif rm -f ${DESTDIR}/etc/pam.d/pcsd rm -rf ${DESTDIR}/var/lib/pcsd rm -f ${DEST_SNMP_MIB}/PCMK-PCS*-MIB.txt newversion: $(PYTHON) newversion.py # CODE QUALITY # =========== install_pip: requirements.txt $(PYTHON) -m pip install --upgrade -r $< pylint_requirements: install_pip pylint: time $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities pcs pcs_test get_lxml_stubs: mkdir -p $(BUNDLE_LOCAL_DIR)/stubs git clone https://github.com/JelleZijlstra/lxml-stubs.git $(BUNDLE_LOCAL_DIR)/stubs mypy_requirements: install_pip bundle_pyagentx_local get_lxml_stubs mypy: time $(PYTHON) -m mypy -p pcs python_static_code_analysis_reqirements: pylint_requirements mypy_requirements python_static_code_analysis: pylint mypy # RPM BUILD # ========= RPM_BUILD_DIR = rpm_build SPEC = pcs.spec GIT_COMMIT_HASH := $(shell git rev-parse HEAD) ifndef GIT_TAG ifeq ($(shell git describe --tag --exact-match > /dev/null; echo $$?),0) GIT_TAG := $(shell git describe --tag --exact-match) endif endif GIT_LAST_TAG := $(strip $(shell git describe --abbrev=0 --tags)) ifndef GIT_TAG DIST_VERSION_NAME := $(GIT_COMMIT_HASH) else DIST_VERSION_NAME := $(GIT_TAG) GIT_LAST_TAG := $(GIT_TAG) endif DIST_NAME := pcs-$(DIST_VERSION_NAME) DIST_ARCHIVE_NAME := $(DIST_NAME).tar.gz RPMBUILDOPTS = --define "_sourcedir $(PWD)/$(RPM_BUILD_DIR)" \ --define "_specdir $(PWD)/$(RPM_BUILD_DIR)" \ --define "_builddir $(PWD)/$(RPM_BUILD_DIR)" \ --define "_srcrpmdir $(PWD)/$(RPM_BUILD_DIR)" \ --define "_rpmdir $(PWD)/$(RPM_BUILD_DIR)" BUNDLE_CONGIG_FILE := $(RPM_BUILD_DIR)/pcsd-bundle-config $(RPM_BUILD_DIR): mkdir -p $@ clean: $(PYTHON) setup.py clean rm -rf $(RPM_BUILD_DIR) rm -f $(SPEC) rm -rf $(BUNDLE_LOCAL_DIR) rm -f pcs-*.tar.gz dist: clean rm -rf /tmp/$(DIST_NAME) mkdir -p /tmp/$(DIST_NAME) cp -r . 
/tmp/$(DIST_NAME) tar -zcf $(DIST_ARCHIVE_NAME) -C /tmp $(DIST_NAME) rm -rf /tmp/$(DIST_NAME) $(BUNDLE_CONGIG_FILE): $(RPM_BUILD_DIR) rm -f $@ echo '---' >> $@ echo 'BUNDLE_FROZEN: "true"' >> $@ echo 'BUNDLE_PATH: "vendor/bundle"' >> $@ echo 'BUNDLE_DISABLE_SHARED_GEMS: "true"' >> $@ echo "BUNDLE_BUILD: \"--with-ldflags='-Wl,-z,now -Wl,-z,relro'\"" >> $@ $(SPEC): $(SPEC).in rm -f $@-t $@ date="$(shell LC_ALL=C date "+%a %b %d %Y")" && \ gitversion="$(GIT_LAST_TAG)" && \ numcommit=`git rev-list $$gitversion..HEAD | wc -l` && \ gitcommit="$(GIT_COMMIT_HASH)" && \ sed \ -e "s#@VERSION@#$$gitversion#g" \ -e "s#@NUMCOMMIT@#$$numcommit#g" \ -e "s#@COMMIT@#$$gitcommit#g" \ -e "s#@DATE@#$$date#g" \ $< > $@-t; \ chmod a-w $@-t mv $@-t $@ sources: dist $(SPEC) $(RPM_BUILD_DIR) cd $(RPM_BUILD_DIR) && \ cp ../$(DIST_ARCHIVE_NAME) $(DIST_ARCHIVE_NAME) && \ cp ../$(SPEC) $(SPEC) && \ spectool -S $(SPEC) | sed -En "s/^[^ ]+ (.*)$$/\1/p" | grep "^http.*" | xargs -n 1 curl -OL $(MAKE) $(BUNDLE_CONGIG_FILE) srpm: sources cd $(RPM_BUILD_DIR) && \ rpmbuild $(RPMBUILDOPTS) --nodeps -bs $(SPEC) rpm: sources cd $(RPM_BUILD_DIR) && \ rpmbuild $(RPMBUILDOPTS) -ba $(SPEC) pcs-0.10.4/README.md000066400000000000000000000115361356771603100136570ustar00rootroot00000000000000## PCS - Pacemaker/Corosync Configuration System Pcs is a Corosync and Pacemaker configuration tool. It permits users to easily view, modify and create Pacemaker based clusters. Pcs contains pcsd, a pcs daemon, which operates as a remote server for pcs and provides a web UI. --- ### Pcs Versions There are two pcs branches currently: * master [![Build Status](https://travis-ci.org/ClusterLabs/pcs.svg?branch=master)](https://travis-ci.org/ClusterLabs/pcs) * This is where pcs-0.10 lives. * Clusters running Pacemaker 2.x on top of Corosync 3.x are supported. * The main development happens here. * pcs-0.9 [![Build Status](https://travis-ci.org/ClusterLabs/pcs.svg?branch=pcs-0.9)](https://travis-ci.org/ClusterLabs/pcs) * Clusters running Pacemaker 1.x on top of Corosync 2.x or Corosync 1.x with CMAN are supported. * This branch is in maintenance mode - bugs are being fixed but only a subset of new features lands here. --- ### Dependencies These are the runtime dependencies of pcs and pcsd: * python 3.6+ * python3-lxml * python3-pycurl * python3-setuptools * python3-pyOpenSSL (python3-openssl) * python3-tornado 6.x * ruby 2.2.0+ * killall (package psmisc) * openssl * corosync 3.x * pacemaker 2.x It is also recommended to have these: * python3-clufter * liberation fonts (package liberation-sans-fonts or fonts-liberation or fonts-liberation2) * overpass fonts (package overpass-fonts) --- ### Installation from Source Apart from the dependencies listed above, these are also required for installation: * python development files (package python3-devel) * ruby development files (package ruby-devel) * rubygems * rubygem bundler (package rubygem-bundler or ruby-bundler or bundler) * gcc * gcc-c++ * FFI development files (package libffi-devel or libffi-dev) * fontconfig * printf (package coreutils) * redhat-rpm-config (if you are using Fedora) * wget (to download bundled libraries) During the installation, all required rubygems are automatically downloaded and compiled. 
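The Makefile honors a number of variables that can be overridden on the command line, for example `PYTHON`, `PREFIX`, `SYSTEMD_UNIT_DIR`, `LIB_DIR` and `BUILD_GEMS`, all defined near the top of the Makefile. As a rough sketch, a customized installation might look like this (the values shown are purely illustrative, adjust them to your distribution):

```shell
# make install PREFIX=/usr/local PYTHON=/usr/bin/python3
```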
To install pcs and pcsd run the following in a terminal:

```shell
# tar -xzvf pcs-0.10.4.tar.gz
# cd pcs-0.10.4
# make install
```

If you are using GNU/Linux with systemd, it is now time to:

```shell
# systemctl daemon-reload
```

Start pcsd and make it start on boot:

```shell
# systemctl start pcsd
# systemctl enable pcsd
```

---

### Packages

Currently this is built into Fedora, RHEL and its clones, and Debian and its
derivatives.
* [Fedora package git repositories](https://src.fedoraproject.org/rpms/pcs)
* [Current Fedora .spec](https://src.fedoraproject.org/rpms/pcs/blob/master/f/pcs.spec)
* [Debian-HA project home page](https://wiki.debian.org/Debian-HA)

---

### Quick Start

* **Authenticate cluster nodes**

  Set the same password for the `hacluster` user on all nodes.

  ```shell
  # passwd hacluster
  ```

  To authenticate the nodes, run the following command on one of the nodes
  (replacing node1, node2, node3 with a list of nodes in your future cluster).
  Specify all your cluster nodes in the command. Make sure pcsd is running on
  all nodes.

  ```shell
  # pcs host auth node1 node2 node3 -u hacluster
  ```

* **Create a cluster**

  To create a cluster run the following command on one node (replacing
  cluster\_name with a name of your cluster and node1, node2, node3 with a
  list of nodes in the cluster). `--start` and `--enable` will start your
  cluster and configure the nodes to start the cluster on boot, respectively.

  ```shell
  # pcs cluster setup cluster_name node1 node2 node3 --start --enable
  ```

* **Check the cluster status**

  After a few moments the cluster should start up and you can get the status
  of the cluster.

  ```shell
  # pcs status
  ```

* **Add cluster resources**

  After this you can add stonith agents and resources:

  ```shell
  # pcs -h stonith create
  ```

  and

  ```shell
  # pcs -h resource create
  ```

---

### Accessing the Web UI

Apart from the command line interface you can use the web user interface to
view and configure your cluster. To access the web UI open a browser to the
following URL (replace nodename with the address of your node):

```
https://nodename:2224
```

Log in as the `hacluster` user.

---

### Further Documentation

[ClusterLabs website](https://clusterlabs.org) is an excellent place to learn
more about Pacemaker clusters.
* [ClusterLabs quick start](https://clusterlabs.org/quickstart.html)
* [Clusters from Scratch](https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html/Clusters_from_Scratch/index.html)
* [ClusterLabs documentation page](https://clusterlabs.org/pacemaker/doc/)

---

### Inquiries

If you have any bug reports or feature requests please feel free to open a
GitHub issue on the pcs project. Alternatively you can use the ClusterLabs
[users mailing list](https://oss.clusterlabs.org/mailman/listinfo/users),
which is also a great place to ask questions related to Pacemaker clusters.
pcs-0.10.4/mypy.ini000066400000000000000000000017211356771603100140720ustar00rootroot00000000000000[mypy] mypy_path = ./pcs/bundled/stubs:./pcs/bundled/packages # Modules and packages with full support have more strict checks enabled [mypy-pcs.lib.cib.resource.relations] disallow_untyped_defs = True [mypy-pcs.lib.commands.status] disallow_untyped_defs = True [mypy-pcs.common.pacemaker.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.common.printable_tree] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.common.interface.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.resource.relations] disallow_untyped_defs = True # We don't want to type check tests [mypy-pcs_test.*] ignore_errors = True # Modules with issues in typehinting: # TODO: fix [mypy-pcs.daemon.*] ignore_errors = True # External libraries [mypy-xml.dom.*] ignore_missing_imports = True [mypy-pyagentx.*] ignore_errors = True [mypy-clufter.*] ignore_missing_imports = True [mypy-OpenSSL] ignore_missing_imports = True pcs-0.10.4/newversion.py000066400000000000000000000032741356771603100151510ustar00rootroot00000000000000#!/usr/bin/python3 import sys import os import locale import datetime sys.path.insert( 0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "pcs") ) import settings locale.setlocale(locale.LC_ALL, ("en_US", "UTF-8")) # Get the current version, increment by 1, verify changes, git commit & tag pcs_version_split = settings.pcs_version.split('.') pcs_version_split[2] = str(int(pcs_version_split[2]) + 1) new_version = ".".join(pcs_version_split) print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' setup.py")) print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcs/settings_default.py")) print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcsd/bootstrap.rb")) print(os.system("sed -i 's/\#\# \[Unreleased\]/\#\# ["+new_version+"] - "+datetime.date.today().strftime('%Y-%m-%d')+"/' CHANGELOG.md")) def manpage_head(component, package="pcs"): return '.TH {component} "8" "{date}" "{package} {version}" "System Administration Utilities"'.format( component=component.upper(), date=datetime.date.today().strftime('%B %Y'), version=new_version, package=package, ) print(os.system("sed -i '1c " + manpage_head("pcs") + "' pcs/pcs.8")) print(os.system("sed -i '1c " + manpage_head("pcsd") + "' pcsd/pcsd.8")) print(os.system( "sed -i '1c {man_head}' pcs/snmp/pcs_snmp_agent.8".format( man_head=manpage_head("pcs_snmp_agent", package="pcs-snmp"), ) )) print(os.system("git diff")) print("Look good? 
(y/n)") choice = sys.stdin.read(1) if choice != "y": print("Ok, exiting") sys.exit(0) print(os.system("git commit -a -m 'Bumped to "+new_version+"'")) print(os.system("git tag "+new_version)) pcs-0.10.4/pcs.spec.in000066400000000000000000000351241356771603100144450ustar00rootroot00000000000000%global pcs_git_version @VERSION@ %global numcommit @NUMCOMMIT@ %global commit @COMMIT@ Name: pcs Version: %{pcs_git_version} Release: 99.git.%{numcommit}.%{commit}.1%{?dist} # https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses # GPLv2: pcs # ASL 2.0: tornado # MIT: handlebars License: GPLv2 and ASL 2.0 and MIT URL: https://github.com/ClusterLabs/pcs Summary: Pacemaker Configuration System %if "%{numcommit}" == "0" %global pcs_source_name %{name}-%{pcs_git_version} %else %global pcs_source_name %{name}-%{commit} %endif # ui_commit can be determined by hash, tag or branch %global ui_commit 0.1.1 %global ui_src_name pcs-web-ui-%{ui_commit} %global pcs_snmp_pkg_name pcs-snmp %global pyagentx_version 0.4.pcs.2 %global tornado_version 6.0.3 %global version_rubygem_backports 3.11.4 %global version_rubygem_ethon 0.11.0 %global version_rubygem_ffi 1.9.25 %global version_rubygem_json 2.1.0 %global version_rubygem_mustermann 1.0.3 %global version_rubygem_open4 1.3.4 %global version_rubygem_rack 2.0.6 %global version_rubygem_rack_protection 2.0.4 %global version_rubygem_rack_test 1.0.0 %global version_rubygem_sinatra 2.0.4 %global version_rubygem_tilt 2.0.9 # We do not use _libdir macro because upstream is not prepared for it. # Pcs does not include binaries and thus it should live in /usr/lib. Tornado # and gems include binaries and thus it should live in /usr/lib64. But the # path to tornado/gems is hardcoded in pcs sources. Modify hard links in pcs # sources is not the way since then rpmdiff complains that the same file has # different content in different architectures. %global pcs_libdir %{_prefix}/lib %global bundled_src_dir pcs/bundled %global pcsd_public_dir pcsd/public %global rubygem_cache_dir pcsd/vendor/cache %global rubygem_bundle_dir pcsd/vendor/bundle/ruby # mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test from /usr/bin/env ruby to #!/usr/bin/ruby #*** ERROR: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/test.ru has shebang which doesn't start with '/' (../../bin/rackup) #mangling shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/rackup_stub.rb from /usr/bin/env ruby to #!/usr/bin/ruby #*** WARNING: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/sample_rackup.ru is executable but has empty or no shebang, removing executable bit #*** WARNING: ./usr/lib/pcsd/vendor/bundle/ruby/gems/rack-2.0.5/test/cgi/lighttpd.conf is executable but has empty or no shebang, removing executable bit #*** ERROR: ambiguous python shebang in /usr/lib/pcsd/vendor/bundle/ruby/gems/ffi-1.9.25/ext/ffi_c/libffi/generate-darwin-source-and-headers.py: #!/usr/bin/env python. Change it to python3 (or python2) explicitly. %undefine __brp_mangle_shebangs # https://fedoraproject.org/wiki/Changes/Avoid_usr_bin_python_in_RPM_Build#Python_bytecompilation # Enforce python3 because bytecompilation of tornado produced warnings: # DEPRECATION WARNING: python2 invoked with /usr/bin/python. # Use /usr/bin/python3 or /usr/bin/python2 # /usr/bin/python will be removed or switched to Python 3 in the future. 
%global __python %{__python3} Source0: %{pcs_source_name}.tar.gz Source1: pcsd-bundle-config Source41: https://github.com/ondrejmular/pyagentx/archive/v%{pyagentx_version}/pyagentx-%{pyagentx_version}.tar.gz Source42: https://github.com/tornadoweb/tornado/archive/v%{tornado_version}/tornado-%{tornado_version}.tar.gz Source81: https://rubygems.org/downloads/backports-%{version_rubygem_backports}.gem Source82: https://rubygems.org/downloads/ethon-%{version_rubygem_ethon}.gem Source83: https://rubygems.org/downloads/ffi-%{version_rubygem_ffi}.gem Source84: https://rubygems.org/downloads/json-%{version_rubygem_json}.gem Source86: https://rubygems.org/downloads/mustermann-%{version_rubygem_mustermann}.gem Source87: https://rubygems.org/downloads/open4-%{version_rubygem_open4}.gem Source88: https://rubygems.org/downloads/rack-%{version_rubygem_rack}.gem Source89: https://rubygems.org/downloads/rack-protection-%{version_rubygem_rack_protection}.gem Source90: https://rubygems.org/downloads/rack-test-%{version_rubygem_rack_test}.gem Source91: https://rubygems.org/downloads/sinatra-%{version_rubygem_sinatra}.gem Source92: https://rubygems.org/downloads/tilt-%{version_rubygem_tilt}.gem Source100: https://github.com/idevat/pcs-web-ui/archive/%{ui_commit}/%{ui_src_name}.tar.gz Source101: https://github.com/idevat/pcs-web-ui/releases/download/%{ui_commit}/pcs-web-ui-node-modules-%{ui_commit}.tar.xz #printf from coreutils is used in makefile BuildRequires: coreutils BuildRequires: execstack BuildRequires: tar # python for pcs %if 0%{?fedora} >= 29 BuildRequires: python3 >= 3.6 BuildRequires: python3-setuptools %endif %if 0%{?rhel} >= 8 BuildRequires: platform-python BuildRequires: platform-python-setuptools %endif BuildRequires: python3-devel # gcc for compiling custom rubygems BuildRequires: gcc BuildRequires: gcc-c++ # ruby and gems for pcsd BuildRequires: ruby >= 2.2.0 BuildRequires: ruby-devel BuildRequires: rubygems # for post, preun and postun macros BuildRequires: systemd # pcsd fonts and font management tools for creating symlinks to fonts BuildRequires: fontconfig %if %{undefined suse_version} BuildRequires: liberation-sans-fonts BuildRequires: overpass-fonts %endif # for building web ui BuildRequires: npm # python and libraries for pcs, setuptools for pcs entrypoint %if 0%{?fedora} >= 29 Requires: python3 >= 3.6 Requires: python3-setuptools %endif %if 0%{?rhel} >= 8 Requires: platform-python Requires: platform-python-setuptools %endif Requires: python3-lxml Requires: python3-clufter => 0.70.0 Requires: python3-pycurl # ruby and gems for pcsd Requires: ruby >= 2.2.0 Requires: rubygems # for killall Requires: psmisc # for working with certificates (validation etc.) 
Requires: openssl
Requires: python3-pyOpenSSL
# cluster stack and related packages
Requires: pacemaker >= 2.0.0
Requires: corosync >= 3.0
# pcs enables corosync encryption by default so we require libknet1-plugins-all
Requires: libknet1-plugins-all
# for post, preun and postun macros
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
# pam is used for authentication inside daemon (python ctypes)
# more details: https://bugzilla.redhat.com/show_bug.cgi?id=1717113
Requires: pam
# pcsd fonts
Recommends: liberation-sans-fonts
Recommends: overpass-fonts

Provides: bundled(tornado) = %{tornado_version}
Provides: bundled(backports) = %{version_rubygem_backports}
Provides: bundled(ethon) = %{version_rubygem_ethon}
Provides: bundled(ffi) = %{version_rubygem_ffi}
Provides: bundled(json) = %{version_rubygem_json}
Provides: bundled(mustermann) = %{version_rubygem_mustermann}
Provides: bundled(open4) = %{version_rubygem_open4}
Provides: bundled(rack) = %{version_rubygem_rack}
Provides: bundled(rack-protection) = %{version_rubygem_rack_protection}
Provides: bundled(rack-test) = %{version_rubygem_rack_test}
Provides: bundled(sinatra) = %{version_rubygem_sinatra}
Provides: bundled(tilt) = %{version_rubygem_tilt}

%description
pcs is a corosync and pacemaker configuration tool. It permits users to
easily view, modify and create pacemaker based clusters.

# pcs-snmp package definition
%package -n %{pcs_snmp_pkg_name}
Group: System Environment/Base
Summary: Pacemaker cluster SNMP agent
# https://fedoraproject.org/wiki/Licensing:Main?rd=Licensing#Good_Licenses
# GPLv2: pcs
# BSD-2-Clause: pyagentx
License: GPLv2 and BSD-2-Clause
URL: https://github.com/ClusterLabs/pcs

# tar for unpacking the pyagentx source tar ball
BuildRequires: tar

Requires: pcs = %{version}-%{release}
Requires: pacemaker
Requires: net-snmp

Provides: bundled(pyagentx) = %{pyagentx_version}

%description -n %{pcs_snmp_pkg_name}
SNMP agent that provides information about a pacemaker cluster to the master
agent (snmpd)

%prep
%autosetup -n %{pcs_source_name}

# prepare dirs/files necessary for building web ui
# inside SOURCE100 there is only the directory %%{ui_src_name}
tar -xzf %SOURCE100 -C %{pcsd_public_dir}
tar -xf %SOURCE101 -C %{pcsd_public_dir}/%{ui_src_name}

# prepare dirs/files necessary for building all bundles
# -----------------------------------------------------
# 1) configuration for rubygems
mkdir -p pcsd/.bundle
cp -f %SOURCE1 pcsd/.bundle/config

# 2) rubygems sources
mkdir -p pcsd/vendor/cache
cp -f %SOURCE81 pcsd/vendor/cache
cp -f %SOURCE82 pcsd/vendor/cache
cp -f %SOURCE83 pcsd/vendor/cache
cp -f %SOURCE84 pcsd/vendor/cache
cp -f %SOURCE86 pcsd/vendor/cache
cp -f %SOURCE87 pcsd/vendor/cache
cp -f %SOURCE88 pcsd/vendor/cache
cp -f %SOURCE89 pcsd/vendor/cache
cp -f %SOURCE90 pcsd/vendor/cache
cp -f %SOURCE91 pcsd/vendor/cache
cp -f %SOURCE92 pcsd/vendor/cache

# 3) dir for python bundles
mkdir -p %{bundled_src_dir}

# 4) sources for pyagentx
tar -xzf %SOURCE41 -C %{bundled_src_dir}
mv %{bundled_src_dir}/pyagentx-%{pyagentx_version} %{bundled_src_dir}/pyagentx
cp %{bundled_src_dir}/pyagentx/LICENSE.txt pyagentx_LICENSE.txt
cp %{bundled_src_dir}/pyagentx/CONTRIBUTORS.txt pyagentx_CONTRIBUTORS.txt
cp %{bundled_src_dir}/pyagentx/README.md pyagentx_README.md

# 5) sources for tornado
tar -xzf %SOURCE42 -C %{bundled_src_dir}
mv %{bundled_src_dir}/tornado-%{tornado_version} %{bundled_src_dir}/tornado
cp %{bundled_src_dir}/tornado/LICENSE tornado_LICENSE
cp %{bundled_src_dir}/tornado/README.rst tornado_README.rst

%build
%define debug_package %{nil}
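# Note: %%build is intentionally left empty (debug packages are disabled via
# the %%define above); the bundled rubygems and the web UI are built in
# %%install below.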
%install %if 0%{?fedora} >= 29 || 0%{?suse_version} > 1500 %define gem_install_params --no-document %else %define gem_install_params --no-rdoc --no-ri %endif # build bundled rubygems (in main install it is disabled by BUILD_GEMS=false) mkdir -p %{rubygem_bundle_dir} gem install \ --force --verbose -l --no-user-install %{gem_install_params} \ -i %{rubygem_bundle_dir} \ %{rubygem_cache_dir}/backports-%{version_rubygem_backports}.gem \ %{rubygem_cache_dir}/ethon-%{version_rubygem_ethon}.gem \ %{rubygem_cache_dir}/ffi-%{version_rubygem_ffi}.gem \ %{rubygem_cache_dir}/json-%{version_rubygem_json}.gem \ %{rubygem_cache_dir}/mustermann-%{version_rubygem_mustermann}.gem \ %{rubygem_cache_dir}/open4-%{version_rubygem_open4}.gem \ %{rubygem_cache_dir}/rack-%{version_rubygem_rack}.gem \ %{rubygem_cache_dir}/rack-protection-%{version_rubygem_rack_protection}.gem \ %{rubygem_cache_dir}/rack-test-%{version_rubygem_rack_test}.gem \ %{rubygem_cache_dir}/sinatra-%{version_rubygem_sinatra}.gem \ %{rubygem_cache_dir}/tilt-%{version_rubygem_tilt}.gem \ -- '--with-ldflags=-Wl,-z,relro -Wl,-z,ibt -Wl,-z,now -Wl,--gc-sections' \ '--with-cflags=-O2 -ffunction-sections' # We can remove files required for gem compilation rm -rf %{rubygem_bundle_dir}/gems/ffi-%{version_rubygem_ffi}/ext rm -rf %{rubygem_bundle_dir}/gems/json-%{version_rubygem_json}/ext # With this file there is "File is not stripped" problem during rpmdiff # See https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping for fname in `find %{rubygem_bundle_dir}/extensions -type f -name "*.so"`; do strip ${fname} done # build web ui and put it to pcsd make -C %{pcsd_public_dir}/%{ui_src_name} build mv %{pcsd_public_dir}/%{ui_src_name}/build pcsd/public/ui rm -r %{pcsd_public_dir}/%{ui_src_name} # main pcs install make install \ DESTDIR=$RPM_BUILD_ROOT \ PREFIX=%{_prefix} \ SYSTEMD_UNIT_DIR=%{_unitdir} \ LIB_DIR=%{pcs_libdir} \ PYTHON=%{__python3} \ PYTHON_SITELIB=%{python3_sitelib} \ BASH_COMPLETION_DIR=%{_datadir}/bash-completion/completions \ BUNDLE_PYAGENTX_SRC_DIR=`readlink -f %{bundled_src_dir}/pyagentx` \ BUNDLE_TORNADO_SRC_DIR=`readlink -f %{bundled_src_dir}/tornado` \ BUILD_GEMS=false \ SYSTEMCTL_OVERRIDE=true \ hdrdir="%{_includedir}" \ rubyhdrdir="%{_includedir}" \ includedir="%{_includedir}" # With this file there is "File is not stripped" problem during rpmdiff # See https://docs.engineering.redhat.com/display/HTD/rpmdiff-elf-stripping for fname in `find ${RPM_BUILD_ROOT}%{pcs_libdir}/pcs/bundled/packages/tornado/ -type f -name "*.so"`; do strip ${fname} done #after the ruby gem compilation we do not need ruby gems in the cache rm -r -v $RPM_BUILD_ROOT%{pcs_libdir}/%{rubygem_cache_dir} pcsd_dir=$RPM_BUILD_ROOT%{pcs_libdir}/pcsd #remove pcsd tests, we do not distribute them in the rpm rm -r -v ${pcsd_dir}/test # remove javascript testing files rm -r -v ${pcsd_dir}/public/js/dev %post %systemd_post pcsd.service %post -n %{pcs_snmp_pkg_name} %systemd_post pcs_snmp_agent.service %preun %systemd_preun pcsd.service %preun -n %{pcs_snmp_pkg_name} %systemd_preun pcs_snmp_agent.service %postun %systemd_postun_with_restart pcsd.service %postun -n %{pcs_snmp_pkg_name} %systemd_postun_with_restart pcs_snmp_agent.service %files %doc CHANGELOG.md %doc README.md %doc tornado_README.rst %license tornado_LICENSE %license COPYING %{python3_sitelib}/pcs %{python3_sitelib}/pcs-*.egg-info %{_sbindir}/pcs %{_sbindir}/pcsd %{pcs_libdir}/pcs/pcs_internal %{pcs_libdir}/pcsd/* %{pcs_libdir}/pcsd/.bundle/config %{pcs_libdir}/pcs/bundled/packages/tornado* 
%{_unitdir}/pcsd.service %{_datadir}/bash-completion/completions/pcs %{_sharedstatedir}/pcsd %{_sysconfdir}/pam.d/pcsd %dir %{_var}/log/pcsd %config(noreplace) %{_sysconfdir}/logrotate.d/pcsd %config(noreplace) %{_sysconfdir}/sysconfig/pcsd %ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/cfgsync_ctl %ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/known-hosts %ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.cookiesecret %ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.crt %ghost %config(noreplace) %attr(0600,root,root) %{_sharedstatedir}/pcsd/pcsd.key %ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_settings.conf %ghost %config(noreplace) %attr(0644,root,root) %{_sharedstatedir}/pcsd/pcs_users.conf %{_mandir}/man8/pcs.* %{_mandir}/man8/pcsd.* %exclude %{pcs_libdir}/pcsd/*.debian %exclude %{pcs_libdir}/pcsd/pcsd.service %exclude %{pcs_libdir}/pcsd/pcsd.conf %exclude %{pcs_libdir}/pcsd/pcsd.8 %exclude %{pcs_libdir}/pcsd/public/js/dev/* %exclude %{pcs_libdir}/pcsd/Gemfile %exclude %{pcs_libdir}/pcsd/Gemfile.lock %exclude %{pcs_libdir}/pcsd/Makefile %exclude %{python3_sitelib}/pcs/bash_completion %exclude %{python3_sitelib}/pcs/pcs.8 %exclude %{python3_sitelib}/pcs/pcs %files -n %{pcs_snmp_pkg_name} %{pcs_libdir}/pcs/pcs_snmp_agent %{pcs_libdir}/pcs/bundled/packages/pyagentx* %{_unitdir}/pcs_snmp_agent.service %{_datadir}/snmp/mibs/PCMK-PCS*-MIB.txt %{_mandir}/man8/pcs_snmp_agent.* %config(noreplace) %{_sysconfdir}/sysconfig/pcs_snmp_agent %doc CHANGELOG.md %doc pyagentx_CONTRIBUTORS.txt %doc pyagentx_README.md %license COPYING %license pyagentx_LICENSE.txt %changelog * @DATE@ Autotools generated version - @VERSION@-99.git.@NUMCOMMIT@.@COMMIT@.1 - Autotools generated version pcs-0.10.4/pcs/000077500000000000000000000000001356771603100131575ustar00rootroot00000000000000pcs-0.10.4/pcs/COPYING000066400000000000000000000432541356771603100142220ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. 
For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
pcs-0.10.4/pcs/__init__.py000066400000000000000000000000001356771603100152560ustar00rootroot00000000000000pcs-0.10.4/pcs/acl.py000066400000000000000000000173711356771603100143010ustar00rootroot00000000000000from pcs import ( prop, utils, ) from pcs.cli.common.errors import CmdLineInputError from pcs.common.tools import indent from pcs.lib.pacemaker.values import is_true def _print_list_of_objects(obj_list, transformation_fn): out = [] for obj in obj_list: out += transformation_fn(obj) if out: print("\n".join(out)) def show_acl_config(lib, argv, modifiers): """ Options: * -f - CIB file """ # TODO move to lib once lib supports cluster properties # enabled/disabled should be part of the structure returned # by lib.acl.get_config modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() properties = utils.get_set_properties( defaults=prop.get_default_properties() ) acl_enabled = properties.get("enable-acl", "").lower() if is_true(acl_enabled): print("ACLs are enabled") else: print("ACLs are disabled, run 'pcs acl enable' to enable") print() data = lib.acl.get_config() _print_list_of_objects(data.get("target_list", []), target_to_str) _print_list_of_objects(data.get("group_list", []), group_to_str) _print_list_of_objects(data.get("role_list", []), role_to_str) def acl_enable(lib, argv, modifiers): """ Options: * -f - CIB file """ # TODO move to lib once lib supports cluster properties modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() prop.set_property(lib, ["enable-acl=true"], modifiers.get_subset("-f")) def acl_disable(lib, argv, modifiers): """ Options: * -f - CIB file """ # TODO move to lib once lib supports cluster properties modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() prop.set_property(lib, ["enable-acl=false"], modifiers.get_subset("-f")) def user_create(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() user_name, role_list = argv[0], argv[1:] lib.acl.create_target(user_name, role_list) def user_delete(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_target(argv[0]) def group_create(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() group_name, role_list = argv[0], argv[1:] lib.acl.create_group(group_name, role_list) def group_delete(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_group(argv[0]) def argv_to_permission_info_list(argv): """ Commandline options: no options """ if len(argv) % 3 != 0: raise CmdLineInputError() #wrapping by list, #because in python3 zip() returns an iterator instead of a list #and the loop below makes iteration over it permission_info_list = list(zip( [permission.lower() for permission in argv[::3]], [scope_type.lower() for scope_type in argv[1::3]], argv[2::3] )) for permission, scope_type, dummy_scope in permission_info_list: if( permission not in ['read', 'write', 'deny'] or scope_type not in ['xpath', 'id'] ): raise CmdLineInputError() return permission_info_list def role_create(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() role_id = argv.pop(0) description = "" desc_key = 'description=' if argv and argv[0].startswith(desc_key) and 
len(argv[0]) > len(desc_key): description = argv.pop(0)[len(desc_key):] permission_info_list = argv_to_permission_info_list(argv) lib.acl.create_role(role_id, permission_info_list, description) def role_delete(lib, argv, modifiers): """ Options: * -f - CIB file * --autodelete - autodelete empty targets, groups """ modifiers.ensure_only_supported("-f", "--autodelete") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_role( argv[0], autodelete_users_groups=modifiers.get("--autodelete") ) def _role_assign_unassign(argv, keyword, not_specific_fn, user_fn, group_fn): """ Commandline options: no options """ argv_len = len(argv) if argv_len < 2: raise CmdLineInputError() if argv_len == 2: not_specific_fn(*argv) elif argv_len == 3: role_id, something, ug_id = argv if something == keyword: not_specific_fn(role_id, ug_id) elif something == "user": user_fn(role_id, ug_id) elif something == "group": group_fn(role_id, ug_id) else: raise CmdLineInputError() elif argv_len == 4 and argv[1] == keyword and argv[2] in ["group", "user"]: role_id, _, user_group, ug_id = argv if user_group == "user": user_fn(role_id, ug_id) else: group_fn(role_id, ug_id) else: raise CmdLineInputError() def role_assign(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") _role_assign_unassign( argv, "to", lib.acl.assign_role_not_specific, lib.acl.assign_role_to_target, lib.acl.assign_role_to_group ) def role_unassign(lib, argv, modifiers): """ Options: * -f - CIB file * --autodelete - autodelete empty targets, groups """ modifiers.ensure_only_supported("-f", "--autodelete") _role_assign_unassign( argv, "from", lambda role_id, ug_id: lib.acl.unassign_role_not_specific( role_id, ug_id, modifiers.get("--autodelete") ), lambda role_id, ug_id: lib.acl.unassign_role_from_target( role_id, ug_id, modifiers.get("--autodelete") ), lambda role_id, ug_id: lib.acl.unassign_role_from_group( role_id, ug_id, modifiers.get("--autodelete") ) ) def permission_add(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) < 4: raise CmdLineInputError() role_id, argv_next = argv[0], argv[1:] lib.acl.add_permission(role_id, argv_to_permission_info_list(argv_next)) def run_permission_delete(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_permission(argv[0]) def _target_group_to_str(type_name, obj): return ["{0}: {1}".format(type_name.title(), obj.get("id"))] + indent( [" ".join(["Roles:"] + obj.get("role_list", []))] ) def target_to_str(target): return _target_group_to_str("user", target) def group_to_str(group): return _target_group_to_str("group", group) def role_to_str(role): out = [] if role.get("description"): out.append("Description: {0}".format(role.get("description"))) out += map(_permission_to_str, role.get("permission_list", [])) return ["Role: {0}".format(role.get("id"))] + indent(out) def _permission_to_str(permission): out = ["Permission:", permission.get("kind")] if permission.get("xpath") is not None: out += ["xpath", permission.get("xpath")] elif permission.get("reference") is not None: out += ["id", permission.get("reference")] out.append("({0})".format(permission.get("id"))) return " ".join(out) pcs-0.10.4/pcs/alert.py000066400000000000000000000143341356771603100146450ustar00rootroot00000000000000import json from functools import partial from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import 
prepare_options, group_by_keywords from pcs.common.tools import indent parse_cmd_sections = partial(group_by_keywords, implicit_first_group_key="main") def ensure_only_allowed_options(parameter_dict, allowed_list): for arg, value in parameter_dict.items(): if arg not in allowed_list: raise CmdLineInputError( "Unexpected parameter '{0}={1}'".format(arg, value) ) def alert_add(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() sections = parse_cmd_sections(argv, set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["id", "description", "path"]) lib.alert.create_alert( main_args.get("id", None), main_args.get("path", None), prepare_options(sections["options"]), prepare_options(sections["meta"]), main_args.get("description", None) ) def alert_update(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() alert_id = argv[0] sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["description", "path"]) lib.alert.update_alert( alert_id, main_args.get("path", None), prepare_options(sections["options"]), prepare_options(sections["meta"]), main_args.get("description", None) ) def alert_remove(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() lib.alert.remove_alert(argv) def recipient_add(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) * --force - allows not unique recipient values """ modifiers.ensure_only_supported("-f", "--force") if len(argv) < 2: raise CmdLineInputError() alert_id = argv[0] sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["description", "id", "value"]) lib.alert.add_recipient( alert_id, main_args.get("value", None), prepare_options(sections["options"]), prepare_options(sections["meta"]), recipient_id=main_args.get("id", None), description=main_args.get("description", None), allow_same_value=modifiers.get("--force") ) def recipient_update(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) * --force - allows not unique recipient values """ modifiers.ensure_only_supported("-f", "--force") if not argv: raise CmdLineInputError() recipient_id = argv[0] sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["description", "value"]) lib.alert.update_recipient( recipient_id, prepare_options(sections["options"]), prepare_options(sections["meta"]), recipient_value=main_args.get("value", None), description=main_args.get("description", None), allow_same_value=modifiers.get("--force") ) def recipient_remove(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() lib.alert.remove_recipient(argv) def _nvset_to_str(nvset_obj): # TODO duplicite to pcs.resource._nvpairs_strings key_val = { nvpair_obj["name"]: nvpair_obj["value"] for nvpair_obj in nvset_obj } output = [] for name, value in sorted(key_val.items()): if " " in value: value = f'"{value}"' output.append(f"{name}={value}") return " ".join(output) def 
__description_attributes_to_str(obj): output = [] if obj.get("description"): output.append("Description: {desc}".format(desc=obj["description"])) if obj.get("instance_attributes"): output.append("Options: {attributes}".format( attributes=_nvset_to_str(obj["instance_attributes"]) )) if obj.get("meta_attributes"): output.append("Meta options: {attributes}".format( attributes=_nvset_to_str(obj["meta_attributes"]) )) return output def _alert_to_str(alert): content = [] content.extend(__description_attributes_to_str(alert)) recipients = [] for recipient in alert.get("recipient_list", []): recipients.extend(_recipient_to_str(recipient)) if recipients: content.append("Recipients:") content.extend(indent(recipients, 1)) return ["Alert: {alert_id} (path={path})".format( alert_id=alert["id"], path=alert["path"] )] + indent(content, 1) def _recipient_to_str(recipient): return ["Recipient: {id} (value={value})".format( value=recipient["value"], id=recipient["id"] )] + indent(__description_attributes_to_str(recipient), 1) def print_alert_config(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() print("\n".join(alert_config_lines(lib))) def alert_config_lines(lib): lines = ["Alerts:"] alert_list = lib.alert.get_all_alerts() if alert_list: for alert in alert_list: lines.extend(indent(_alert_to_str(alert), 1)) else: lines.append(" No alerts defined") return lines def print_alerts_in_json(lib, argv, modifiers): """ This is used only by pcsd, will be removed in new architecture Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() print(json.dumps(lib.alert.get_all_alerts())) pcs-0.10.4/pcs/app.py000066400000000000000000000200701356771603100143100ustar00rootroot00000000000000import getopt import os import sys import logging from pcs import ( settings, usage, utils, ) from pcs.cli.common import ( capabilities, completion, errors, parse_args, routing, ) from pcs.cli.common.reports import process_library_reports from pcs.cli.routing import ( acl, alert, booth, client, cluster, config, constraint, host, node, pcsd, prop, qdevice, quorum, resource, status, stonith, ) from pcs.lib.errors import LibraryError def _non_root_run(argv_cmd): """ This function will run commands which has to be run as root for users which are not root. If it required to run such command as root it will do that by sending it to the local pcsd and then it will exit. 
""" # matching the commands both in here and in pcsd expects -o and --options # to be at the end of a command argv_and_options = argv_cmd[:] for option, value in utils.pcs_options.items(): if parse_args.is_option_expecting_value(option): argv_and_options.extend([option, value]) else: argv_and_options.append(option) # specific commands need to be run under root account, pass them to pcsd # don't forget to allow each command in pcsd.rb in "post /run_pcs do" root_command_list = [ ['cluster', 'auth', '...'], ['cluster', 'corosync', '...'], ['cluster', 'destroy', '...'], ['cluster', 'disable', '...'], ['cluster', 'enable', '...'], ['cluster', 'node', '...'], ['cluster', 'pcsd-status', '...'], ['cluster', 'start', '...'], ['cluster', 'stop', '...'], ['cluster', 'sync', '...'], # ['config', 'restore', '...'], # handled in config.config_restore ['host', 'auth', '...'], ['host', 'deauth', '...'], ['pcsd', 'deauth', '...'], ['pcsd', 'sync-certificates'], ["quorum", "device", "status", "..."], ["quorum", "status", "..."], ["status"], ['status', 'corosync', '...'], ['status', 'pcsd', '...'], ["status", "quorum", "..."], ["status", "status", "..."], ] for root_cmd in root_command_list: if ( (argv_and_options == root_cmd) or ( root_cmd[-1] == "..." and argv_and_options[:len(root_cmd)-1] == root_cmd[:-1] ) ): # handle interactivity of 'pcs cluster auth' if argv_and_options[0:2] in [["cluster", "auth"], ["host", "auth"]]: if "-u" not in utils.pcs_options: username = utils.get_terminal_input('Username: ') argv_and_options.extend(["-u", username]) if "-p" not in utils.pcs_options: password = utils.get_terminal_password() argv_and_options.extend(["-p", password]) # call the local pcsd err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd( argv_and_options ) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) if std_out.strip(): print(std_out) if std_err.strip(): sys.stderr.write(std_err) sys.exit(exitcode) logging.basicConfig() usefile = False filename = "" def main(argv=None): # pylint: disable=global-statement # pylint: disable=too-many-branches # pylint: disable=too-many-locals # pylint: disable=too-many-statements if completion.has_applicable_environment(os.environ): print(completion.make_suggestions( os.environ, usage.generate_completion_tree_from_usage() )) sys.exit() argv = argv if argv else sys.argv[1:] utils.subprocess_setup() global filename, usefile utils.pcs_options = {} # we want to support optional arguments for --wait, so if an argument # is specified with --wait (ie. --wait=30) then we use them waitsecs = None new_argv = [] for arg in argv: if arg.startswith("--wait="): tempsecs = arg.replace("--wait=", "") if tempsecs: waitsecs = tempsecs arg = "--wait" new_argv.append(arg) argv = new_argv try: pcs_options, dummy_argv = getopt.gnu_getopt( parse_args.filter_out_non_option_negative_numbers(argv), parse_args.PCS_SHORT_OPTIONS, parse_args.PCS_LONG_OPTIONS, ) except getopt.GetoptError as err: usage.main() print(err) if err.opt in {"V", "clone", "device", "watchdog"}: # Print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. 
print(f"Hint: {errors.HINT_SYNTAX_CHANGE}") sys.exit(1) argv = parse_args.filter_out_options(argv) full = False for option, dummy_value in pcs_options: if option == "--full": full = True break for opt, val in pcs_options: if not opt in utils.pcs_options: utils.pcs_options[opt] = val else: # If any options are a list then they've been entered twice which # isn't valid utils.err("%s can only be used once" % opt) if opt in ("-h", "--help"): if not argv: usage.main() sys.exit() else: argv = [argv[0], "help"] + argv[1:] elif opt == "-f": usefile = True filename = val utils.usefile = usefile utils.filename = filename elif opt == "--corosync_conf": settings.corosync_conf_file = val elif opt == "--version": print(settings.pcs_version) if full: print(" ".join( sorted([ feat["id"] for feat in capabilities.get_pcs_capabilities() ]) )) sys.exit() elif opt == "--fullhelp": usage.full_usage() sys.exit() elif opt == "--wait": utils.pcs_options[opt] = waitsecs elif opt == "--request-timeout": request_timeout_valid = False try: timeout = int(val) if timeout > 0: utils.pcs_options[opt] = timeout request_timeout_valid = True except ValueError: pass if not request_timeout_valid: utils.err( ( "'{0}' is not a valid --request-timeout value, use " "a positive integer" ).format(val) ) logger = logging.getLogger("pcs") logger.propagate = 0 logger.handlers = [] if (os.getuid() != 0) and (argv and argv[0] != "help") and not usefile: _non_root_run(argv) cmd_map = { "resource": resource.resource_cmd, "cluster": cluster.cluster_cmd, "stonith": stonith.stonith_cmd, "property": prop.property_cmd, "constraint": constraint.constraint_cmd, "acl": acl.acl_cmd, "status": status.status_cmd, "config": config.config_cmd, "pcsd": pcsd.pcsd_cmd, "node": node.node_cmd, "quorum": quorum.quorum_cmd, "qdevice": qdevice.qdevice_cmd, "alert": alert.alert_cmd, "booth": booth.booth_cmd, "host": host.host_cmd, "client": client.client_cmd, "help": lambda lib, argv, modifiers: usage.main(), } try: routing.create_router(cmd_map, [])( utils.get_library_wrapper(), argv, utils.get_input_modifiers() ) except LibraryError as e: process_library_reports(e.args) except errors.CmdLineInputError: if argv and argv[0] in cmd_map: usage.show(argv[0], []) else: usage.main() sys.exit(1) pcs-0.10.4/pcs/bash_completion000066400000000000000000000020051356771603100162450ustar00rootroot00000000000000# bash completion for pcs _pcs_completion(){ LENGTHS=() for WORD in "${COMP_WORDS[@]}"; do LENGTHS+=(${#WORD}) done COMPREPLY=( $( \ env COMP_WORDS="${COMP_WORDS[*]}" \ COMP_LENGTHS="${LENGTHS[*]}" \ COMP_CWORD=$COMP_CWORD \ PCS_AUTO_COMPLETE=1 pcs \ ) ) #examples what we get: #pcs #COMP_WORDS: pcs COMP_LENGTHS: 3 #pcs co #COMP_WORDS: pcs co COMP_LENGTHS: 3 2 # pcs config #COMP_WORDS: pcs config COMP_LENGTHS: 3 6 # pcs config " #COMP_WORDS: pcs config " COMP_LENGTHS: 3 6 4 # pcs config "'\\n #COMP_WORDS: pcs config "'\\n COMP_LENGTHS: 3 6 5'" } # -o default # Use readline's default filename completion if the compspec generates no # matches. # -F function # The shell function function is executed in the current shell environment. # When it finishes, the possible completions are retrieved from the value of # the COMPREPLY array variable. 
complete -o default -F _pcs_completion pcs pcs-0.10.4/pcs/cli/000077500000000000000000000000001356771603100137265ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/__init__.py000066400000000000000000000000001356771603100160250ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/booth/000077500000000000000000000000001356771603100150415ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/booth/__init__.py000066400000000000000000000000001356771603100171400ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/booth/command.py000066400000000000000000000205371356771603100170400ustar00rootroot00000000000000from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import group_by_keywords, prepare_options def config_setup(lib, arg_list, modifiers): """ create booth config Options: * --force - overwrite existing * --booth-conf - booth config file * --booth-key - booth authkey file * --name - name of a booth instance """ modifiers.ensure_only_supported( "--force", "--booth-conf", "--booth-key", "--name", ) peers = group_by_keywords( arg_list, set(["sites", "arbitrators"]), keyword_repeat_allowed=False ) if "sites" not in peers or not peers["sites"]: raise CmdLineInputError() lib.booth.config_setup( peers["sites"], peers["arbitrators"], instance_name=modifiers.get("--name"), overwrite_existing=modifiers.get("--force"), ) def config_destroy(lib, arg_list, modifiers): """ destroy booth config Options: --force - ignore config load issues --name - name of a booth instance """ modifiers.ensure_only_supported("--force", "--name") if arg_list: raise CmdLineInputError() lib.booth.config_destroy( instance_name=modifiers.get("--name"), ignore_config_load_problems=modifiers.get("--force") ) def config_show(lib, arg_list, modifiers): """ print booth config Options: * --name - name of a booth instace * --request-timeout - HTTP timeout for getting config from remote host """ modifiers.ensure_only_supported("--name", "--request-timeout") if len(arg_list) > 1: raise CmdLineInputError() node = None if not arg_list else arg_list[0] print( lib.booth.config_text( instance_name=modifiers.get("--name"), node_name=node ).decode("utf-8").rstrip() ) def config_ticket_add(lib, arg_list, modifiers): """ add ticket to current configuration Options: * --force * --booth-conf - booth config file * --booth-key - booth auth key file * --name - name of a booth instace """ modifiers.ensure_only_supported( "--force", "--booth-conf", "--name", "--booth-key" ) if not arg_list: raise CmdLineInputError lib.booth.config_ticket_add( arg_list[0], prepare_options(arg_list[1:]), instance_name=modifiers.get("--name"), allow_unknown_options=modifiers.get("--force") ) def config_ticket_remove(lib, arg_list, modifiers): """ add ticket to current configuration Options: * --booth-conf - booth config file * --booth-key - booth auth key file * --name - name of a booth instace """ modifiers.ensure_only_supported("--booth-conf", "--name", "--booth-key") if len(arg_list) != 1: raise CmdLineInputError lib.booth.config_ticket_remove( arg_list[0], instance_name=modifiers.get("--name"), ) def _ticket_operation(lib_call, arg_list, booth_name): """ Commandline options: * --name - name of a booth instance """ site_ip = None if len(arg_list) == 2: site_ip = arg_list[1] elif len(arg_list) != 1: raise CmdLineInputError() ticket = arg_list[0] lib_call(ticket, site_ip=site_ip, instance_name=booth_name) def ticket_revoke(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name") _ticket_operation( 
        lib.booth.ticket_revoke, arg_list, modifiers.get("--name")
    )

def ticket_grant(lib, arg_list, modifiers):
    """
    Options:
      * --name - name of a booth instance
    """
    modifiers.ensure_only_supported("--name")
    _ticket_operation(
        lib.booth.ticket_grant, arg_list, modifiers.get("--name")
    )

def create_in_cluster(lib, arg_list, modifiers):
    """
    Options:
      * --force - allows creating the booth resource even if its agent is
        not installed
      * -f - CIB file
      * --name - name of a booth instance
    """
    modifiers.ensure_only_supported("--force", "-f", "--name")
    if len(arg_list) != 2 or arg_list[0] != "ip":
        raise CmdLineInputError()
    lib.booth.create_in_cluster(
        arg_list[1],
        instance_name=modifiers.get("--name"),
        allow_absent_resource_agent=modifiers.get("--force")
    )

def get_remove_from_cluster(resource_remove):
    # TODO resource_remove is a provisional hack until resources are moved
    # to lib
    def remove_from_cluster(lib, arg_list, modifiers):
        """
        Options:
          * --force - allow removal of multiple booth resources
          * -f - CIB file
          * --name - name of a booth instance
        """
        modifiers.ensure_only_supported("--force", "-f", "--name")
        if arg_list:
            raise CmdLineInputError()
        lib.booth.remove_from_cluster(
            resource_remove,
            instance_name=modifiers.get("--name"),
            allow_remove_multiple=modifiers.get("--force"),
        )
    return remove_from_cluster

def get_restart(resource_restart):
    # TODO resource_restart is a provisional hack until resources are moved
    # to lib
    def restart(lib, arg_list, modifiers):
        """
        Options:
          * --force - allow restarting multiple booth resources
          * --name - name of a booth instance
        """
        modifiers.ensure_only_supported("--force", "--name")
        if arg_list:
            raise CmdLineInputError()
        lib.booth.restart(
            lambda resource_id_list: resource_restart(
                lib, resource_id_list, modifiers.get_subset("--force")
            ),
            instance_name=modifiers.get("--name"),
            allow_multiple=modifiers.get("--force"),
        )
    return restart

def sync(lib, arg_list, modifiers):
    """
    Options:
      * --skip-offline - skip offline nodes
      * --name - name of a booth instance
      * --booth-conf - booth config file
      * --booth-key - booth authkey file
      * --request-timeout - HTTP timeout for file distribution
    """
    modifiers.ensure_only_supported(
        "--skip-offline", "--name", "--booth-conf", "--booth-key",
        "--request-timeout",
    )
    if arg_list:
        raise CmdLineInputError()
    lib.booth.config_sync(
        instance_name=modifiers.get("--name"),
        skip_offline_nodes=modifiers.get("--skip-offline")
    )

def enable(lib, arg_list, modifiers):
    """
    Options:
      * --name - name of a booth instance
    """
    modifiers.ensure_only_supported("--name")
    if arg_list:
        raise CmdLineInputError()
    lib.booth.enable_booth(instance_name=modifiers.get("--name"))

def disable(lib, arg_list, modifiers):
    """
    Options:
      * --name - name of a booth instance
    """
    modifiers.ensure_only_supported("--name")
    if arg_list:
        raise CmdLineInputError()
    lib.booth.disable_booth(instance_name=modifiers.get("--name"))

def start(lib, arg_list, modifiers):
    """
    Options:
      * --name - name of a booth instance
    """
    modifiers.ensure_only_supported("--name")
    if arg_list:
        raise CmdLineInputError()
    lib.booth.start_booth(instance_name=modifiers.get("--name"))

def stop(lib, arg_list, modifiers):
    """
    Options:
      * --name - name of a booth instance
    """
    modifiers.ensure_only_supported("--name")
    if arg_list:
        raise CmdLineInputError()
    lib.booth.stop_booth(instance_name=modifiers.get("--name"))

def pull(lib, arg_list, modifiers):
    """
    Options:
      * --name - name of a booth instance
      * --request-timeout - HTTP timeout for file distribution
    """
    modifiers.ensure_only_supported("--name", "--request-timeout")
    if len(arg_list) != 1:
        raise CmdLineInputError()
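    # Editor's note (hedged): the pull_config call below fetches the booth
    # config from the given node over pcsd and saves it as the local config
    # for the selected booth instance.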
lib.booth.pull_config( arg_list[0], instance_name=modifiers.get("--name"), ) def status(lib, arg_list, modifiers): """ Options: * --name - name of booth instance """ modifiers.ensure_only_supported("--name") if arg_list: raise CmdLineInputError() booth_status = lib.booth.get_status(instance_name=modifiers.get("--name")) if booth_status.get("ticket"): print("TICKETS:") print(booth_status["ticket"]) if booth_status.get("peers"): print("PEERS:") print(booth_status["peers"]) if booth_status.get("status"): print("DAEMON STATUS:") print(booth_status["status"]) pcs-0.10.4/pcs/cli/booth/console_report.py000066400000000000000000000111611356771603100204500ustar00rootroot00000000000000from pcs.common import report_codes as codes from pcs.cli.common.console_report import ( format_file_role, format_optional, format_plural, ) from pcs.common.tools import format_list def format_booth_default(value, template): return "" if value in ("booth", "", None) else template.format(value) def booth_config_accepted_by_node(info): desc = "" if info["name_list"] and info["name_list"] not in [["booth"]]: desc = "{_s} {_list}".format( _s=("s" if len(info["name_list"]) > 1 else ""), _list=format_list(info["name_list"]) ) return "{_node_info}Booth config{_desc} saved".format( _node_info=format_optional(info["node"], "{0}: "), _desc=desc, ) #Each value (a callable taking report_item.info) returns a message. #Force text will be appended if necessary. #If it is necessary to put the force text inside the string then the callable #must take the force_text parameter. CODE_TO_MESSAGE_BUILDER_MAP = { codes.BOOTH_LACK_OF_SITES: lambda info: "lack of sites for booth configuration (need 2 at least): sites {0}" .format(", ".join(info["sites"]) if info["sites"] else "missing") , codes.BOOTH_EVEN_PEERS_NUM: lambda info: "odd number of peers is required (entered {number} peers)" .format(**info) , codes.BOOTH_ADDRESS_DUPLICATION: lambda info: "duplicate address for booth configuration: {0}" .format(", ".join(info["addresses"])) , codes.BOOTH_CONFIG_UNEXPECTED_LINES: lambda info: "unexpected {_line_pl} in booth config{_file_path}:\n{_line_list}" .format( _file_path=format_optional(info["file_path"], " '{0}'"), _line_pl=format_plural(info["line_list"], "line"), _line_list="\n".join(info["line_list"]), **info ) , codes.BOOTH_INVALID_NAME: lambda info: "booth name '{name}' is not valid ({reason})" .format(**info) , codes.BOOTH_TICKET_NAME_INVALID: lambda info: "booth ticket name '{0}' is not valid, use alphanumeric chars or dash" .format(info["ticket_name"]) , codes.BOOTH_TICKET_DUPLICATE: lambda info: "booth ticket name '{ticket_name}' already exists in configuration" .format(**info) , codes.BOOTH_TICKET_DOES_NOT_EXIST: lambda info: "booth ticket name '{ticket_name}' does not exist" .format(**info) , codes.BOOTH_ALREADY_IN_CIB: lambda info: "booth instance '{name}' is already created as cluster resource" .format(**info) , codes.BOOTH_NOT_EXISTS_IN_CIB: lambda info: "booth instance '{name}' not found in cib" .format(**info) , codes.BOOTH_CONFIG_IS_USED: lambda info: "booth instance '{0}' is used{1}".format( info["name"], " {0}".format(info["detail"]) if info["detail"] else "", ) , codes.BOOTH_MULTIPLE_TIMES_IN_CIB: lambda info: "found more than one booth instance '{name}' in cib" .format(**info) , codes.BOOTH_CONFIG_DISTRIBUTION_STARTED: lambda info: "Sending booth configuration to cluster nodes..." 
, codes.BOOTH_CONFIG_ACCEPTED_BY_NODE: booth_config_accepted_by_node, codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR: lambda info: "Unable to save booth config{_desc} on node '{node}': {reason}".format( _desc=format_booth_default(info["name"], " '{0}'"), **info ) , codes.BOOTH_FETCHING_CONFIG_FROM_NODE: lambda info: "Fetching booth config{desc} from node '{node}'...".format( desc=format_booth_default(info["config"], " '{0}'"), **info ) , codes.BOOTH_DAEMON_STATUS_ERROR: lambda info: "unable to get status of booth daemon: {reason}".format(**info) , codes.BOOTH_TICKET_STATUS_ERROR: "unable to get status of booth tickets", codes.BOOTH_PEERS_STATUS_ERROR: "unable to get status of booth peers", codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP: lambda info: "cannot determine local site ip, please specify site parameter" , codes.BOOTH_TICKET_OPERATION_FAILED: lambda info: ( "unable to {operation} booth ticket '{ticket_name}'" " for site '{site_ip}', reason: {reason}" ).format(**info) , codes.BOOTH_UNSUPPORTED_FILE_LOCATION: lambda info: ( "{_file_role} '{file_path}' is outside of supported booth config " "directory '{expected_dir}', ignoring the file" ).format( _file_role=format_file_role(info["file_type_code"]), **info ) , } pcs-0.10.4/pcs/cli/booth/env.py000066400000000000000000000060161356771603100162060ustar00rootroot00000000000000from pcs.common import ( file as pcs_file, file_type_codes, ) from pcs.cli.common import console_report from pcs.cli.file import metadata from pcs.lib import reports from pcs.lib.errors import LibraryError def middleware_config(config_path, key_path): if config_path and not key_path: raise console_report.error( "When --booth-conf is specified, " "--booth-key must be specified as well" ) if key_path and not config_path: raise console_report.error( "When --booth-key is specified, " "--booth-conf must be specified as well" ) is_mocked_environment = config_path and key_path if is_mocked_environment: config_file = pcs_file.RawFile( metadata.for_file_type( file_type_codes.BOOTH_CONFIG, config_path ) ) key_file = pcs_file.RawFile( metadata.for_file_type( file_type_codes.BOOTH_KEY, key_path ) ) def create_booth_env(): try: config_data = config_file.read() if config_file.exists() else None key_data = key_file.read() if key_file.exists() else None # TODO write custom error handling, do not use pcs.lib specific code # and LibraryError except pcs_file.RawFileError as e: raise LibraryError( reports.file_io_error( e.metadata.file_type_code, e.action, e.reason, file_path=e.metadata.path, ) ) return { "config_data": config_data, "key_data": key_data, "key_path": key_path, } def flush(modified_env): if not is_mocked_environment: return if not modified_env: #TODO now this would not happen #for more information see comment in #pcs.cli.common.lib_wrapper.lib_env_to_cli_env raise console_report.error("Error during library communication") try: key_file.write( modified_env["key_file"]["content"], can_overwrite=True ) config_file.write( modified_env["config_file"]["content"], can_overwrite=True ) # TODO write custom error handling, do not use pcs.lib specific code # and LibraryError except pcs_file.RawFileError as e: raise LibraryError( reports.file_io_error( e.metadata.file_type_code, e.action, e.reason, file_path=e.metadata.path, ) ) def apply(next_in_line, env, *args, **kwargs): env.booth = create_booth_env() if is_mocked_environment else {} result_of_next = next_in_line(env, *args, **kwargs) if is_mocked_environment: flush(env.booth["modified_env"]) return result_of_next return apply 
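# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of upstream pcs): it shows
# how the "apply" middleware returned by middleware_config above is meant to
# wrap a library call. "FakeEnv" and "next_in_line" are hypothetical stand-ins
# for the CLI environment object and the wrapped command.
if __name__ == "__main__":
    class FakeEnv:
        # stand-in for the CLI environment object the middleware mutates
        booth = None

    def next_in_line(env):
        # a real library command would read env.booth["config_data"] here
        return env.booth

    # with no --booth-conf/--booth-key given, nothing is mocked and the
    # middleware only injects an empty booth environment
    apply_middleware = middleware_config(None, None)
    print(apply_middleware(next_in_line, FakeEnv()))  # prints: {}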
pcs-0.10.4/pcs/cli/cluster/000077500000000000000000000000001356771603100154075ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/cluster/__init__.py000066400000000000000000000000001356771603100175060ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/cluster/command.py000066400000000000000000000121251356771603100174000ustar00rootroot00000000000000from pcs.cli.resource.parse_args import( parse_create_simple as parse_resource_create_args ) from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import prepare_options def _node_add_remote_separate_name_and_addr(arg_list): """ Commandline options: no options """ node_name = arg_list[0] if len(arg_list) == 1: node_addr = None rest_args = [] elif "=" in arg_list[1] or arg_list[1] in ["op", "meta"]: node_addr = None rest_args = arg_list[1:] else: node_addr = arg_list[1] rest_args = arg_list[2:] return node_name, node_addr, rest_args def node_add_remote(lib, arg_list, modifiers): """ Options: * --wait * --force - allow incomplete distribution of files, allow pcmk remote service to fail * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout * --no-default-ops - do not use default operations For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--wait", "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", "--no-default-ops", ) if not arg_list: raise CmdLineInputError() node_name, node_addr, rest_args = _node_add_remote_separate_name_and_addr( arg_list ) parts = parse_resource_create_args(rest_args) force = modifiers.get("--force") lib.remote_node.node_add_remote( node_name, node_addr, parts["op"], parts["meta"], parts["options"], skip_offline_nodes=modifiers.get("--skip-offline"), allow_incomplete_distribution=force, allow_pacemaker_remote_service_fail=force, allow_invalid_operation=force, allow_invalid_instance_attributes=force, use_default_operations=not modifiers.get("--no-default-ops"), wait=modifiers.get("--wait"), ) def create_node_remove_remote(remove_resource): def node_remove_remote(lib, arg_list, modifiers): """ Options: * --force - allow multiple nodes removal, allow pcmk remote service to fail, don't stop a resource before its deletion (this is side effect of old resource delete command used here) * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", ) if len(arg_list) != 1: raise CmdLineInputError() lib.remote_node.node_remove_remote( arg_list[0], remove_resource, skip_offline_nodes=modifiers.get("--skip-offline"), allow_remove_multiple_nodes=modifiers.get("--force"), allow_pacemaker_remote_service_fail=modifiers.get("--force"), ) return node_remove_remote def node_add_guest(lib, arg_list, modifiers): """ Options: * --wait * --force - allow incomplete distribution of files, allow pcmk remote service to fail * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--wait", "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", ) if len(arg_list) < 2: raise CmdLineInputError() node_name = arg_list[0] resource_id = arg_list[1] meta_options = prepare_options(arg_list[2:]) lib.remote_node.node_add_guest( node_name, resource_id, meta_options, skip_offline_nodes=modifiers.get("--skip-offline"), allow_incomplete_distribution=modifiers.get("--force"), 
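        # editor's note (hedged): a single --force intentionally covers both
        # an incomplete file distribution and a failing pacemaker_remote
        # service, as documented in the docstring above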
allow_pacemaker_remote_service_fail=modifiers.get("--force"), wait=modifiers.get("--wait"), ) def node_remove_guest(lib, arg_list, modifiers): """ Options: * --wait * --force - allow multiple nodes removal, allow pcmk remote service to fail * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--wait", "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", ) if len(arg_list) != 1: raise CmdLineInputError() lib.remote_node.node_remove_guest( arg_list[0], skip_offline_nodes=modifiers.get("--skip-offline"), allow_remove_multiple_nodes=modifiers.get("--force"), allow_pacemaker_remote_service_fail=modifiers.get("--force"), wait=modifiers.get("--wait"), ) def node_clear(lib, arg_list, modifiers): """ Options: * --force - allow to clear a cluster node """ modifiers.ensure_only_supported("--force") if len(arg_list) != 1: raise CmdLineInputError() lib.cluster.node_clear( arg_list[0], allow_clear_cluster_node=modifiers.get("--force") ) pcs-0.10.4/pcs/cli/common/000077500000000000000000000000001356771603100152165ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/common/__init__.py000066400000000000000000000000001356771603100173150ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/common/capabilities.py000066400000000000000000000026311356771603100202230ustar00rootroot00000000000000import os.path from textwrap import dedent from lxml import etree from pcs import settings from pcs.cli.common.console_report import error from pcs.common.tools import xml_fromstring def get_capabilities_definition(): """ Read and parse capabilities file The point is to return all data in python structures for further processing. """ filename = os.path.join(settings.pcsd_exec_location, "capabilities.xml") try: with open(filename, mode="r") as file_: capabilities_xml = xml_fromstring(file_.read()) except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e: raise error( "Cannot read capabilities definition file '{0}': '{1}'" .format(filename, str(e)) ) capabilities = [] for feat_xml in capabilities_xml.findall(".//capability"): feat = dict(feat_xml.attrib) desc = feat_xml.find("./description") # dedent and strip remove indentation in the XML file feat["description"] = "" if desc is None else dedent(desc.text).strip() capabilities.append(feat) return capabilities def get_pcs_capabilities(): """ Get pcs capabilities form the capabilities file """ return [ { "id": feat["id"], "description": feat["description"], } for feat in get_capabilities_definition() if feat["in-pcs"] == "1" ] pcs-0.10.4/pcs/cli/common/completion.py000066400000000000000000000057041356771603100177470ustar00rootroot00000000000000def has_applicable_environment(environment): """ dict environment - very likely os.environ """ return bool( all( key in environment for key in ["COMP_WORDS", "COMP_LENGTHS", "COMP_CWORD", "PCS_AUTO_COMPLETE"] ) and environment['PCS_AUTO_COMPLETE'].strip() not in ('0', '') and environment['COMP_CWORD'].isdigit() ) def make_suggestions(environment, suggestion_tree): """ dict environment - very likely os.environ dict suggestion_tree - {'acl': {'role': {'create': ...}}}... 
""" if not has_applicable_environment(environment): raise EnvironmentError("Environment is not completion read") try: typed_word_list = _split_words( environment["COMP_WORDS"], environment["COMP_LENGTHS"].split(" "), ) except EnvironmentError: return "" return "\n".join(_find_suggestions( suggestion_tree, typed_word_list, int(environment['COMP_CWORD']) )) def _split_words(joined_words, word_lengths): cursor_position = 0 words_string_len = len(joined_words) word_list = [] for length in word_lengths: if not length.isdigit(): raise EnvironmentError( "Length of word '{0}' is not digit".format(length) ) next_position = cursor_position + int(length) if next_position > words_string_len: raise EnvironmentError( "Expected lengths are bigger than word lengths" ) if( next_position != words_string_len and not joined_words[next_position].isspace() ): raise EnvironmentError("Words separator is not expected space") word_list.append(joined_words[cursor_position:next_position]) cursor_position = next_position + 1 if words_string_len > next_position: raise EnvironmentError("Expected lengths are smaller then word lengths") return word_list def _find_suggestions(suggestion_tree, typed_word_list, word_under_cursor_idx): if not 1 <= word_under_cursor_idx <= len(typed_word_list): return [] if len(typed_word_list) == word_under_cursor_idx: #not started type the last word yet word_under_cursor = '' else: word_under_cursor = typed_word_list[word_under_cursor_idx] words_for_current_cursor_position = _get_subcommands( suggestion_tree, typed_word_list[1:word_under_cursor_idx] ) return [ word for word in words_for_current_cursor_position if word.startswith(word_under_cursor) ] def _get_subcommands(suggestion_tree, previous_subcommand_list): subcommand_tree = suggestion_tree for subcommand in previous_subcommand_list: if subcommand not in subcommand_tree: return [] subcommand_tree = subcommand_tree[subcommand] return sorted(list(subcommand_tree.keys())) pcs-0.10.4/pcs/cli/common/console_report.py000066400000000000000000002237171356771603100206410ustar00rootroot00000000000000# pylint: disable=too-many-lines from collections import defaultdict from collections.abc import Iterable from functools import partial import sys from pcs.common import ( file_type_codes, report_codes as codes, ) from pcs.common.file import RawFileError from pcs.common.fencing_topology import TARGET_TYPE_ATTRIBUTE from pcs.common.tools import ( format_list, indent, ) INSTANCE_SUFFIX = "@{0}" NODE_PREFIX = "{0}: " _type_translation = { "acl_group": "ACL group", "acl_permission": "ACL permission", "acl_role": "ACL role", "acl_target": "ACL user", # Pacemaker-2.0 deprecated masters. Masters are now called promotable # clones. We treat masters as clones. Do not report we were doing something # with a master, say we were doing it with a clone instead. 
"master": "clone", "primitive": "resource", } _type_articles = { "ACL group": "an", "ACL user": "an", "ACL role": "an", "ACL permission": "an", } _file_operation_translation = { RawFileError.ACTION_CHMOD: "change permissions of", RawFileError.ACTION_CHOWN: "change ownership of", RawFileError.ACTION_READ: "read", RawFileError.ACTION_REMOVE: "remove", RawFileError.ACTION_WRITE: "write", } _file_role_translation = { file_type_codes.BOOTH_CONFIG: "Booth configuration", file_type_codes.BOOTH_KEY: "Booth key", file_type_codes.COROSYNC_AUTHKEY: "Corosync authkey", file_type_codes.PACEMAKER_AUTHKEY: "Pacemaker authkey", file_type_codes.PCSD_ENVIRONMENT_CONFIG: "pcsd configuration", file_type_codes.PCSD_SSL_CERT: "pcsd SSL certificate", file_type_codes.PCSD_SSL_KEY: "pcsd SSL key", file_type_codes.PCS_KNOWN_HOSTS: "known-hosts", file_type_codes.PCS_SETTINGS_CONF: "pcs configuration", } _file_role_to_option_translation = { file_type_codes.BOOTH_CONFIG: "--booth-conf", file_type_codes.BOOTH_KEY: "--booth-key", file_type_codes.CIB: "-f", file_type_codes.COROSYNC_CONF: "--corosync_conf", } def warn(message): sys.stdout.write(format_message(message, "Warning: ")) def format_message(message, prefix): return "{0}{1}\n".format(prefix, message) def error(message): sys.stderr.write(format_message(message, "Error: ")) return SystemExit(1) def format_optional(value, template, empty_case=""): # Number 0 is considered False which does not suit our needs so we check # for it explicitly. Beware that False == 0 is true, so we must have an # additional check for that (bool is a subclass of int). if ( value or (isinstance(value, int) and not isinstance(value, bool) and value == 0) ): return template.format(value) return empty_case def _is_multiple(what): """ Return True if 'what' does not mean one item, False otherwise iterable/int what -- this will be counted """ retval = False if isinstance(what, int): retval = abs(what) != 1 elif not isinstance(what, str): try: retval = len(what) != 1 except TypeError: pass return retval def _add_s(word): """ add "s" or "es" to the word based on its ending string word -- word where "s" or "es" should be added """ if ( word[-1:] in ("s", "x", "o") or word[-2:] in ("ss", "sh", "ch") ): return word + "es" return word + "s" def format_plural(depends_on, singular, plural=None): """ Takes the singular word form and returns its plural form if depends_on is not equal to one/contains one item iterable/int/string depends_on -- if number (of items) isn't equal to one, returns plural string singular -- singular word (like: is, do, node) string plural -- optional irregular plural form """ common_plurals = { "is": "are", "has": "have", "does": "do", } if not _is_multiple(depends_on): return singular if plural: return plural if singular in common_plurals: return common_plurals[singular] return _add_s(singular) def format_fencing_level_target(target_type, target_value): if target_type == TARGET_TYPE_ATTRIBUTE: return "{0}={1}".format(target_value[0], target_value[1]) return target_value def format_file_action(action): return _file_operation_translation.get(action, action) def format_file_role(role): return _file_role_translation.get(role, role) def is_iterable_not_str(value): return isinstance(value, Iterable) and not isinstance(value, str) def service_operation_started(operation, info): return "{operation} {service}{instance_suffix}...".format( operation=operation, instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX), **info ) def service_operation_error(operation, info): return ( 
"{node_prefix}Unable to {operation} {service}{instance_suffix}:" " {reason}" ).format( operation=operation, instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX), node_prefix=format_optional(info["node"], NODE_PREFIX), **info ) def service_operation_success(operation, info): return "{node_prefix}{service}{instance_suffix} {operation}".format( operation=operation, instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX), node_prefix=format_optional(info["node"], NODE_PREFIX), **info ) def service_operation_skipped(operation, info): return ( "{node_prefix}not {operation} {service}{instance_suffix}: {reason}" ).format( operation=operation, instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX), node_prefix=format_optional(info["node"], NODE_PREFIX), **info ) def type_to_string(type_name, article=False): if not type_name: return "" # get a translation or make a type_name a string translated = _type_translation.get(type_name, "{0}".format(type_name)) if not article: return translated return "{article} {type}".format( article=_type_articles.get(translated, "a"), type=translated ) def typelist_to_string(type_list, article=False): if not type_list: return "" # use set to drop duplicate items: # * master is translated to clone # * i.e. "clone, master" is translated to "clone, clone" # * so we want to drop the second clone new_list = sorted({ # get a translation or make a type_name a string _type_translation.get(type_name, "{0}".format(type_name)) for type_name in type_list }) types = "/".join(new_list) if not article: return types return "{article} {types}".format( article=_type_articles.get(new_list[0], "a"), types=types ) def skip_reason_to_string(reason): translate = { "not_live_cib": "the command does not run on a live cluster (e.g. 
-f " "was used)" , "unreachable": "pcs is unable to connect to the node(s)", } return translate.get(reason, reason) def stdout_stderr_to_string(stdout, stderr, prefix=""): new_lines = [prefix] if prefix else [] for line in stdout.splitlines() + stderr.splitlines(): if line.strip(): new_lines.append(line) return "\n".join(new_lines) def id_belongs_to_unexpected_type(info): return "'{id}' is not {expected_type}".format( id=info["id"], expected_type=typelist_to_string(info["expected_types"], article=True) ) def object_with_id_in_unexpected_context(info): context_type = type_to_string(info["expected_context_type"]) if info.get("expected_context_id", ""): context = "{_expected_context_type} '{expected_context_id}'".format( _expected_context_type=context_type, **info ) else: context = "'{_expected_context_type}'".format( _expected_context_type=context_type, ) return "{_type} '{id}' exists but does not belong to {_context}".format( _context=context, _type=type_to_string(info["type"]), **info ) def id_not_found(info): desc = format_optional(typelist_to_string(info["expected_types"]), "{0} ") if not info["context_type"] or not info["context_id"]: return "{desc}'{id}' does not exist".format(desc=desc, id=info["id"]) return ( "there is no {desc}'{id}' in the {context_type} '{context_id}'".format( desc=desc, id=info["id"], context_type=info["context_type"], context_id=info["context_id"], ) ) def resource_running_on_nodes(info): role_label_map = { "Started": "running", } state_info = {} for state, node_list in info["roles_with_nodes"].items(): state_info.setdefault( role_label_map.get(state, state.lower()), [] ).extend(node_list) return "resource '{resource_id}' is {detail_list}".format( resource_id=info["resource_id"], detail_list="; ".join(sorted([ "{run_type} on node{s} {node_list}".format( run_type=run_type, s="s" if len(node_list) > 1 else "", node_list=format_list(node_list) ) for run_type, node_list in state_info.items() ])) ) def invalid_options(info): template = "invalid {_desc}option{_plural_options} {_option_names_list}," if not info["allowed"] and not info["allowed_patterns"]: template += " there are no options allowed" elif not info["allowed_patterns"]: template += " allowed option{_plural_allowed} {_allowed_values}" elif not info["allowed"]: template += ( " allowed are options matching patterns: {_allowed_patterns_values}" ) else: template += ( " allowed option{_plural_allowed} {_allowed_values}" " and" " options matching patterns: {_allowed_patterns_values}" ) return template.format( _desc=format_optional(info["option_type"], "{0} "), _allowed_values=format_list(info["allowed"]), _allowed_patterns_values=format_list(info["allowed_patterns"]), _option_names_list=format_list(info["option_names"]), _plural_options=("s:" if len(info["option_names"]) > 1 else ""), _plural_allowed=("s are:" if len(info["allowed"]) > 1 else " is"), **info ) def invalid_option_value(info): if info["cannot_be_empty"]: template = "{option_name} cannot be empty" elif info["forbidden_characters"]: template = ( "{option_name} cannot contain {forbidden_characters} characters" ) else: template = "'{option_value}' is not a valid {option_name} value" if info["allowed_values"]: template += ", use {_hint}" return template.format( _hint=( # "allowed_values" value is overloaded: # * it can be a list -> it expreses possible option values # * it can be a string -> it is a textual description of the value format_list(info["allowed_values"]) if is_iterable_not_str(info["allowed_values"]) else info["allowed_values"] ), **info ) 
def corosync_config_cannot_save_invalid_names_values(info): prefix = "Cannot save corosync.conf containing " if ( not info["section_name_list"] and not info["attribute_name_list"] and not info["attribute_value_pairs"] ): return prefix + "invalid section names, option names or option values" parts = [] if info["section_name_list"]: parts.append( "invalid section name(s): " + format_list(info["section_name_list"]) ) if info["attribute_name_list"]: parts.append( "invalid option name(s): " + format_list(info["attribute_name_list"]) ) if info["attribute_value_pairs"]: pairs = ", ".join([ f"'{value}' (option '{name}')" for name, value in info["attribute_value_pairs"] ]) parts.append("invalid option value(s): " + pairs) return prefix + "; ".join(parts) def corosync_bad_node_addresses_count(info): if info["min_count"] == info["max_count"]: template = ( "{max_count} address{_s_allowed} must be specified for a node, " "{actual_count} address{_s_specified} specified{_node_desc}" ) else: template = ( "At least {min_count} and at most {max_count} address{_s_allowed} " "must be specified for a node, {actual_count} " "address{_s_specified} specified{_node_desc}" ) node_template = " for node '{}'" return template.format( _node_desc=( format_optional(info["node_name"], node_template) or format_optional(info["node_index"], node_template) ), _s_allowed=("es" if info["max_count"] > 1 else ""), _s_specified=("es" if info["actual_count"] > 1 else ""), **info ) def corosync_node_address_count_mismatch(info): count_node = defaultdict(list) for node_name, count in info["node_addr_count"].items(): count_node[count].append(node_name) parts = ["All nodes must have the same number of addresses"] # List most common number of addresses first. for count, nodes in sorted( count_node.items(), key=lambda pair: len(pair[1]), reverse=True ): parts.append( "node{s} {nodes} {have} {count} address{es}".format( s=("s" if len(nodes) > 1 else ""), nodes=format_list(nodes), have=("have" if len(nodes) > 1 else "has"), count=count, es=("es" if count > 1 else "") )) return "; ".join(parts) def corosync_link_does_not_exist_cannot_update(info): template = "Cannot set options for non-existent link '{link_number}'" if info.get("existing_link_list"): template += ", existing links: {_link_list}" return template.format( _link_list=format_list(info["existing_link_list"]), **info ) if info.get("link_count"): template += ", {link_count} link{_s_are} defined starting with link 0" return template.format( _s_are=("s are" if info["link_count"] > 1 else " is"), **info ) return template.format(**info) def service_version_mismatch(info): version_host = defaultdict(list) for host_name, version in info["hosts_version"].items(): version_host[version].append(host_name) parts = [ "Hosts do not have the same version of '{}'".format(info["service"]) ] # List most common versions first. 
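    # Hedged example (editor's addition): assuming service="pacemaker" and
    # hosts_version={"n1": "2.1", "n2": "2.1", "n3": "2.0"}, the joined
    # message reads roughly:
    #   Hosts do not have the same version of 'pacemaker';
    #   hosts 'n1', 'n2' have version 2.1; host 'n3' has version 2.0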
for version, hosts in sorted( version_host.items(), key=lambda pair: len(pair[1]), reverse=True ): parts.append( "host{s} {hosts} {have} version {version}".format( s=("s" if len(hosts) > 1 else ""), hosts=format_list(hosts), have=("have" if len(hosts) > 1 else "has"), version=version )) return "; ".join(parts) def resource_move_ban_clear_master_resource_not_promotable(info): return ( "when specifying --master you must use the promotable clone id{_id}" .format( _id=format_optional(info["promotable_id"], " ({0})"), ) ) def resource_move_ban_pcmk_success(info): new_lines = [] for line in info["stdout"].splitlines() + info["stderr"].splitlines(): if not line.strip(): continue line = line.replace( "WARNING: Creating rsc_location constraint", "Warning: Creating location constraint" ) line = line.replace( " using the clear option or by editing the CIB with an " "appropriate tool", "" ) new_lines.append(line) return "\n".join(new_lines) def build_node_description(node_types): if not node_types: return "Node" label = "{0} node".format if isinstance(node_types, str): return label(node_types) if len(node_types) == 1: return label(node_types[0]) return "nor " + " or ".join([label(ntype) for ntype in node_types]) #Each value (a callable taking report_item.info) returns a message. #Force text will be appended if necessary. #If it is necessary to put the force text inside the string then the callable #must take the force_text parameter. CODE_TO_MESSAGE_BUILDER_MAP = { codes.EMPTY_RESOURCE_SET_LIST: "Resource set list is empty", codes.REQUIRED_OPTIONS_ARE_MISSING: lambda info: "required {desc}option{s} {option_names_list} {are} missing" .format( desc=format_optional(info["option_type"], "{0} "), option_names_list=format_list(info["option_names"]), s=("s" if len(info["option_names"]) > 1 else ""), are=( "are" if len(info["option_names"]) > 1 else "is" ) ) , codes.PREREQUISITE_OPTION_MUST_BE_ENABLED_AS_WELL: lambda info: ( "If {_opt_desc}option '{option_name}' is enabled, " "{_pre_desc}option '{prerequisite_name}' must be enabled as well" ).format( _opt_desc=format_optional(info.get("option_type"), "{0} "), _pre_desc=format_optional(info.get("prerequisite_type"), "{0} "), **info ) , codes.PREREQUISITE_OPTION_MUST_BE_DISABLED: lambda info: ( "If {_opt_desc}option '{option_name}' is enabled, " "{_pre_desc}option '{prerequisite_name}' must be disabled" ).format( _opt_desc=format_optional(info.get("option_type"), "{0} "), _pre_desc=format_optional(info.get("prerequisite_type"), "{0} "), **info ) , codes.PREREQUISITE_OPTION_MUST_NOT_BE_SET: lambda info: ( "Cannot set {_opt_desc}option '{option_name}' because " "{_pre_desc}option '{prerequisite_name}' is already set" ).format( _opt_desc=format_optional(info.get("option_type"), "{0} "), _pre_desc=format_optional(info.get("prerequisite_type"), "{0} "), **info ) , codes.PREREQUISITE_OPTION_IS_MISSING: lambda info: ( "If {opt_desc}option '{option_name}' is specified, " "{pre_desc}option '{prerequisite_name}' must be specified as well" ).format( opt_desc=format_optional(info.get("option_type"), "{0} "), pre_desc=format_optional(info.get("prerequisite_type"), "{0} "), **info ) , codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING: lambda info: "{desc}option {option_names_list} has to be specified" .format( desc=format_optional(info.get("option_type"), "{0} "), option_names_list=" or ".join(sorted([ "'{0}'".format(name) for name in info["option_names"] ])), ) , codes.INVALID_OPTIONS: invalid_options, codes.INVALID_OPTION_VALUE: invalid_option_value, 
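# A minimal sketch of how this map is typically consumed (assumed usage; the # real dispatch lives in the report processing code): look up the builder by # the report item's code and call it only if it is callable, since entries # whose message needs no data from info are plain strings: # # >>> builder = CODE_TO_MESSAGE_BUILDER_MAP[report_item.code] # >>> msg = builder(report_item.info) if callable(builder) else builder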
codes.INVALID_OPTION_TYPE: lambda info: #value on key "allowed_types" is overloaded: # * it can be a list - then it express possible option types # * it can be a string - then it is verbal description of the type "specified {option_name} is not valid, use {_hint}" .format( _hint=( format_list(info["allowed_types"]) if is_iterable_not_str(info["allowed_types"]) else info["allowed_types"] ), **info ) , codes.INVALID_USERDEFINED_OPTIONS: lambda info: ( "invalid {_desc}option{_plural_options} {_option_names_list}, " "{_desc}options may contain {allowed_characters} characters only" ).format( _desc=format_optional(info["option_type"], "{0} "), _option_names_list=format_list(info["option_names"]), _plural_options=("s:" if len(info["option_names"]) > 1 else ""), **info ) , codes.DEPRECATED_OPTION: lambda info: ( "{_desc}option '{option_name}' is deprecated and should not be " "used, use {_hint} instead" ).format( _desc=format_optional(info["option_type"], "{0} "), _hint=( ", ".join(sorted(info["replaced_by"])) ), **info ) , codes.MUTUALLY_EXCLUSIVE_OPTIONS: lambda info: # "{desc}options {option_names} are muttually exclusive".format( "Only one of {desc}options {option_names} can be used".format( desc=format_optional(info["option_type"], "{0} "), option_names=( format_list(sorted(info["option_names"])[:-1]) + " and '{0}'".format(sorted(info["option_names"])[-1]) ) ) , codes.EMPTY_ID: lambda info: "{id_description} cannot be empty" .format(**info) , codes.INVALID_CIB_CONTENT: lambda info: "invalid cib:\n{report}{_more_verbose}" .format( _more_verbose=format_optional( info["can_be_more_verbose"], "\n\nUse --full for more details." ), **info ) , codes.INVALID_ID: lambda info: ( "invalid {id_description} '{id}', '{invalid_character}' " "is not a valid {desc}character for a {id_description}" ).format( desc="first " if info["is_first_char"] else "", **info ) , codes.INVALID_TIMEOUT_VALUE: lambda info: "'{timeout}' is not a valid number of seconds to wait" .format(**info) , codes.INVALID_SCORE: lambda info: "invalid score '{score}', use integer or INFINITY or -INFINITY" .format(**info) , codes.MULTIPLE_SCORE_OPTIONS: "multiple score options cannot be specified", codes.RUN_EXTERNAL_PROCESS_STARTED: lambda info: "Running: {command}\nEnvironment:{env_part}\n{stdin_part}".format( stdin_part=format_optional( info["stdin"], "--Debug Input Start--\n{0}\n--Debug Input End--\n" ), env_part=( "" if not info["environment"] else "\n" + "\n".join([ " {0}={1}".format(key, val) for key, val in sorted(info["environment"].items()) ]) ), **info ) , codes.RUN_EXTERNAL_PROCESS_FINISHED: lambda info: ( "Finished running: {command}\n" "Return value: {return_value}\n" "--Debug Stdout Start--\n" "{stdout}\n" "--Debug Stdout End--\n" "--Debug Stderr Start--\n" "{stderr}\n" "--Debug Stderr End--\n" ).format(**info) , codes.RUN_EXTERNAL_PROCESS_ERROR: lambda info: "unable to run command {command}: {reason}" .format(**info) , codes.NODE_COMMUNICATION_RETRYING: lambda info: ( "Unable to connect to '{node}' via address '{failed_address}' and " "port '{failed_port}'. 
Retrying request '{request}' via address " "'{next_address}' and port '{next_port}'" ).format(**info) , codes.NODE_COMMUNICATION_NO_MORE_ADDRESSES: lambda info: "Unable to connect to '{node}' via any of its addresses".format(**info) , codes.NODE_COMMUNICATION_DEBUG_INFO: lambda info: ( "Communication debug info for calling: {target}\n" "--Debug Communication Info Start--\n" "{data}\n" "--Debug Communication Info End--\n" ).format(**info) , codes.NODE_COMMUNICATION_STARTED: lambda info: "Sending HTTP Request to: {target}\n{data_part}".format( data_part=format_optional( info["data"], "--Debug Input Start--\n{0}\n--Debug Input End--\n" ), **info ) , codes.NODE_COMMUNICATION_FINISHED: lambda info: ( "Finished calling: {target}\n" "Response Code: {response_code}\n" "--Debug Response Start--\n" "{response_data}\n" "--Debug Response End--\n" ).format(**info) , codes.NODE_COMMUNICATION_NOT_CONNECTED: lambda info: "Unable to connect to {node} ({reason})" .format(**info) , codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED: lambda info: ( "Unable to authenticate to {node} ({reason})," " try running 'pcs host auth {node}'" ) .format(**info) , codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED: lambda info: "{node}: Permission denied ({reason})" .format(**info) , codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND: lambda info: "{node}: Unsupported command ({reason}), try upgrading pcsd" .format(**info) , codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL: lambda info: "{node}: {reason}" .format(**info) , codes.NODE_COMMUNICATION_ERROR: lambda info: "Error connecting to {node} ({reason})" .format(**info) , codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT: lambda info: "Unable to connect to {node} ({reason})" .format(**info) , codes.NODE_COMMUNICATION_ERROR_TIMED_OUT: lambda info: ( "{node}: Connection timeout, try setting higher timeout in " "--request-timeout option ({reason})" ).format(**info) , codes.NODE_COMMUNICATION_PROXY_IS_SET: "Proxy is set in environment variables, try disabling it" , codes.DEFAULTS_CAN_BE_OVERRIDEN: "Defaults do not apply to resources which override them with their " "own defined values" , codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED: "Sending updated corosync.conf to nodes..." , codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE: lambda info: "{node}: Succeeded" .format(**info) , codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR: lambda info: "{node}: Unable to set corosync config" .format(**info) , codes.COROSYNC_NOT_RUNNING_CHECK_STARTED: "Checking corosync is not running on nodes..." 
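# Note on the recurring format_optional(info["node"], "{}: ") idiom used by # several entries below: it prepends a "node: " prefix only when the node is # known. A rough standalone sketch of the assumed contract: # # >>> format_optional("node1", "{}: ") # 'node1: ' # >>> format_optional(None, "{}: ") # ''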
, codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR: lambda info: "{node}: Unable to check if corosync is not running" .format(**info) , codes.COROSYNC_NOT_RUNNING_ON_NODE: lambda info: "{node}: corosync is not running" .format(**info) , codes.COROSYNC_RUNNING_ON_NODE: lambda info: "{node}: corosync is running" .format(**info) , codes.COROSYNC_QUORUM_GET_STATUS_ERROR: lambda info: "{_node}Unable to get quorum status: {reason}" .format( _node=format_optional(info["node"], "{}: "), **info ) , codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR: lambda info: "Unable to set expected votes: {reason}" .format(**info) , codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC: "No exec_NAME options are specified, so heuristics are effectively " "disabled" , codes.COROSYNC_CONFIG_RELOADED: lambda info: "{_node}Corosync configuration reloaded".format( _node=format_optional(info["node"], "{}: "), **info ) , codes.COROSYNC_CONFIG_RELOAD_ERROR: lambda info: "{_node}Unable to reload corosync configuration: {reason}" .format( _node=format_optional(info["node"], "{}: "), **info, ) , codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE: lambda info: ( "{node}: Corosync is not running, therefore reload of the corosync " "configuration is not possible" ).format(**info) , codes.UNABLE_TO_READ_COROSYNC_CONFIG: lambda info: "Unable to read {path}: {reason}" .format(**info) , codes.PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_AFTER_OPENING_BRACE: "Unable to parse corosync config: extra characters after {" , codes .PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_BEFORE_OR_AFTER_CLOSING_BRACE: "Unable to parse corosync config: extra characters before or after }" , codes.PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE: "Unable to parse corosync config: a line is not opening or closing " "a section or key: value" , codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE: "Unable to parse corosync config: missing closing brace" , codes.PARSE_ERROR_COROSYNC_CONF_MISSING_SECTION_NAME_BEFORE_OPENING_BRACE: "Unable to parse corosync config: missing a section name before {" , codes.PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE: "Unable to parse corosync config: unexpected closing brace" , codes.PARSE_ERROR_COROSYNC_CONF: "Unable to parse corosync config" , codes.COROSYNC_CONFIG_CANNOT_SAVE_INVALID_NAMES_VALUES: corosync_config_cannot_save_invalid_names_values , codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES: lambda info: "Some nodes are missing names in corosync.conf, " + ("unable to continue" if info["fatal"] else "those nodes were omitted") , codes.COROSYNC_CONFIG_NO_NODES_DEFINED: "No nodes found in corosync.conf", codes.COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK: lambda info: ( "Address '{address}' cannot be used in {_link} " "because the link uses {expected_address_type} addresses" ).format( _link=format_optional(info["link_number"], "link '{}'", "the link"), **info ) , codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT: corosync_bad_node_addresses_count , codes.COROSYNC_IP_VERSION_MISMATCH_IN_LINKS: lambda info: ( "Using both IPv4 and IPv6 on one link is not allowed; please, use " "either IPv4 or IPv6{_links}" ).format( _links=format_optional( ( format_list(info["link_numbers"]) if info["link_numbers"] else "" ), " on link(s): {}" ) ) , codes.COROSYNC_LINK_NUMBER_DUPLICATION: lambda info: "Link numbers must be unique, duplicate link numbers: {_nums}".format( _nums=format_list(info["link_number_list"]) ) , codes.NODE_ADDRESSES_ALREADY_EXIST: lambda info: ( "Node address{_es} {_addrs} {_are} already used by existing nodes; " "please, use other 
address{_es}" ).format( _addrs=format_list(info["address_list"]), _es=("es" if len(info["address_list"]) > 1 else ""), _are=("are" if len(info["address_list"]) > 1 else "is"), ) , codes.NODE_ADDRESSES_CANNOT_BE_EMPTY: lambda info: ( "Empty address set for node{_s} {_nodes}, " "an address cannot be empty" ).format( _s=("s" if len(info["node_name_list"]) > 1 else ""), _nodes=format_list(info["node_name_list"]) ) , codes.NODE_ADDRESSES_DUPLICATION: lambda info: "Node addresses must be unique, duplicate addresses: {_addrs}".format( _addrs=format_list(info["address_list"]) ) , codes.COROSYNC_NODE_ADDRESS_COUNT_MISMATCH: corosync_node_address_count_mismatch , codes.NODE_NAMES_ALREADY_EXIST: lambda info: ( "Node name{_s} {_names} {_are} already used by existing nodes; " "please, use other name{_s}" ).format( _names=format_list(info["name_list"]), _s=("s" if len(info["name_list"]) > 1 else ""), _are=("are" if len(info["name_list"]) > 1 else "is"), ) , codes.NODE_NAMES_DUPLICATION: lambda info: "Node names must be unique, duplicate names: {_names}".format( _names=format_list(info["name_list"]) ) , codes.COROSYNC_NODES_MISSING: "No nodes have been specified" , codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE: lambda info: "These options cannot be set when the cluster uses a quorum device: {0}" .format(", ".join(sorted(info["options_names"]))) , codes.COROSYNC_TOO_MANY_LINKS_OPTIONS: lambda info: ( "Cannot specify options for more links ({links_options_count}) " "than how many is defined by number of addresses per node " "({links_count})" ).format(**info) , codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_BAD_TRANSPORT: lambda info: ( "Cluster is using {actual_transport} transport which does not " "support {_action} links" ).format( _action=("adding" if info["add_or_not_remove"] else "removing"), **info ) , codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_NO_LINKS_SPECIFIED: lambda info: "Cannot {_action} links, no links to {_action} specified".format( _action=("add" if info["add_or_not_remove"] else "remove"), ) , codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS: lambda info: ( "Cannot {_action} {links_change_count} link{_s_change}, there " "would be {links_new_count} link{_s_new} defined which is " "{_more_less} than allowed number of {links_limit_count} " "link{_s_limit}" ).format( _action=("add" if info["add_or_not_remove"] else "remove"), _more_less=("more" if info["add_or_not_remove"] else "less"), _s_change=("" if info["links_change_count"] == 1 else "s"), _s_new=("" if info["links_new_count"] == 1 else "s"), _s_limit=("" if info["links_limit_count"] == 1 else "s"), **info ) , codes.COROSYNC_LINK_ALREADY_EXISTS_CANNOT_ADD: lambda info: "Cannot add link '{link_number}', it already exists".format(**info) , codes.COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE: lambda info: ( "Cannot remove non-existent link{_s} {_to_remove}, existing links: " "{_existing}" ).format( _s=("s" if len(info["link_list"]) > 1 else ""), _to_remove=format_list(info["link_list"]), _existing=format_list(info["existing_link_list"]), **info ) , codes.COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_UPDATE: corosync_link_does_not_exist_cannot_update , codes.COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS: lambda info: ( "The {actual_transport} transport does not support '{option_type}' " "options, use {_required_transports} transport" ).format( _required_transports=format_list(info["required_transport_list"]), **info ) , codes.QDEVICE_ALREADY_DEFINED: "quorum device is already defined" , codes.QDEVICE_NOT_DEFINED: "no quorum device is defined in this cluster" , 
codes.QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED: ( "You need to stop the cluster or remove qdevice from the cluster to" " continue" ), codes.QDEVICE_CLIENT_RELOAD_STARTED: "Reloading qdevice configuration on nodes..." , codes.QDEVICE_ALREADY_INITIALIZED: lambda info: "Quorum device '{model}' has been already initialized" .format(**info) , codes.QDEVICE_NOT_INITIALIZED: lambda info: "Quorum device '{model}' has not been initialized yet" .format(**info) , codes.QDEVICE_INITIALIZATION_SUCCESS: lambda info: "Quorum device '{model}' initialized" .format(**info) , codes.QDEVICE_INITIALIZATION_ERROR: lambda info: "Unable to initialize quorum device '{model}': {reason}" .format(**info) , codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED: "Setting up qdevice certificates on nodes..." , codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE: lambda info: "{node}: Succeeded" .format(**info) , codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED: "Removing qdevice certificates from nodes..." , codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE: lambda info: "{node}: Succeeded" .format(**info) , codes.QDEVICE_CERTIFICATE_IMPORT_ERROR: lambda info: "Unable to import quorum device certificate: {reason}" .format(**info) , codes.QDEVICE_CERTIFICATE_SIGN_ERROR: lambda info: "Unable to sign quorum device certificate: {reason}" .format(**info) , codes.QDEVICE_DESTROY_SUCCESS: lambda info: "Quorum device '{model}' configuration files removed" .format(**info) , codes.QDEVICE_DESTROY_ERROR: lambda info: "Unable to destroy quorum device '{model}': {reason}" .format(**info) , codes.QDEVICE_NOT_RUNNING: lambda info: "Quorum device '{model}' is not running" .format(**info) , codes.QDEVICE_GET_STATUS_ERROR: lambda info: "Unable to get status of quorum device '{model}': {reason}" .format(**info) , codes.QDEVICE_USED_BY_CLUSTERS: lambda info: "Quorum device is currently being used by cluster(s): {cluster_list}" .format(cluster_list=", ".join(info["clusters"])) , codes.ID_ALREADY_EXISTS: lambda info: "'{id}' already exists" .format(**info) , codes.ID_BELONGS_TO_UNEXPECTED_TYPE: id_belongs_to_unexpected_type, codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT: object_with_id_in_unexpected_context , codes.ID_NOT_FOUND: id_not_found, codes.STONITH_RESOURCES_DO_NOT_EXIST: lambda info: "Stonith resource(s) '{stonith_id_list}' do not exist" .format( stonith_id_list="', '".join(info["stonith_ids"]), **info ) , codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET: lambda info: "Role '{role_id}' is already assigned to '{target_id}'" .format(**info) , codes.CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET: lambda info: "Role '{role_id}' is not assigned to '{target_id}'" .format(**info) , codes.CIB_ACL_TARGET_ALREADY_EXISTS: lambda info: "'{target_id}' already exists" .format(**info) , codes.CIB_FENCING_LEVEL_ALREADY_EXISTS: lambda info: ( "Fencing level for '{target}' at level '{level}' " "with device(s) {_device_list} already exists" ).format( _device_list=format_list(info["devices"]), target=format_fencing_level_target( info["target_type"], info["target_value"] ), **info ) , codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST: lambda info: "Fencing level {part_target}{part_level}{part_devices}does not exist" .format( part_target=( "for '{0}' ".format(format_fencing_level_target( info["target_type"], info["target_value"] )) if info["target_type"] and info["target_value"] else "" ), part_level=format_optional(info["level"], "at level '{0}' "), part_devices=format_optional( format_list(info["devices"]) if info["devices"] else "", "with device(s) {0} " ) ) , codes.CIB_LOAD_ERROR: "unable to get cib", 
codes.CIB_LOAD_ERROR_SCOPE_MISSING: lambda info: "unable to get cib, scope '{scope}' not present in cib" .format(**info) , codes.CIB_LOAD_ERROR_BAD_FORMAT: lambda info: "unable to get cib, {reason}" .format(**info) , codes.CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION: "Unable to load CIB to get guest and remote nodes from it, " "those nodes cannot be considered in configuration validation" , codes.CIB_CANNOT_FIND_MANDATORY_SECTION: lambda info: "Unable to get {section} section of cib" .format(**info) , codes.CIB_PUSH_ERROR: lambda info: "Unable to update cib\n{reason}\n{pushed_cib}" .format(**info) , codes.CIB_DIFF_ERROR: lambda info: "Unable to diff CIB: {reason}\n{cib_new}" .format(**info) , codes.CIB_SIMULATE_ERROR: lambda info: "Unable to simulate changes in CIB: {reason}\n{cib}" .format(**info) , codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET: lambda info: ( "Replacing the whole CIB instead of applying a diff, a race " "condition may happen if the CIB is pushed more than once " "simultaneously. To fix this, upgrade pacemaker to get " "crm_feature_set at least {required_set}, current is {current_set}." ).format(**info) , codes.CIB_SAVE_TMP_ERROR: lambda info: "Unable to save CIB to a temporary file: {reason}" .format(**info) , codes.CRM_MON_ERROR: lambda info: "error running crm_mon, is pacemaker running?{_reason}" .format( _reason=( ("\n" + "\n".join(indent(info["reason"].strip().splitlines()))) if info["reason"].strip() else "" ), **info ) , codes.BAD_CLUSTER_STATE_FORMAT: "cannot load cluster status, xml does not conform to the schema" , codes.WAIT_FOR_IDLE_NOT_SUPPORTED: "crm_resource does not support --wait, please upgrade pacemaker" , codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER: "Cannot use '-f' together with '--wait'" , codes.WAIT_FOR_IDLE_TIMED_OUT: lambda info: "waiting timeout\n\n{reason}" .format(**info) , codes.WAIT_FOR_IDLE_ERROR: lambda info: "{reason}" .format(**info) , codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE: lambda info: ( "bundle '{bundle_id}' already contains resource '{resource_id}'" ", a bundle may contain at most one resource" ).format(**info) , codes.RESOURCE_CLEANUP_ERROR: lambda info: ( ( "Unable to forget failed operations of resource: {resource}" "\n{reason}" ) if info["resource"] else "Unable to forget failed operations of resources\n{reason}" ).format(**info) , codes.RESOURCE_REFRESH_ERROR: lambda info: ( "Unable to delete history of resource: {resource}\n{reason}" if info["resource"] else "Unable to delete history of resources\n{reason}" ).format(**info) , codes.RESOURCE_REFRESH_TOO_TIME_CONSUMING: lambda info: ( "Deleting history of all resources on all nodes will execute more " "than {threshold} operations in the cluster, which may " "negatively impact the responsiveness of the cluster. 
" "Consider specifying resource and/or node" ).format(**info) , codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION: lambda info: ( "multiple specification of the same operation with the same interval:\n" +"\n".join([ "{0} with intervals {1}".format(name, ", ".join(intervals)) for name, intervals_list in info["duplications"].items() for intervals in intervals_list ]) ), codes.RESOURCE_OPERATION_INTERVAL_ADAPTED: lambda info: ( "changing a {operation_name} operation interval" " from {original_interval}" " to {adapted_interval} to make the operation unique" ).format(**info) , codes.RESOURCE_RUNNING_ON_NODES: resource_running_on_nodes, codes.RESOURCE_DOES_NOT_RUN: lambda info: "resource '{resource_id}' is not running on any node" .format(**info) , codes.RESOURCE_IS_UNMANAGED: lambda info: "'{resource_id}' is unmanaged" .format(**info) , codes.RESOURCE_IS_GUEST_NODE_ALREADY: lambda info: "the resource '{resource_id}' is already a guest node" .format(**info) , codes.RESOURCE_MANAGED_NO_MONITOR_ENABLED: lambda info: ( "Resource '{resource_id}' has no enabled monitor operations." " Re-run with '--monitor' to enable them." ) .format(**info) , codes.NODE_ADDRESSES_UNRESOLVABLE: lambda info: "Unable to resolve addresses: {_addrs}".format( _addrs=format_list(info["address_list"]) ) , codes.NODE_NOT_FOUND: lambda info: "{desc} '{node}' does not appear to exist in configuration".format( desc=build_node_description(info["searched_types"]), node=info["node"] ) , codes.NODE_REMOVE_IN_PACEMAKER_FAILED: lambda info: # TODO: Tests ( "{_node}Unable to remove node(s) {_node_list} from pacemaker" "{_reason_part}" ).format( _node=format_optional(info["node"], "{}: "), _reason_part=format_optional(info["reason"], ": {0}"), _node_list=format_list(info["node_list_to_remove"]), **info ) , codes.NODE_TO_CLEAR_IS_STILL_IN_CLUSTER: lambda info: ( "node '{node}' seems to be still in the cluster" "; this command should be used only with nodes that have been" " removed from the cluster" ) .format(**info) , codes.MULTIPLE_RESULTS_FOUND: lambda info: "more than one {result_type}{search_description} found: {what_found}" .format( what_found=format_list(info["result_identifier_list"]), search_description="" if not info["search_description"] else " for '{0}'".format(info["search_description"]) , result_type=info["result_type"] ) , codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND: lambda info: "unable to get local node name from pacemaker: {reason}" .format(**info) , codes.SERVICE_START_STARTED: partial(service_operation_started, "Starting"), codes.SERVICE_START_ERROR: partial(service_operation_error, "start"), codes.SERVICE_START_SUCCESS: partial(service_operation_success, "started"), codes.SERVICE_START_SKIPPED: partial(service_operation_skipped, "starting"), codes.SERVICE_STOP_STARTED: partial(service_operation_started, "Stopping"), codes.SERVICE_STOP_ERROR: partial(service_operation_error, "stop"), codes.SERVICE_STOP_SUCCESS: partial(service_operation_success, "stopped"), codes.SERVICE_ENABLE_STARTED: partial( service_operation_started, "Enabling" ), codes.SERVICE_ENABLE_ERROR: partial(service_operation_error, "enable"), codes.SERVICE_ENABLE_SUCCESS: partial(service_operation_success, "enabled"), codes.SERVICE_ENABLE_SKIPPED: partial( service_operation_skipped, "enabling" ), codes.SERVICE_DISABLE_STARTED: partial(service_operation_started, "Disabling") , codes.SERVICE_DISABLE_ERROR: partial(service_operation_error, "disable"), codes.SERVICE_DISABLE_SUCCESS: partial( service_operation_success, "disabled" ), codes.SERVICE_KILL_ERROR: 
lambda info: "Unable to kill {_service_list}: {reason}" .format( _service_list=", ".join(info["services"]), **info ) , codes.SERVICE_KILL_SUCCESS: lambda info: "{_service_list} killed" .format( _service_list=", ".join(info["services"]), **info ) , codes.UNABLE_TO_GET_AGENT_METADATA: lambda info: ( "Agent '{agent}' is not installed or does not provide valid" " metadata: {reason}" ).format(**info) , codes.INVALID_RESOURCE_AGENT_NAME: lambda info: ( "Invalid resource agent name '{name}'." " Use standard:provider:type when standard is 'ocf' or" " standard:type otherwise." " List of standards and providers can be obtained by using commands" " 'pcs resource standards' and 'pcs resource providers'" ) .format(**info) , codes.INVALID_STONITH_AGENT_NAME: lambda info: ( "Invalid stonith agent name '{name}'." " List of agents can be obtained by using command" " 'pcs stonith list'. Do not use the 'stonith:' prefix. Agent name" " cannot contain the ':' character." ) .format(**info) , codes.AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE: lambda info: ( "Multiple agents match '{agent}'" ", please specify full name: {possible_agents_str}" ).format(**info) , codes.AGENT_NAME_GUESS_FOUND_NONE: lambda info: "Unable to find agent '{agent}', try specifying its full name" .format(**info) , codes.AGENT_NAME_GUESSED: lambda info: "Assumed agent name '{guessed_name}' (deduced from '{entered_name}')" .format(**info) , codes.OMITTING_NODE: lambda info: "Omitting node '{node}'" .format(**info) , codes.SBD_CHECK_STARTED: "Running SBD pre-enabling checks...", codes.SBD_CHECK_SUCCESS: lambda info: "{node}: SBD pre-enabling checks done" .format(**info) , codes.SBD_CONFIG_DISTRIBUTION_STARTED: "Distributing SBD config...", codes.SBD_CONFIG_ACCEPTED_BY_NODE: lambda info: "{node}: SBD config saved" .format(**info) , codes.UNABLE_TO_GET_SBD_CONFIG: lambda info: "Unable to get SBD configuration from node '{node}'{reason_suffix}" .format( reason_suffix=format_optional(info["reason"], ": {0}"), **info ) , codes.SBD_ENABLING_STARTED: lambda info: "Enabling SBD service..." .format(**info) , codes.SBD_DISABLING_STARTED: "Disabling SBD service...", codes.SBD_DEVICE_INITIALIZATION_STARTED: lambda info: "Initializing device{_s} {_device_list}..." .format( _s=("s" if len(info["device_list"]) > 1 else ""), _device_list=format_list(info["device_list"]) ) , codes.SBD_DEVICE_INITIALIZATION_SUCCESS: lambda info: "{_device_pl} initialized successfully" .format( _device_pl=format_plural(info["device_list"], "Device") ) , codes.SBD_DEVICE_INITIALIZATION_ERROR: lambda info: ( "Initialization of device{_s} {_device_list} failed: {reason}" ).format( _s=("s" if len(info["device_list"]) > 1 else ""), _device_list=format_list(info["device_list"]), **info ) , codes.SBD_DEVICE_LIST_ERROR: lambda info: "Unable to get list of messages from device '{device}': {reason}" .format(**info) , codes.SBD_DEVICE_MESSAGE_ERROR: lambda info: "Unable to set message '{message}' for node '{node}' on device " "'{device}': {reason}" .format(**info) , codes.SBD_DEVICE_DUMP_ERROR: lambda info: "Unable to get SBD headers from device '{device}': {reason}" .format(**info) , codes.FILES_DISTRIBUTION_STARTED: lambda info: "Sending {_description}{_where}".format( _where=( "" if not info["node_list"] else " to " + format_list(info["node_list"]) ), _description=format_list(info["file_list"]) ) , codes.FILES_DISTRIBUTION_SKIPPED: lambda info: ( "Distribution of {_files} to {_nodes} was skipped because " "{_reason}. Please, distribute the file(s) manually." 
).format( _files=format_list(info["file_list"]), _nodes=format_list(info["node_list"]), _reason=skip_reason_to_string(info["reason_type"]) ) , codes.FILE_DISTRIBUTION_SUCCESS: lambda info: "{node}: successful distribution of the file '{file_description}'" .format( **info ) , codes.FILE_DISTRIBUTION_ERROR: lambda info: "{node}: unable to distribute file '{file_description}': {reason}" .format( **info ) , codes.FILES_REMOVE_FROM_NODES_STARTED: lambda info: "Requesting remove {_description}{_where}".format( _where=( "" if not info["node_list"] else " from " + format_list(info["node_list"]) ), _description=format_list(info["file_list"]) ) , codes.FILES_REMOVE_FROM_NODES_SKIPPED: lambda info: ( "Removing {_files} from {_nodes} was skipped because {_reason}. " "Please, remove the file(s) manually." ).format( _files=format_list(info["file_list"]), _nodes=format_list(info["node_list"]), _reason=skip_reason_to_string(info["reason_type"]) ) , codes.FILE_REMOVE_FROM_NODE_SUCCESS: lambda info: "{node}: successful removal of the file '{file_description}'" .format( **info ) , codes.FILE_REMOVE_FROM_NODE_ERROR: lambda info: "{node}: unable to remove file '{file_description}': {reason}" .format( **info ) , codes.SERVICE_COMMANDS_ON_NODES_STARTED: lambda info: "Requesting {_description}{_where}".format( _where=( "" if not info["node_list"] else " on " + format_list(info["node_list"]) ), _description=format_list(info["action_list"]) ) , codes.SERVICE_COMMANDS_ON_NODES_SKIPPED: lambda info: ( "Running action(s) {_actions} on {_nodes} was skipped because " "{_reason}. Please, run the action(s) manually." ).format( _actions=format_list(info["action_list"]), _nodes=format_list(info["node_list"]), _reason=skip_reason_to_string(info["reason_type"]) ) , codes.SERVICE_COMMAND_ON_NODE_SUCCESS: lambda info: "{node}: successful run of '{service_command_description}'" .format( **info ) , codes.SERVICE_COMMAND_ON_NODE_ERROR: lambda info: ( "{node}: service command failed:" " {service_command_description}: {reason}" ) .format( **info ) , codes.SBD_DEVICE_PATH_NOT_ABSOLUTE: lambda info: "Device path '{device}'{on_node} is not absolute" .format( on_node=format_optional( info["node"], " on node '{0}'".format(info["node"]) ), **info ) , codes.SBD_DEVICE_DOES_NOT_EXIST: lambda info: "{node}: device '{device}' not found" .format(**info) , codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE: lambda info: "{node}: device '{device}' is not a block device" .format(**info) , codes.INVALID_RESPONSE_FORMAT: lambda info: "{node}: Invalid format of response" .format(**info) , codes.SBD_NO_DEVICE_FOR_NODE: lambda info: ( ( "Cluster uses SBD with shared storage so SBD devices must be " "specified for all nodes, no device specified for node '{node}'" ) if info["sbd_enabled_in_cluster"] else "No SBD device specified for node '{node}'" ).format(**info) , codes.SBD_TOO_MANY_DEVICES_FOR_NODE: lambda info: ( "At most {max_devices} SBD devices can be specified for a node, " "'{_devices}' specified for node '{node}'" ) .format( _devices="', '".join(info["device_list"]), **info ) , codes.SBD_NOT_INSTALLED: lambda info: "SBD is not installed on node '{node}'" .format(**info) , codes.SBD_NOT_USED_CANNOT_SET_SBD_OPTIONS: lambda info: ( "Cluster is not configured to use SBD, cannot specify SBD " "option(s) {__options} for node '{node}'" ).format( __options=format_list(info["options"]), **info ) , codes.SBD_WITH_DEVICES_NOT_USED_CANNOT_SET_DEVICE: lambda info: "Cluster is not configured to use SBD with shared storage, cannot " "specify SBD devices for node '{node}'" 
.format(**info) , codes.WATCHDOG_NOT_FOUND: lambda info: "Watchdog '{watchdog}' does not exist on node '{node}'" .format(**info) , codes.WATCHDOG_INVALID: lambda info: "Watchdog path '{watchdog}' is invalid." .format(**info) , codes.UNABLE_TO_GET_SBD_STATUS: lambda info: "Unable to get status of SBD from node '{node}'{reason_suffix}" .format( reason_suffix=format_optional(info["reason"], ": {0}"), **info ) , codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES: "Cluster restart is required in order to apply these changes." , codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS: lambda info: "Recipient '{recipient}' in alert '{alert}' already exists" .format(**info) , codes.CIB_ALERT_RECIPIENT_VALUE_INVALID: lambda info: "Recipient value '{recipient}' is not valid." .format(**info) , codes.CIB_UPGRADE_SUCCESSFUL: "CIB has been upgraded to the latest schema version." , codes.CIB_UPGRADE_FAILED: lambda info: "Upgrading of CIB to the latest schema failed: {reason}" .format(**info) , codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION: lambda info: ( "Unable to upgrade CIB to required schema version" " {required_version} or higher. Current version is" " {current_version}. Newer version of pacemaker is needed." ) .format(**info) , codes.FILE_ALREADY_EXISTS: lambda info: "{_node}{_file_role} file '{file_path}' already exists" .format( _node=format_optional(info["node"], NODE_PREFIX), _file_role=format_file_role(info["file_type_code"]), **info ) , codes.FILE_IO_ERROR: lambda info: "Unable to {_action} {_file_role}{_file_path}: {reason}" .format( _action=format_file_action(info["operation"]), _file_path=format_optional(info["file_path"], " '{0}'"), _file_role=format_file_role(info["file_type_code"]), **info ) , codes.UNABLE_TO_DETERMINE_USER_UID: lambda info: "Unable to determine uid of user '{user}'" .format(**info) , codes.UNABLE_TO_DETERMINE_GROUP_GID: lambda info: "Unable to determine gid of group '{group}'" .format(**info) , codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS: "unsupported operation on non systemd systems" , codes.LIVE_ENVIRONMENT_NOT_CONSISTENT: lambda info: "When {_given} is specified, {_missing} must be specified as well" .format( _given=format_list( info["mocked_files"], _file_role_to_option_translation ), _missing=format_list( info["required_files"], _file_role_to_option_translation ), ) , codes.LIVE_ENVIRONMENT_REQUIRED: lambda info: "This command does not support {_forbidden_options}" .format( _forbidden_options=format_list( info["forbidden_options"], _file_role_to_option_translation ), ) , codes.LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE: "Node(s) must be specified if -f is used" , codes.COROSYNC_NODE_CONFLICT_CHECK_SKIPPED: lambda info: ( "Unable to check if there is a conflict with nodes set in corosync " "because {_reason}" ).format( _reason=skip_reason_to_string(info["reason_type"]) ) , codes.COROSYNC_QUORUM_ATB_CANNOT_BE_DISABLED_DUE_TO_SBD: "Unable to disable auto_tie_breaker, SBD fencing would have no effect" , codes.COROSYNC_QUORUM_ATB_WILL_BE_ENABLED_DUE_TO_SBD: "auto_tie_breaker quorum option will be enabled to make SBD fencing " "effective. Cluster has to be offline to be able to make this change." 
, codes.USE_COMMAND_NODE_ADD_REMOTE: lambda info: ( "this command is not sufficient for creating a remote connection," " use 'pcs cluster node add-remote'" ) , codes.USE_COMMAND_NODE_ADD_GUEST: lambda info: ( "this command is not sufficient for creating a guest node, use" " 'pcs cluster node add-guest'" ) , codes.USE_COMMAND_NODE_REMOVE_GUEST: lambda info: ( "this command is not sufficient for removing a guest node, use" " 'pcs cluster node remove-guest'" ) , codes.TMP_FILE_WRITE: lambda info: ( "Writing to a temporary file {file_path}:\n" "--Debug Content Start--\n{content}\n--Debug Content End--\n" ).format(**info) , codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE: "Unable to perform operation on any available node/host, therefore it " "is not possible to continue" , codes.HOST_NOT_FOUND: lambda info: ( "Host{_s} {_hosts_comma} {_are} not known to pcs, try to " "authenticate the host{_s} using 'pcs host auth {_hosts_space}' " "command" ).format( _hosts_comma=format_list(info["host_list"]), _hosts_space=" ".join(sorted(info["host_list"])), _s=("s" if len(info["host_list"]) > 1 else ""), _are=("are" if len(info["host_list"]) > 1 else "is") ) , codes.NONE_HOST_FOUND: "None of hosts is known to pcs.", codes.HOST_ALREADY_AUTHORIZED: lambda info: "{host_name}: Already authorized".format(**info) , codes.CLUSTER_DESTROY_STARTED: lambda info: "Destroying cluster on hosts: {_hosts}...".format( _hosts=format_list(info["host_name_list"]) ) , codes.CLUSTER_DESTROY_SUCCESS: lambda info: "{node}: Successfully destroyed cluster".format(**info) , codes.CLUSTER_ENABLE_STARTED: lambda info: "Enabling cluster on hosts: {_hosts}...".format( _hosts=format_list(info["host_name_list"]) ) , codes.CLUSTER_ENABLE_SUCCESS: lambda info: "{node}: Cluster enabled".format(**info) , codes.CLUSTER_START_STARTED: lambda info: "Starting cluster on hosts: {_hosts}...".format( _hosts=format_list(info["host_name_list"]) ) , codes.CLUSTER_START_SUCCESS: lambda info: "{node}: Cluster started".format(**info) , codes.SERVICE_NOT_INSTALLED: lambda info: "{node}: Required cluster services not installed: {_services}".format( _services=format_list(info["service_list"]), **info ) , codes.HOST_ALREADY_IN_CLUSTER_CONFIG: lambda info: ( "{host_name}: Cluster configuration files found, the host " "seems to be in a cluster already" ).format(**info) , codes.HOST_ALREADY_IN_CLUSTER_SERVICES: lambda info: ( "{host_name}: Running cluster services: {_services}, the host " "seems to be in a cluster already" ).format( _services=format_list(info["service_list"]), **info ) , codes.SERVICE_VERSION_MISMATCH: service_version_mismatch, codes.WAIT_FOR_NODE_STARTUP_WITHOUT_START: "Cannot specify '--wait' without specifying '--start'" , codes.WAIT_FOR_NODE_STARTUP_STARTED: lambda info: "Waiting for node(s) to start: {_nodes}...".format( _nodes=format_list(info["node_name_list"]) ) , codes.WAIT_FOR_NODE_STARTUP_TIMED_OUT: "Node(s) startup timed out", codes.WAIT_FOR_NODE_STARTUP_ERROR: "Unable to verify all nodes have started" , codes.PCSD_VERSION_TOO_OLD: lambda info: ( "{node}: Old version of pcsd is running on the node, therefore it " "is unable to perform the action" ).format(**info) , codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED: lambda info: "Synchronizing pcsd SSL certificates on node(s) {_nodes}...".format( _nodes=format_list(info["node_name_list"]) ) , codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS: lambda info: "{node}: Success".format(**info) , codes.CLUSTER_WILL_BE_DESTROYED: "Some nodes are already in a cluster. 
Enforcing this will destroy " "existing cluster on those nodes. You should remove the nodes from " "their clusters instead to keep the clusters working properly" , codes.CLUSTER_SETUP_SUCCESS: "Cluster has been successfully set up." , codes.USING_KNOWN_HOST_ADDRESS_FOR_HOST: lambda info: "No addresses specified for host '{host_name}', using '{address}'" .format(**info) , codes.RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE: lambda info: ( "Resource '{inner_resource_id}' will not be accessible by the " "cluster inside bundle '{bundle_id}', at least one of bundle " "options 'control-port' or 'ip-range-start' has to be specified" ).format(**info) , codes.USING_DEFAULT_WATCHDOG: lambda info: ( "No watchdog has been specified for node '{node}'. Using default " "watchdog '{watchdog}'" ).format(**info) , codes.CANNOT_REMOVE_ALL_CLUSTER_NODES: ( "No nodes would be left in the cluster, if you intend to destroy " "the whole cluster, run 'pcs cluster destroy --all' instead" ) , codes.UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE: "Unable to connect to any remaining cluster node" , codes.UNABLE_TO_CONNECT_TO_ALL_REMAINING_NODE: lambda info: ( "Remaining cluster {_node_pl} {_nodes} could not be reached, run " "'pcs cluster sync' on any currently online node once the " "unreachable {_one_pl} become available" ).format( _node_pl=format_plural(info["node_list"], "node"), _nodes=format_list(info["node_list"]), _one_pl=format_plural(info["node_list"], "one") ) , codes.NODES_TO_REMOVE_UNREACHABLE: lambda info: ( "Removed {_node_pl} {_nodes} could not be reached and subsequently " "deconfigured. Run 'pcs cluster destroy' on the unreachable " "{_node_pl}." ).format( _node_pl=format_plural(info["node_list"], "node"), _nodes=format_list(info["node_list"]), ) , codes.NODE_USED_AS_TIE_BREAKER: lambda info: ( "Node '{node}' with id '{node_id}' is used as a tie breaker for " "a qdevice, run 'pcs quorum device update model " "tie_breaker=' to change it" ).format(**info) , codes.COROSYNC_QUORUM_WILL_BE_LOST: "This action will cause a loss of the quorum" , codes.COROSYNC_QUORUM_LOSS_UNABLE_TO_CHECK: ( "Unable to determine whether this action will cause a loss of the " "quorum" ) , codes.SBD_LIST_WATCHDOG_ERROR: lambda info: "Unable to query available watchdogs from sbd: {reason}".format(**info) , codes.SBD_WATCHDOG_NOT_SUPPORTED: lambda info: ( "{node}: Watchdog '{watchdog}' is not supported (it may be a " "software watchdog)" ).format(**info) , codes.SBD_WATCHDOG_VALIDATION_INACTIVE: "Not validating the watchdog" , codes.SBD_WATCHDOG_TEST_ERROR: lambda info: "Unable to initialize test of the watchdog: {reason}".format(**info) , codes.SBD_WATCHDOG_TEST_MULTUPLE_DEVICES: "Multiple watchdog devices available, therefore, watchdog which should " "be tested has to be specified. To list available watchdog devices use " "command 'pcs stonith sbd watchdog list'" , codes.SBD_WATCHDOG_TEST_FAILED: "System should have been reset already" , codes.SYSTEM_WILL_RESET: "System will reset shortly" , codes.RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE: lambda info: ( "Bundle '{bundle_id}' uses unsupported container type, therefore " "it is not possible to set its container options. 
Supported " "container types are: {_container_types}" ).format( _container_types=format_list(info["supported_container_types"]), **info, ) , codes.FENCE_HISTORY_COMMAND_ERROR: lambda info: "Unable to {command_label} fence history: {reason}".format(**info) , codes.FENCE_HISTORY_NOT_SUPPORTED: "Fence history is not supported, please upgrade pacemaker" , codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE: lambda info: ( "Value '{_val}' of option '{_attr}' is not unique across " "'{_agent}' resources. Following resources are configured " "with the same value of the instance attribute: {_res_id_list}" ).format( _val=info["instance_attr_value"], _attr=info["instance_attr_name"], _agent=info["agent_name"], _res_id_list=format_list(info["resource_id_list"]), ) , codes.CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP: lambda info: ( "Group '{group_id}' does not exist and therefore does not contain " "'{adjacent_resource_id}' resource to put resources next to" ) .format(**info) , codes.CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP: lambda info: ( "There is no resource '{adjacent_resource_id}' in the group " "'{group_id}', cannot put resources next to it in the group" ) .format(**info) , codes.CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP: lambda info: "{_resources} already exist{_s} in '{group_id}'" .format( _resources=format_list(info["resource_list"]), _s="" if len(info["resource_list"]) > 1 else "s", **info ) , codes.CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF: lambda info: "Cannot put resource '{resource_id}' next to itself".format(**info) , codes.CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE: lambda info: "Resources specified more than once: {_resources}" .format( _resources=format_list(info["resource_list"]), ) , codes.CANNOT_GROUP_RESOURCE_NO_RESOURCES: lambda info: "No resources to add" , codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE: lambda info: ( "'{resource_id}' is {_type_article} resource, {_type} resources " "cannot be put into a group" ).format( _type_article=type_to_string(info["resource_type"], article=True), _type=type_to_string(info["resource_type"], article=False), **info ) , codes.CANNOT_MOVE_RESOURCE_BUNDLE: "cannot move bundle resources", codes.CANNOT_MOVE_RESOURCE_CLONE: "cannot move cloned resources", codes.CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE: resource_move_ban_clear_master_resource_not_promotable , codes.CANNOT_MOVE_RESOURCE_PROMOTABLE_NOT_MASTER: lambda info: ( "to move promotable clone resources you must use --master and the " "promotable clone id ({promotable_id})" ).format(**info) , codes.CANNOT_MOVE_RESOURCE_STOPPED_NO_NODE_SPECIFIED: # Use both "moving" and "banning" to let user know using "ban" instead # of "move" will not help "You must specify a node when moving/banning a stopped resource" , codes.RESOURCE_MOVE_PCMK_ERROR: lambda info: # Pacemaker no longer prints crm_resource specific options since commit # 8008a5f0c0aa728fbce25f60069d622d0bcbbc9f. There is no need to # translate them or anything else anymore. 
stdout_stderr_to_string( info["stdout"], info["stderr"], prefix="cannot move resource '{resource_id}'".format(**info) ) , codes.RESOURCE_MOVE_PCMK_SUCCESS: resource_move_ban_pcmk_success, codes.CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE: resource_move_ban_clear_master_resource_not_promotable , codes.CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED: # Use both "moving" and "banning" to let user know using "move" instead # of "ban" will not help "You must specify a node when moving/banning a stopped resource" , codes.RESOURCE_BAN_PCMK_ERROR: lambda info: # Pacemaker no longer prints crm_resource specific options since commit # 8008a5f0c0aa728fbce25f60069d622d0bcbbc9f. There is no need to # translate them or anything else anymore. stdout_stderr_to_string( info["stdout"], info["stderr"], prefix="cannot ban resource '{resource_id}'".format(**info) ) , codes.RESOURCE_BAN_PCMK_SUCCESS: resource_move_ban_pcmk_success, codes.CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE: resource_move_ban_clear_master_resource_not_promotable , codes.RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED: "--expired is not supported, please upgrade pacemaker" , codes.RESOURCE_UNMOVE_UNBAN_PCMK_ERROR: lambda info: stdout_stderr_to_string( info["stdout"], info["stderr"], prefix="cannot clear resource '{resource_id}'".format(**info) ) , codes.RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS: lambda info: stdout_stderr_to_string(info["stdout"], info["stderr"]) , codes.PARSE_ERROR_JSON_FILE: lambda info: "Unable to parse {_file_type} file{_file_path}: {full_msg}".format( _file_path=format_optional(info["file_path"], " '{0}'"), _file_type=format_file_role(info["file_type_code"]), **info ) , codes.RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES: lambda info: ( "Disabling specified resources would have an effect on other " "resources\n\n{crm_simulate_plaintext_output}" ).format(**info) , } pcs-0.10.4/pcs/cli/common/env_cli.py000066400000000000000000000006051356771603100172100ustar00rootroot00000000000000class Env(): #pylint: disable=too-many-instance-attributes, too-few-public-methods def __init__(self): self.cib_data = None self.user = None self.groups = None self.corosync_conf_data = None self.booth = None self.pacemaker = None self.known_hosts_getter = None self.debug = False self.request_timeout = None pcs-0.10.4/pcs/cli/common/errors.py000066400000000000000000000030141356771603100171020ustar00rootroot00000000000000ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE = ( "Cannot specify both --all and a list of nodes." ) SEE_MAN_CHANGES = "See 'man pcs' -> Changes in pcs-0.10." HINT_SYNTAX_CHANGE = ( "Syntax has changed from previous version. " + SEE_MAN_CHANGES ) def msg_command_replaced(*new_commands): new = "', '".join(new_commands) return ( f"This command has been replaced with '{new}'. {SEE_MAN_CHANGES}" ) def raise_command_replaced(*new_commands): raise CmdLineInputError(message=msg_command_replaced(*new_commands)) class CmdLineInputError(Exception): """ Exception express that user entered incorrect commad in command line. """ def __init__( self, message=None, hint=None, show_both_usage_and_message=False ): """ string message -- explains what was wrong with the entered command string hint -- provides an additional hint how to proceed bool show_both_usage_and_message -- show both the message and usage The routine which handles this exception behaves according to whether the message was specified (prints this message to user) or not (prints appropriate part of documentation). 
If show_both_usage_and_message is True, documentation will be printed first and the message will be printed after that. Hint is printed every time as the last item. """ super().__init__(message) self.message = message self.hint = hint self.show_both_usage_and_message = show_both_usage_and_message pcs-0.10.4/pcs/cli/common/lib_wrapper.py000066400000000000000000000361751356771603100201100ustar00rootroot00000000000000import logging from collections import namedtuple from typing import Dict, Any from pcs.cli.common import middleware from pcs.lib.commands import ( acl, alert, booth, cib_options, cluster, fencing_topology, node, pcsd, qdevice, quorum, remote_node, resource, resource_agent, sbd, status, stonith, stonith_agent, ) from pcs.lib.commands.constraint import ( colocation as constraint_colocation, order as constraint_order, ticket as constraint_ticket ) from pcs.lib.env import LibraryEnvironment # Note: not properly typed _CACHE: Dict[Any, Any] = {} def wrapper(dictionary): return namedtuple('wrapper', dictionary.keys())(**dictionary) def cli_env_to_lib_env(cli_env): return LibraryEnvironment( logging.getLogger("pcs"), cli_env.report_processor, cli_env.user, cli_env.groups, cli_env.cib_data, cli_env.corosync_conf_data, booth_files_data=cli_env.booth, known_hosts_getter=cli_env.known_hosts_getter, request_timeout=cli_env.request_timeout, ) def lib_env_to_cli_env(lib_env, cli_env): if not lib_env.is_cib_live: cli_env.cib_data = lib_env.final_mocked_cib_content if not lib_env.is_corosync_conf_live: cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() # TODO # We expect that when there is booth set up in cli_env then there is booth # set up in lib_env as well. The code works like that now. Once we start # communicating over the network, we must do extra checks in here to make # sure what the status really is.
#this applies generally, not only for booth #corosync_conf and cib suffer from this problem as well but in these cases #it is dangerously hidden: when inconsistency between cli and lib #environment occurs, original content is put to file (which is wrong) if cli_env.booth: cli_env.booth["modified_env"] = lib_env.get_booth_env(name="").export() return cli_env def bind(cli_env, run_with_middleware, run_library_command): def run(cli_env, *args, **kwargs): lib_env = cli_env_to_lib_env(cli_env) lib_call_result = run_library_command(lib_env, *args, **kwargs) #middlewares need to finish their work and they see only cli_env #so we need to reflect some changes to cli_env lib_env_to_cli_env(lib_env, cli_env) return lib_call_result def decorated_run(*args, **kwargs): return run_with_middleware(run, cli_env, *args, **kwargs) return decorated_run def bind_all(env, run_with_middleware, dictionary): return wrapper(dict( (exposed_fn, bind(env, run_with_middleware, library_fn)) for exposed_fn, library_fn in dictionary.items() )) def get_module(env, middleware_factory, name): if name not in _CACHE: _CACHE[name] = load_module(env, middleware_factory, name) return _CACHE[name] def load_module(env, middleware_factory, name): # pylint: disable=too-many-return-statements, too-many-branches if name == "acl": return bind_all( env, middleware.build(middleware_factory.cib), { "create_role": acl.create_role, "remove_role": acl.remove_role, "assign_role_not_specific": acl.assign_role_not_specific, "assign_role_to_target": acl.assign_role_to_target, "assign_role_to_group": acl.assign_role_to_group, "unassign_role_not_specific": acl.unassign_role_not_specific, "unassign_role_from_target": acl.unassign_role_from_target, "unassign_role_from_group": acl.unassign_role_from_group, "create_target": acl.create_target, "create_group": acl.create_group, "remove_target": acl.remove_target, "remove_group": acl.remove_group, "add_permission": acl.add_permission, "remove_permission": acl.remove_permission, "get_config": acl.get_config, } ) if name == "alert": return bind_all( env, middleware.build(middleware_factory.cib), { "create_alert": alert.create_alert, "update_alert": alert.update_alert, "remove_alert": alert.remove_alert, "add_recipient": alert.add_recipient, "update_recipient": alert.update_recipient, "remove_recipient": alert.remove_recipient, "get_all_alerts": alert.get_all_alerts, } ) if name == "booth": return bind_all( env, middleware.build( middleware_factory.booth_conf, middleware_factory.cib ), { "config_setup": booth.config_setup, "config_destroy": booth.config_destroy, "config_text": booth.config_text, "config_ticket_add": booth.config_ticket_add, "config_ticket_remove": booth.config_ticket_remove, "create_in_cluster": booth.create_in_cluster, "remove_from_cluster": booth.remove_from_cluster, "restart": booth.restart, "config_sync": booth.config_sync, "enable_booth": booth.enable_booth, "disable_booth": booth.disable_booth, "start_booth": booth.start_booth, "stop_booth": booth.stop_booth, "pull_config": booth.pull_config, "get_status": booth.get_status, "ticket_grant": booth.ticket_grant, "ticket_revoke": booth.ticket_revoke, } ) if name == "cluster": return bind_all( env, middleware.build(middleware_factory.cib), { "add_link": cluster.add_link, "add_nodes": cluster.add_nodes, "node_clear": cluster.node_clear, "remove_links": cluster.remove_links, "remove_nodes": cluster.remove_nodes, "remove_nodes_from_cib": cluster.remove_nodes_from_cib, "setup": cluster.setup, "update_link": cluster.update_link, "verify":
cluster.verify, } ) if name == "remote_node": return bind_all( env, middleware.build( middleware_factory.cib, middleware_factory.corosync_conf_existing, ), { "node_add_remote": remote_node.node_add_remote, "node_add_guest": remote_node.node_add_guest, "node_remove_remote": remote_node.node_remove_remote, "node_remove_guest": remote_node.node_remove_guest, } ) if name == 'constraint_colocation': return bind_all( env, middleware.build(middleware_factory.cib), { 'set': constraint_colocation.create_with_set, 'show': constraint_colocation.show, } ) if name == 'constraint_order': return bind_all( env, middleware.build(middleware_factory.cib), { 'set': constraint_order.create_with_set, 'show': constraint_order.show, } ) if name == 'constraint_ticket': return bind_all( env, middleware.build(middleware_factory.cib), { 'set': constraint_ticket.create_with_set, 'show': constraint_ticket.show, 'add': constraint_ticket.create, 'remove': constraint_ticket.remove, } ) if name == "fencing_topology": return bind_all( env, middleware.build(middleware_factory.cib), { "add_level": fencing_topology.add_level, "get_config": fencing_topology.get_config, "remove_all_levels": fencing_topology.remove_all_levels, "remove_levels_by_params": fencing_topology.remove_levels_by_params, "verify": fencing_topology.verify, } ) if name == "node": return bind_all( env, middleware.build(middleware_factory.cib), { "maintenance_unmaintenance_all": node.maintenance_unmaintenance_all, "maintenance_unmaintenance_list": node.maintenance_unmaintenance_list, "maintenance_unmaintenance_local": node.maintenance_unmaintenance_local, "standby_unstandby_all": node.standby_unstandby_all, "standby_unstandby_list": node.standby_unstandby_list, "standby_unstandby_local": node.standby_unstandby_local, } ) if name == "pcsd": return bind_all( env, middleware.build(), { "synchronize_ssl_certificate": pcsd.synchronize_ssl_certificate, } ) if name == "qdevice": return bind_all( env, middleware.build(), { "status": qdevice.qdevice_status_text, "setup": qdevice.qdevice_setup, "destroy": qdevice.qdevice_destroy, "start": qdevice.qdevice_start, "stop": qdevice.qdevice_stop, "kill": qdevice.qdevice_kill, "enable": qdevice.qdevice_enable, "disable": qdevice.qdevice_disable, # following commands are internal use only, called from pcsd "client_net_setup": qdevice.client_net_setup, "client_net_import_certificate": qdevice.client_net_import_certificate, "client_net_destroy": qdevice.client_net_destroy, "sign_net_cert_request": qdevice.qdevice_net_sign_certificate_request, } ) if name == "quorum": return bind_all( env, middleware.build(middleware_factory.corosync_conf_existing), { "add_device": quorum.add_device, "get_config": quorum.get_config, "remove_device": quorum.remove_device, "remove_device_heuristics": quorum.remove_device_heuristics, "set_expected_votes_live": quorum.set_expected_votes_live, "set_options": quorum.set_options, "status": quorum.status_text, "status_device": quorum.status_device_text, "update_device": quorum.update_device, } ) if name == "resource_agent": return bind_all( env, middleware.build(), { "describe_agent": resource_agent.describe_agent, "list_agents": resource_agent.list_agents, "list_agents_for_standard_and_provider": resource_agent.list_agents_for_standard_and_provider, "list_ocf_providers": resource_agent.list_ocf_providers, "list_standards": resource_agent.list_standards, } ) if name == "resource": return bind_all( env, middleware.build( middleware_factory.cib, middleware_factory.corosync_conf_existing, ), { "ban": 
resource.ban,
                "bundle_create": resource.bundle_create,
                "bundle_reset": resource.bundle_reset,
                "bundle_update": resource.bundle_update,
                "create": resource.create,
                "create_as_clone": resource.create_as_clone,
                "create_in_group": resource.create_in_group,
                "create_into_bundle": resource.create_into_bundle,
                "disable": resource.disable,
                "disable_safe": resource.disable_safe,
                "disable_simulate": resource.disable_simulate,
                "enable": resource.enable,
                "get_failcounts": resource.get_failcounts,
                "group_add": resource.group_add,
                "manage": resource.manage,
                "move": resource.move,
                "get_resource_relations_tree":
                    resource.get_resource_relations_tree,
                "unmanage": resource.unmanage,
                "unmove_unban": resource.unmove_unban,
            }
        )

    if name == "cib_options":
        return bind_all(
            env,
            middleware.build(
                middleware_factory.cib,
            ),
            {
                "set_operations_defaults": cib_options.set_operations_defaults,
                "set_resources_defaults": cib_options.set_resources_defaults,
            }
        )

    if name == "status":
        return bind_all(
            env,
            middleware.build(
                middleware_factory.cib,
                middleware_factory.corosync_conf_existing,
            ),
            {
                "full_cluster_status_plaintext":
                    status.full_cluster_status_plaintext,
            }
        )

    if name == "stonith":
        return bind_all(
            env,
            middleware.build(
                middleware_factory.cib,
                middleware_factory.corosync_conf_existing,
            ),
            {
                "create": stonith.create,
                "create_in_group": stonith.create_in_group,
                "history_get_text": stonith.history_get_text,
                "history_cleanup": stonith.history_cleanup,
                "history_update": stonith.history_update,
            }
        )

    if name == "sbd":
        return bind_all(
            env,
            middleware.build(),
            {
                "enable_sbd": sbd.enable_sbd,
                "disable_sbd": sbd.disable_sbd,
                "get_cluster_sbd_status": sbd.get_cluster_sbd_status,
                "get_cluster_sbd_config": sbd.get_cluster_sbd_config,
                "get_local_sbd_config": sbd.get_local_sbd_config,
                "initialize_block_devices": sbd.initialize_block_devices,
                "get_local_devices_info": sbd.get_local_devices_info,
                "set_message": sbd.set_message,
                "get_local_available_watchdogs":
                    sbd.get_local_available_watchdogs,
                "test_local_watchdog": sbd.test_local_watchdog,
            }
        )

    if name == "stonith_agent":
        return bind_all(
            env,
            middleware.build(),
            {
                "describe_agent": stonith_agent.describe_agent,
                "list_agents": stonith_agent.list_agents,
            }
        )

    raise Exception("No library part '{0}'".format(name))

class Library():
    # pylint: disable=too-few-public-methods
    def __init__(self, env, middleware_factory):
        self.env = env
        self.middleware_factory = middleware_factory

    def __getattr__(self, name):
        return get_module(self.env, self.middleware_factory, name)
pcs-0.10.4/pcs/cli/common/middleware.py000066400000000000000000000052231356771603100177070ustar00rootroot00000000000000from collections import namedtuple
from functools import partial

from pcs.cli.common.console_report import error

def build(*middleware_list):
    def run(command, env, *args, **kwargs):
        next_in_line = command
        for next_command in reversed(middleware_list):
            next_in_line = partial(next_command, next_in_line)

        return next_in_line(env, *args, **kwargs)
    return run

def cib(filename, touch_cib_file):
    """
    return configured middleware that takes care of the local cib

    string filename is a path to the local cib file; an empty value means no
        local cib is used
    callable touch_cib_file ensures the local cib file exists, takes the
        filename as its only parameter
    """
    def apply(next_in_line, env, *args, **kwargs):
        if filename:
            touch_cib_file(filename)
            try:
                with open(filename, mode="r") as cib_file:
                    original_content = cib_file.read()
            except EnvironmentError as e:
                raise error(
                    "Cannot read cib file '{0}': '{1}'"
                    .format(filename, str(e))
                )
            env.cib_data = original_content

        result_of_next = next_in_line(env, *args, **kwargs)

        if filename and env.cib_data != original_content:
            try:
                with open(filename, mode="w") as cib_file:
                    cib_file.write(env.cib_data)
            except EnvironmentError as e:
                raise error(
                    "Cannot write cib file '{0}': '{1}'"
                    .format(filename, str(e))
                )

        return result_of_next
    return apply

def corosync_conf_existing(local_file_path):
    def apply(next_in_line, env, *args, **kwargs):
        if local_file_path:
            try:
                env.corosync_conf_data = open(local_file_path).read()
            except EnvironmentError as e:
                raise error("Unable to read {0}: {1}".format(
                    local_file_path, e.strerror
                ))

        result_of_next = next_in_line(env, *args, **kwargs)

        if local_file_path:
            try:
                file = open(local_file_path, "w")
                file.write(env.corosync_conf_data)
                file.close()
            except EnvironmentError as e:
                raise error("Unable to write {0}: {1}".format(
                    local_file_path, e.strerror
                ))

        return result_of_next
    return apply

def create_middleware_factory(**kwargs):
    """
    Commandline options: no options
    """
    return namedtuple('MiddlewareFactory', kwargs.keys())(**kwargs)
pcs-0.10.4/pcs/cli/common/parse_args.py000066400000000000000000000414261356771603100177250ustar00rootroot00000000000000from pcs.cli.common.console_report import format_plural
from pcs.cli.common.errors import (
    CmdLineInputError,
    HINT_SYNTAX_CHANGE,
)
from pcs.common.tools import format_list

ARG_TYPE_DELIMITER = "%"

# h = help, f = file,
# p = password (cluster auth), u = user (cluster auth),
PCS_SHORT_OPTIONS = "hf:p:u:"
PCS_LONG_OPTIONS = [
    "debug", "version", "help", "fullhelp",
    "force", "skip-offline", "interactive", "autodelete", "simulate",
    "all", "full", "local", "wait", "config",
    "start", "enable", "disabled", "off", "request-timeout=",
    "safe", "no-strict",
    "pacemaker", "corosync",
    "no-default-ops", "defaults", "nodesc",
    "master", "name=", "group=", "node=",
    "from=", "to=", "after=", "before=",
    "corosync_conf=", "booth-conf=", "booth-key=",
    "no-watchdog-validation",
    "no-keys-sync",
    #in pcs status - do not display resource status on inactive node
    "hide-inactive",
    # pcs resource (un)manage - enable or disable monitor operations
    "monitor",
    # TODO remove
    # used only in deprecated 'pcs resource|stonith show'
    "groups",
    # "pcs resource clear --expired" - only clear expired moves and bans
    "expired",
]

def split_list(arg_list, separator):
    """return list of list of arg_list using separator as delimiter"""
    separator_indexes = [i for i, x in enumerate(arg_list) if x == separator]
    bounds = zip([0]+[i+1 for i in separator_indexes], separator_indexes+[None])
    return [arg_list[i:j] for i, j in bounds]

def split_list_by_any_keywords(arg_list, keyword_label):
    """
    Return a dict mapping each keyword (any arg not containing =) to a list
    of the args that follow it

    iterable arg_list -- (part of) argv
    string keyword_label -- description of all keywords
    """
    if "=" in arg_list[0]:
        raise CmdLineInputError(
            "Invalid character '=' in {} '{}'".format(
                keyword_label, arg_list[0],
            )
        )
    current_keyword = None
    groups = {}
    for arg in arg_list:
        if "=" in arg:
            groups[current_keyword].append(arg)
        else:
            current_keyword = arg
            if current_keyword in groups:
                raise CmdLineInputError(
                    "{} '{}' defined multiple times".format(
                        keyword_label.capitalize(), current_keyword
                    )
                )
            groups[current_keyword] = []
    return groups

def split_option(arg, allow_empty_value=True):
    """
    Get (key, value) from a key=value commandline argument.

    Split the argument by the first = and return resulting parts. Raise
    CmdLineInputError if the argument cannot be split.

    string arg -- commandline argument
    allow_empty_value -- if True, empty value is allowed. Otherwise,
        CmdLineInputError exception is raised

    Commandline options: no options
    """
    if "=" not in arg:
        raise CmdLineInputError("missing value of '{0}' option".format(arg))
    if arg.startswith("="):
        raise CmdLineInputError("missing key in '{0}' option".format(arg))
    key, value = arg.split("=", 1)
    if not (value or allow_empty_value):
        raise CmdLineInputError("value of '{0}' option is empty".format(key))
    return key, value

def prepare_options(cmdline_args, allowed_repeatable_options=()):
    """
    Get a dict of options from cmdline key=value args

    iterable cmdline_args -- command line arguments
    iterable allowed_repeatable_options -- options that can be set several
        times

    Commandline options: no options
    """
    options = dict()
    for arg in cmdline_args:
        name, value = split_option(arg)
        if name not in options:
            if name in allowed_repeatable_options:
                options[name] = [value]
            else:
                options[name] = value
        elif name in allowed_repeatable_options:
            options[name].append(value)
        elif options[name] != value:
            raise CmdLineInputError(
                "duplicate option '{0}' with different values '{1}' and '{2}'"
                .format(name, options[name], value)
            )
    return options

def prepare_options_allowed(
    cmdline_args, allowed_options, allowed_repeatable_options=()
):
    """
    Get a dict of options from cmdline key=value args, raise on a key that is
    not allowed

    iterable cmdline_args -- command line arguments
    iterable allowed_options -- list of allowed options
    iterable allowed_repeatable_options -- options that can be set several
        times

    Commandline options: no options
    """
    parsed_options = prepare_options(
        cmdline_args,
        allowed_repeatable_options=allowed_repeatable_options
    )
    unknown_options = (
        frozenset(parsed_options.keys()) - frozenset(allowed_options)
    )
    if unknown_options:
        raise CmdLineInputError(
            "Unknown option{s} '{options}'".format(
                s=("s" if len(unknown_options) > 1 else ""),
                options="', '".join(sorted(unknown_options))
            )
        )
    return parsed_options

def group_by_keywords(
    arg_list, keyword_set,
    implicit_first_group_key=None,
    keyword_repeat_allowed=True,
    group_repeated_keywords=None,
    only_found_keywords=False
):
    """
    Return a dictionary with keywords as keys and the following arguments as
    values. For example, when the keywords are "first" and "second", then for
    arg_list ["first", 1, 2, "second", 3] it returns
    {"first": [1, 2], "second": [3]}

    list arg_list is commandline arguments containing keywords
    set keyword_set contains all expected keywords
    string implicit_first_group_key is the key for capturing arguments before
        the occurrence of the first keyword. implicit_first_group_key is not a
        keyword => its occurrence in args is considered an ordinary argument.
    bool keyword_repeat_allowed is a flag to turn on/off checking the
        uniqueness of each keyword in arg_list.
    list group_repeated_keywords contains keywords for which each occurrence
        is packed separately. For example, when the keywords are "first" and
        "second" and group_repeated_keywords is ["first"], then for arg_list
        ["first", 1, 2, "second", 3, "first", 4] it returns
        {"first": [[1, 2], [4]], "second": [3]}. Repeating is allowed for
        these keywords.
    bool only_found_keywords is a flag for deciding whether to leave out
        keywords that did not appear in arg_list.
    """
    def get_keywords_for_grouping():
        if not group_repeated_keywords:
            return []
        #implicit_first_group_key is not a keyword: when it is in
        #group_repeated_keywords but not in keyword_set, it is considered
        #unknown.
        unknown_keywords = set(group_repeated_keywords) - set(keyword_set)
        if unknown_keywords:
            #to avoid developer mistake
            raise AssertionError(
                "Keywords in grouping not in keyword set: {0}"
                .format(", ".join(unknown_keywords))
            )
        return group_repeated_keywords

    def get_completed_groups():
        completed_groups = groups.copy()
        if not only_found_keywords:
            for keyword in keyword_set:
                if keyword not in completed_groups:
                    completed_groups[keyword] = []
            if(
                implicit_first_group_key
                and
                implicit_first_group_key not in completed_groups
            ):
                completed_groups[implicit_first_group_key] = []
        return completed_groups

    def is_acceptable_keyword_occurence(keyword):
        return (
            keyword not in groups.keys()
            or
            keyword_repeat_allowed
            or
            keyword in keywords_for_grouping
        )

    def process_keyword(keyword):
        if not is_acceptable_keyword_occurence(keyword):
            raise CmdLineInputError(
                "'{0}' cannot be used more than once".format(keyword)
            )
        groups.setdefault(keyword, [])
        if keyword in keywords_for_grouping:
            groups[keyword].append([])

    def process_non_keyword(keyword, arg):
        place = groups[keyword]
        if keyword in keywords_for_grouping:
            place = place[-1]
        place.append(arg)

    groups = {}
    keywords_for_grouping = get_keywords_for_grouping()
    if arg_list:
        current_keyword = None
        if arg_list[0] not in keyword_set:
            if not implicit_first_group_key:
                raise CmdLineInputError()
            process_keyword(implicit_first_group_key)
            current_keyword = implicit_first_group_key
        for arg in arg_list:
            if arg in keyword_set:
                process_keyword(arg)
                current_keyword = arg
            else:
                process_non_keyword(current_keyword, arg)

    return get_completed_groups()

def parse_typed_arg(arg, allowed_types, default_type):
    """
    Get (type, value) from a typed commandline argument.

    Split the argument by the type separator and return the type and the
    value. Raise CmdLineInputError if the argument format or type is not
    valid.

    string arg -- commandline argument
    Iterable allowed_types -- list of allowed argument types
    string default_type -- type to return if the argument doesn't specify a
        type
    """
    if ARG_TYPE_DELIMITER not in arg:
        return default_type, arg
    arg_type, arg_value = arg.split(ARG_TYPE_DELIMITER, 1)
    if not arg_type:
        return default_type, arg_value
    if arg_type not in allowed_types:
        raise CmdLineInputError(
            "'{arg_type}' is not an allowed type for '{arg_full}', use {hint}"
            .format(
                arg_type=arg_type,
                arg_full=arg,
                hint=", ".join(sorted(allowed_types))
            )
        )
    return arg_type, arg_value

def is_num(arg):
    return arg.isdigit() or arg.lower() == "infinity"

def is_negative_num(arg):
    return arg.startswith("-") and is_num(arg[1:])

def is_short_option_expecting_value(arg):
    return (
        len(arg) == 2
        and
        arg[0] == "-"
        and
        "{0}:".format(arg[1]) in PCS_SHORT_OPTIONS
    )

def is_long_option_expecting_value(arg):
    return (
        len(arg) > 2
        and
        arg[0:2] == "--"
        and
        "{0}=".format(arg[2:]) in PCS_LONG_OPTIONS
    )

def is_option_expecting_value(arg):
    return (
        is_short_option_expecting_value(arg)
        or
        is_long_option_expecting_value(arg)
    )

def filter_out_non_option_negative_numbers(arg_list):
    """
    Return arg_list without non-option negative numbers.

    Negative numbers following an option expecting a value are kept.

    There is a problematic legacy here. The argument "--" has a special
    meaning: it can be used to signal that no more options will follow. This
    would solve the problem with negative numbers in a standard way: there
    would be no special approach to negative numbers, everything would be
    left in the hands of users. But introducing it now would be a backward
    incompatible change.
list arg_list contains command line arguments """ args_without_negative_nums = [] for i, arg in enumerate(arg_list): prev_arg = arg_list[i-1] if i > 0 else "" if not is_negative_num(arg) or is_option_expecting_value(prev_arg): args_without_negative_nums.append(arg) return args_without_negative_nums def filter_out_options(arg_list): """ Return arg_list without options and its negative numbers. list arg_list contains command line arguments """ args_without_options = [] for i, arg in enumerate(arg_list): prev_arg = arg_list[i-1] if i > 0 else "" if( not is_option_expecting_value(prev_arg) and ( not arg.startswith("-") or arg == "-" or is_negative_num(arg) ) ): args_without_options.append(arg) return args_without_options class InputModifiers(): def __init__(self, options): self._defined_options = set(options.keys()) self._options = dict(options) self._options.update({ # boolean values "--all": "--all" in options, "--autodelete": "--autodelete" in options, "--config": "--config" in options, "--corosync": "--corosync" in options, "--debug": "--debug" in options, "--defaults": "--defaults" in options, "--disabled": "--disabled" in options, "--enable": "--enable" in options, "--expired": "--expired" in options, "--force": "--force" in options, "--full": "--full" in options, # TODO remove # used only in deprecated 'pcs resource|stonith show' "--groups": "--groups" in options, "--hide-inactive": "--hide-inactive" in options, "--interactive": "--interactive" in options, "--local": "--local" in options, "--master": "--master" in options, "--monitor": "--monitor" in options, "--no-default-ops": "--no-default-ops" in options, "--nodesc": "--nodesc" in options, "--no-keys-sync": "--no-keys-sync" in options, "--no-strict": "--no-strict" in options, "--no-watchdog-validation": "--no-watchdog-validation" in options, "--off": "--off" in options, "--pacemaker": "--pacemaker" in options, "--safe": "--safe" in options, "--simulate": "--simulate" in options, "--skip-offline": "--skip-offline" in options, "--start": "--start" in options, # string values "--after": options.get("--after", None), "--before": options.get("--before", None), "--booth-conf": options.get("--booth-conf", None), "--booth-key": options.get("--booth-key", None), "--corosync_conf": options.get("--corosync_conf", None), "--from": options.get("--from", None), "--group": options.get("--group", None), "--name": options.get("--name", None), "--node": options.get("--node", None), "--request-timeout": options.get("--request-timeout", None), "--to": options.get("--to", None), "--wait": options.get("--wait", False), "-f": options.get("-f", None), "-p": options.get("-p", None), "-u": options.get("-u", None), }) def get_subset(self, *options, **custom_options): opt_dict = { opt: self.get(opt) for opt in options if self.is_specified(opt) } opt_dict.update(custom_options) return InputModifiers(opt_dict) def ensure_only_supported( self, *supported_options, hint_syntax_changed=False ): unsupported_options = ( # --debug is supported in all commands self._defined_options - set(supported_options) - set(["--debug"]) ) if unsupported_options: pluralize = lambda word: format_plural(unsupported_options, word) raise CmdLineInputError( "Specified {option} {option_list} {_is} not supported in this " "command".format( option=pluralize("option"), option_list=format_list(sorted(unsupported_options)), _is=pluralize("is"), ), # Print error messages which point users to the changes section # in pcs manpage. # To be removed in the next significant version. 
hint=(HINT_SYNTAX_CHANGE if hint_syntax_changed else None) ) def ensure_not_mutually_exclusive(self, *mutually_exclusive): options_to_report = self._defined_options & set(mutually_exclusive) if len(options_to_report) > 1: raise CmdLineInputError( "Only one of {} can be used".format( format_list(sorted(options_to_report)) ) ) def ensure_not_incompatible(self, checked, incompatible): if not checked in self._defined_options: return disallowed = self._defined_options & set(incompatible) if disallowed: raise CmdLineInputError( "'{}' cannot be used with {}".format( checked, format_list(sorted(disallowed)) ) ) def is_specified(self, option): return option in self._defined_options def get(self, option, default=None): if option in self._defined_options: return self._options[option] if default is not None: return default if option in self._options: return self._options[option] raise AssertionError(f"Non existing default value for '{option}'") pcs-0.10.4/pcs/cli/common/printable_tree.py000066400000000000000000000026761356771603100206020ustar00rootroot00000000000000from typing import Sequence class PrintableTreeNode: @property def members(self) -> Sequence["PrintableTreeNode"]: raise NotImplementedError() @property def detail(self) -> Sequence[str]: raise NotImplementedError() @property def is_leaf(self) -> bool: raise NotImplementedError() def get_title(self, verbose: bool) -> str: raise NotImplementedError() def tree_to_lines( node: PrintableTreeNode, verbose: bool = False, title_prefix: str = "", indent: str = "", ) -> Sequence[str]: """ Return sequence of strings representing lines to print out tree structure on command line. """ result = [] note = "" if node.is_leaf: note = f" [displayed elsewhere]" title = node.get_title(verbose) result.append(f"{title_prefix}{title}{note}") if node.is_leaf: return result _indent = "| " if not node.members: _indent = " " for line in node.detail: result.append(f"{indent}{_indent}{line}") _indent = "| " _title_prefix = "|- " for member in node.members: if member == node.members[-1]: _indent = " " _title_prefix = "`- " result.extend( tree_to_lines( member, verbose, indent=f"{indent}{_indent}", title_prefix=f"{indent}{_title_prefix}", ) ) return result pcs-0.10.4/pcs/cli/common/reports.py000066400000000000000000000117761356771603100173020ustar00rootroot00000000000000import sys import inspect from functools import partial from pcs.cli.booth.console_report import ( CODE_TO_MESSAGE_BUILDER_MAP as BOOTH_CODE_TO_MESSAGE_BUILDER_MAP ) from pcs.cli.common.console_report import ( CODE_TO_MESSAGE_BUILDER_MAP, error, warn, ) from pcs.cli.constraint_all.console_report import ( CODE_TO_MESSAGE_BUILDER_MAP as CONSTRAINT_CODE_TO_MESSAGE_BUILDER_MAP ) from pcs.common import report_codes as codes from pcs.common.reports import SimpleReportProcessorInterface from pcs.lib.errors import LibraryError, ReportItemSeverity CODE_BUILDER_MAP = {} CODE_BUILDER_MAP.update(CODE_TO_MESSAGE_BUILDER_MAP) CODE_BUILDER_MAP.update(CONSTRAINT_CODE_TO_MESSAGE_BUILDER_MAP) CODE_BUILDER_MAP.update(BOOTH_CODE_TO_MESSAGE_BUILDER_MAP) def build_default_message_from_report(report_item, force_text): return "Unknown report: {0} info: {1}{2}".format( report_item.code, str(report_item.info), force_text, ) def build_message_from_report(code_builder_map, report_item, force_text=""): if report_item.code not in code_builder_map: return build_default_message_from_report(report_item, force_text) message = code_builder_map[report_item.code] #Sometimes report item info is not needed for message building. 
#In that case the message is a string. Otherwise the message is a callable. if not callable(message): return message + force_text try: # Object functools.partial cannot be used with inspect because it is not # regular python function. We have to use original function for that. if isinstance(message, partial): keywords = message.keywords if message.keywords is not None else {} args = inspect.getfullargspec(message.func).args del args[:len(message.args)] args = [arg for arg in args if arg not in keywords] else: args = inspect.getfullargspec(message).args if "force_text" in args: return message(report_item.info, force_text) return message(report_item.info) + force_text except(TypeError, KeyError): return build_default_message_from_report(report_item, force_text) build_report_message = partial(build_message_from_report, CODE_BUILDER_MAP) class LibraryReportProcessorToConsole(SimpleReportProcessorInterface): def __init__(self, debug=False): self.debug = debug self.items = [] def append(self, report_item): self.items.append(report_item) return self def extend(self, report_item_list): self.items.extend(report_item_list) return self @property def errors_count(self): return len([ item for item in self.items if item.severity == ReportItemSeverity.ERROR ]) def report_list(self, report_list): return self._send(report_list) def process(self, report_item): self.append(report_item) self.send() def process_list(self, report_item_list): self.extend(report_item_list) self.send() def _send(self, report_item_list, print_errors=True): errors = [] for report_item in report_item_list: if report_item.severity == ReportItemSeverity.ERROR: if print_errors: error(build_report_message( report_item, _prepare_force_text(report_item) )) errors.append(report_item) elif report_item.severity == ReportItemSeverity.WARNING: warn(build_report_message(report_item)) elif self.debug or report_item.severity != ReportItemSeverity.DEBUG: msg = build_report_message(report_item) if msg: print(msg) return errors def send(self): errors = self._send(self.items, print_errors=False) self.items = [] if errors: raise LibraryError(*errors) def _prepare_force_text(report_item): force_text_map = { codes.SKIP_OFFLINE_NODES: ", use --skip-offline to override", } if report_item.forceable: return force_text_map.get( report_item.forceable, ", use --force to override" ) return "" def process_library_reports(report_item_list): """ report_item_list list of ReportItem """ if not report_item_list: raise error("Errors have occurred, therefore pcs is unable to continue") critical_error = False for report_item in report_item_list: if report_item.severity == ReportItemSeverity.WARNING: print("Warning: " + build_report_message(report_item)) continue if report_item.severity != ReportItemSeverity.ERROR: print(build_report_message(report_item)) continue sys.stderr.write('Error: {0}\n'.format(build_report_message( report_item, _prepare_force_text(report_item) ))) critical_error = True if critical_error: sys.exit(1) pcs-0.10.4/pcs/cli/common/routing.py000066400000000000000000000015051356771603100172600ustar00rootroot00000000000000from pcs import utils from pcs.cli.common.errors import CmdLineInputError def create_router(cmd_map, usage_sub_cmd, default_cmd=None): def _router(lib, argv, modifiers): if argv: sub_cmd, *argv_next = argv else: if default_cmd is None: raise CmdLineInputError() sub_cmd, argv_next = default_cmd, [] try: if sub_cmd not in cmd_map: sub_cmd = "" raise CmdLineInputError() return cmd_map[sub_cmd](lib, argv_next, modifiers) except 
CmdLineInputError as e:
            if not usage_sub_cmd:
                raise
            utils.exit_on_cmdline_input_errror(
                e, usage_sub_cmd[0], (usage_sub_cmd[1:] + [sub_cmd])
            )
    return _router
pcs-0.10.4/pcs/cli/constraint/000077500000000000000000000000001356771603100161125ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint/__init__.py000066400000000000000000000000001356771603100202110ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint/command.py000066400000000000000000000050671356771603100201100ustar00rootroot00000000000000from pcs.cli.constraint import parse_args, console_report
from pcs.common.tools import indent

def create_with_set(create_with_set_library_call, argv, modifiers):
    """
    callable create_with_set_library_call creates a constraint with a
        resource set
    list argv is a part of commandline args, see usage for
        "constraint (colocation|order|ticket) set"
    dict like object modifiers can contain "force" which allows resources in
        a clone/master and duplicate constraints

    Commandline options:
      * --force - allow resource inside clone (or master), allow duplicate
        element
      * -f - CIB file
    """
    resource_set_list, constraint_options = parse_args.prepare_set_args(argv)
    create_with_set_library_call(
        resource_set_list, constraint_options,
        resource_in_clone_alowed=modifiers.get("--force"),
        duplication_alowed=modifiers.get("--force"),
    )

def show_constraints_with_set(constraint_list, show_detail, indent_step=2):
    """
    return a list of console lines with info about constraints

    list of dict constraint_list see constraint in pcs/lib/exchange_formats.md
    bool show_detail specifies whether to show ids with options
    int indent_step is the number of spaces used for indenting

    Commandline options: no options
    """
    return ["Resource Sets:"] + indent(
        [
            console_report.constraint_with_sets(constraint, with_id=show_detail)
            for constraint in constraint_list
        ],
        indent_step=indent_step
    )

def show(caption, load_constraints, format_options, modifiers):
    """
    load constraints and return a list of console lines with info about
    constraints

    string caption for example "Ticket Constraints:"
    callable load_constraints returns desired constraints as a dictionary
        like {"plain": [], "with_resource_sets": []}
    callable format_options takes a dict of options and a show_detail flag
        (bool) and returns a string with the constraint formatted for the
        commandline
    modifiers dict like object with command modifiers

    Commandline options:
      * -f - CIB file
      * --full - print more details
    """
    show_detail = modifiers.get("--full")
    constraints = load_constraints()

    line_list = [caption]
    line_list.extend([
        " " + format_options(constraint_options_dict, show_detail)
        for constraint_options_dict in constraints["plain"]
    ])

    if constraints["with_resource_sets"]:
        line_list.extend(
            indent(show_constraints_with_set(
                constraints["with_resource_sets"],
                show_detail
            ))
        )

    return line_list
pcs-0.10.4/pcs/cli/constraint/console_report.py000066400000000000000000000027541356771603100215310ustar00rootroot00000000000000def constraint_plain(constraint_type, constraint_info, with_id=False):
    return "{0} {1}".format(
        constraint_type,
        ' '.join(prepare_options(constraint_info['options'], with_id))
    )

def resource_sets(set_list, with_id=True):
    """
    list of dict set_list see resource set in pcs/lib/exchange_formats.md
    """
    report = []
    for resource_set in set_list:
        report.extend(
            ["set"] + resource_set["ids"] + options(resource_set["options"])
        )
        if with_id:
            report.append(id_from_options(resource_set["options"]))
    return report

def options(options_dict):
    return [
        key+"="+value
        for key, value in sorted(options_dict.items())
        if key != "id"
    ]

def id_from_options(options_dict):
    return
"(id:"+options_dict.get("id", "")+")" def constraint_with_sets(constraint_info, with_id=True): """ dict constraint_info see constraint in pcs/lib/exchange_formats.md bool with_id have to show id with options_dict """ options_dict = options(constraint_info["options"]) return " ".join( resource_sets(constraint_info["resource_sets"], with_id) + (["setoptions"] + options_dict if options_dict else []) + ([id_from_options(constraint_info["options"])] if with_id else []) ) def prepare_options(options_dict, with_id=True): return ( options(options_dict) + ([id_from_options(options_dict)] if with_id else []) ) pcs-0.10.4/pcs/cli/constraint/parse_args.py000066400000000000000000000023061356771603100206130ustar00rootroot00000000000000from pcs.cli.common import parse_args from pcs.cli.common.errors import CmdLineInputError def prepare_resource_sets(cmdline_args): return [ { "ids": [id for id in args if "=" not in id], "options": parse_args.prepare_options( [opt for opt in args if "=" in opt] ), } for args in parse_args.split_list(cmdline_args, "set") ] def prepare_set_args(argv): if argv.count("setoptions") > 1: raise CmdLineInputError( "Keyword 'setoptions' may be mentioned at most once" ) resource_set_args, constraint_options_args = ( parse_args.split_list(argv, "setoptions") if "setoptions" in argv else (argv, []) ) if not resource_set_args: raise CmdLineInputError() resource_set_list = prepare_resource_sets(resource_set_args) if( not resource_set_list or not all(resource_set["ids"] for resource_set in resource_set_list) ): raise CmdLineInputError() constraint_options = {} if constraint_options_args: constraint_options = parse_args.prepare_options(constraint_options_args) return (resource_set_list, constraint_options) pcs-0.10.4/pcs/cli/constraint_all/000077500000000000000000000000001356771603100167425ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_all/__init__.py000066400000000000000000000000001356771603100210410ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_all/console_report.py000066400000000000000000000041701356771603100223530ustar00rootroot00000000000000from pcs.cli.constraint.console_report import ( constraint_plain as constraint_plain_default, constraint_with_sets, ) from pcs.cli.constraint_colocation.console_report import ( constraint_plain as colocation_plain ) from pcs.cli.constraint_order.console_report import ( constraint_plain as order_plain ) from pcs.cli.constraint_ticket.console_report import ( constraint_plain as ticket_plain ) from pcs.common import report_codes as codes def constraint(constraint_type, constraint_info, with_id=True): """ dict constraint_info see constraint in pcs/lib/exchange_formats.md bool with_id have to show id with options_dict """ if "resource_sets" in constraint_info: return constraint_with_sets(constraint_info, with_id) return constraint_plain(constraint_type, constraint_info, with_id) def constraint_plain(constraint_type, options_dict, with_id=False): """return console shape for any constraint_type of plain constraint""" type_report_map = { "rsc_colocation": colocation_plain, "rsc_order": order_plain, "rsc_ticket": ticket_plain, } if constraint_type not in type_report_map: return constraint_plain_default(constraint_type, options_dict, with_id) return type_report_map[constraint_type](options_dict, with_id) #Each value (a callable taking report_item.info) returns a message. #Force text will be appended if necessary. #If it is necessary to put the force text inside the string then the callable #must take the force_text parameter. 
CODE_TO_MESSAGE_BUILDER_MAP = { codes.DUPLICATE_CONSTRAINTS_EXIST: lambda info, force_text: "duplicate constraint already exists{0}\n".format(force_text) + "\n".join([ " " + constraint(info["constraint_type"], constraint_info) for constraint_info in info["constraint_info_list"] ]) , codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE: lambda info: ( "{resource_id} is a {parent_type} resource, you should use the" " {parent_type} id: {parent_id} when adding constraints" ).format( **info ) , } pcs-0.10.4/pcs/cli/constraint_colocation/000077500000000000000000000000001356771603100203245ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_colocation/__init__.py000066400000000000000000000000001356771603100224230ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_colocation/command.py000066400000000000000000000024521356771603100223170ustar00rootroot00000000000000from pcs.cli.common.errors import CmdLineInputError from pcs.cli.constraint import command from pcs.cli.constraint_colocation import console_report def create_with_set(lib, argv, modifiers): """ create colocation constraint with resource set object lib exposes library list argv see usage for "constraint colocation set" dict like object modifiers can contain "force" allows resource in clone/master and constraint duplicity Options: * --force - allow resource inside clone (or master), allow duplicate element * -f - CIB file """ modifiers.ensure_only_supported("-f", "--force") command.create_with_set( lib.constraint_colocation.set, argv, modifiers, ) def show(lib, argv, modifiers): """ show all colocation constraints object lib exposes library list argv see usage for "constraint colocation show" dict like object modifiers can contain "full" Options: * --full - print more details * -f - CIB file """ modifiers.ensure_only_supported("-f", "--full") if argv: raise CmdLineInputError() print("\n".join(command.show( "Colocation Constraints:", lib.constraint_colocation.show, console_report.constraint_plain, modifiers, ))) pcs-0.10.4/pcs/cli/constraint_colocation/console_report.py000066400000000000000000000015001356771603100237270ustar00rootroot00000000000000def constraint_plain(constraint_info, with_id=False): """ dict constraint_info see constraint in pcs/lib/exchange_formats.md bool with_id have to show id with options_dict """ options_dict = constraint_info["options"] co_resource1 = options_dict.get("rsc", "") co_resource2 = options_dict.get("with-rsc", "") co_id = options_dict.get("id", "") co_score = options_dict.get("score", "") score_text = "(score:" + co_score + ")" console_option_list = [ "(%s:%s)" % (option[0], option[1]) for option in sorted(options_dict.items()) if option[0] not in ("rsc", "with-rsc", "id", "score") ] if with_id: console_option_list.append("(id:%s)" % co_id) return " ".join( [co_resource1, "with", co_resource2, score_text] + console_option_list ) pcs-0.10.4/pcs/cli/constraint_order/000077500000000000000000000000001356771603100173055ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_order/__init__.py000066400000000000000000000000001356771603100214040ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_order/command.py000066400000000000000000000024151356771603100212770ustar00rootroot00000000000000from pcs.cli.common.errors import CmdLineInputError from pcs.cli.constraint import command from pcs.cli.constraint_order import console_report def create_with_set(lib, argv, modifiers): """ create order constraint with resource set object lib exposes library list argv see usage for "constraint colocation set" 
    dict like object modifiers can contain "force" which allows resources in
        a clone/master and duplicate constraints

    Options:
      * --force - allow resource inside clone (or master), allow duplicate
        element
      * -f - CIB file
    """
    modifiers.ensure_only_supported("--force", "-f")
    command.create_with_set(
        lib.constraint_order.set,
        argv,
        modifiers
    )

def show(lib, argv, modifiers):
    """
    show all order constraints

    object lib exposes library
    list argv see usage for "constraint order show"
    dict like object modifiers can contain "full"

    Options:
      * --full - print more details
      * -f - CIB file
    """
    modifiers.ensure_only_supported("-f", "--full")
    if argv:
        raise CmdLineInputError()
    print("\n".join(command.show(
        "Ordering Constraints:",
        lib.constraint_order.show,
        console_report.constraint_plain,
        modifiers,
    )))
pcs-0.10.4/pcs/cli/constraint_order/console_report.py000066400000000000000000000031021356771603100227100ustar00rootroot00000000000000from pcs.lib.pacemaker.values import is_true

def constraint_plain(constraint_info, with_id=False):
    """
    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id specifies whether to show the constraint id
    """
    options = constraint_info["options"]
    oc_resource1 = options.get("first", "")
    oc_resource2 = options.get("then", "")
    first_action = options.get("first-action", "")
    then_action = options.get("then-action", "")
    oc_id = options.get("id", "")
    oc_score = options.get("score", "")
    oc_kind = options.get("kind", "")
    oc_sym = ""
    oc_id_out = ""
    oc_options = ""
    if (
        "symmetrical" in options
        and
        not is_true(options.get("symmetrical", "false"))
    ):
        oc_sym = "(non-symmetrical)"
    if oc_kind != "":
        score_text = "(kind:" + oc_kind + ")"
    elif oc_kind == "" and oc_score == "":
        score_text = "(kind:Mandatory)"
    else:
        score_text = "(score:" + oc_score + ")"
    if with_id:
        oc_id_out = "(id:"+oc_id+")"
    already_processed_options = (
        "first", "then", "first-action", "then-action", "id", "score", "kind",
        "symmetrical"
    )
    oc_options = " ".join([
        "{0}={1}".format(name, value)
        for name, value in options.items()
        if name not in already_processed_options
    ])
    if oc_options:
        oc_options = "(Options: " + oc_options + ")"
    return " ".join([arg for arg in [
        first_action, oc_resource1, "then", then_action, oc_resource2,
        score_text, oc_sym, oc_options, oc_id_out
    ] if arg])
pcs-0.10.4/pcs/cli/constraint_ticket/000077500000000000000000000000001356771603100174555ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_ticket/__init__.py000066400000000000000000000000001356771603100215540ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/constraint_ticket/command.py000066400000000000000000000051751356771603100214550ustar00rootroot00000000000000from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.constraint import command
from pcs.cli.constraint_ticket import parse_args, console_report
from pcs.cli.common.console_report import error

def create_with_set(lib, argv, modifiers):
    """
    create ticket constraint with resource set

    object lib exposes library
    list argv see usage for "constraint ticket set"
    dict like object modifiers can contain "force" which allows resources in
        a clone/master and duplicate constraints

    Options:
      * --force - allow resource inside clone (or master), allow duplicate
        element
      * -f - CIB file
    """
    modifiers.ensure_only_supported("--force", "-f")
    command.create_with_set(
        lib.constraint_ticket.set,
        argv,
        modifiers,
    )

def add(lib, argv, modifiers):
    """
    create ticket constraint

    object lib exposes library
    list argv see usage for "constraint ticket add"
    dict like object modifiers can contain "force" which allows resources in
        a clone/master and duplicate constraints

    Options:
      * --force - allow resource inside clone (or master), allow duplicate
        element
      * -f - CIB file
    """
    modifiers.ensure_only_supported("--force", "-f")
    ticket, resource_id, resource_role, options = parse_args.parse_add(argv)
    if "rsc-role" in options:
        raise CmdLineInputError(
            "Resource role must not be specified among options"
            +", specify it before resource id"
        )
    if resource_role:
        options["rsc-role"] = resource_role

    lib.constraint_ticket.add(
        ticket,
        resource_id,
        options,
        resource_in_clone_alowed=modifiers.get("--force"),
        duplication_alowed=modifiers.get("--force"),
    )

def remove(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
    """
    modifiers.ensure_only_supported("-f")
    if len(argv) != 2:
        raise CmdLineInputError()

    ticket, resource_id = argv
    if not lib.constraint_ticket.remove(ticket, resource_id):
        raise error("no matching ticket constraint found")

def show(lib, argv, modifiers):
    """
    show all ticket constraints

    object lib exposes library
    list argv see usage for "constraint ticket show"
    dict like object modifiers can contain "full"

    Options:
      * --full - print more details
      * -f - CIB file
    """
    modifiers.ensure_only_supported("-f", "--full")
    if argv:
        raise CmdLineInputError()
    print("\n".join(command.show(
        "Ticket Constraints:",
        lib.constraint_ticket.show,
        console_report.constraint_plain,
        modifiers,
    )))
pcs-0.10.4/pcs/cli/constraint_ticket/console_report.py000066400000000000000000000011621356771603100230640ustar00rootroot00000000000000from pcs.cli.constraint.console_report import prepare_options

def constraint_plain(constraint_info, with_id=False):
    """
    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id specifies whether to show the constraint id
    """
    options = constraint_info["options"]
    role = options.get("rsc-role", "")
    role_prefix = "{0} ".format(role) if role else ""

    return role_prefix + " ".join([options.get("rsc", "")] + prepare_options(
        dict(
            (name, value) for name, value in options.items()
            if name not in ["rsc-role", "rsc"]
        ),
        with_id
    ))
pcs-0.10.4/pcs/cli/constraint_ticket/parse_args.py000066400000000000000000000017751356771603100221660ustar00rootroot00000000000000from pcs.cli.common import parse_args
from pcs.cli.common.errors import CmdLineInputError

def separate_tail_option_candidates(arg_list):
    for i, arg in enumerate(arg_list):
        if "=" in arg:
            return arg_list[:i], arg_list[i:]

    return arg_list, []

def parse_add(arg_list):
    info, option_candidates = separate_tail_option_candidates(arg_list)

    if not info:
        raise CmdLineInputError("Ticket not specified")

    ticket, resource_specification = info[0], info[1:]

    if len(resource_specification) not in (1, 2):
        raise CmdLineInputError(
            "invalid resource specification: '{0}'"
            .format(" ".join(resource_specification))
        )

    if len(resource_specification) == 2:
        resource_role, resource_id = resource_specification
    else:
        resource_role = ""
        resource_id = resource_specification[0]

    return (
        ticket,
        resource_id,
        resource_role,
        parse_args.prepare_options(option_candidates)
    )
pcs-0.10.4/pcs/cli/fencing_topology.py000066400000000000000000000006001356771603100176410ustar00rootroot00000000000000from pcs.common.fencing_topology import (
    TARGET_TYPE_NODE,
    TARGET_TYPE_REGEXP,
    TARGET_TYPE_ATTRIBUTE,
)

__target_type_map = {
    "attrib": TARGET_TYPE_ATTRIBUTE,
    "node": TARGET_TYPE_NODE,
    "regexp": TARGET_TYPE_REGEXP,
}

target_type_map_cli_to_lib = __target_type_map

target_type_map_lib_to_cli = {
    value: key for key, value in __target_type_map.items()
}
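The ticket argument parser above is easiest to see end to end. Here is a
minimal usage sketch (illustrative only, not part of the pcs sources) of how
parse_add splits a command line such as
"pcs constraint ticket add T1 Master resourceA loss-policy=fence"; the ticket
name, resource id and option below are made up for the example:

    from pcs.cli.constraint_ticket.parse_args import parse_add

    ticket, resource_id, resource_role, options = parse_add(
        ["T1", "Master", "resourceA", "loss-policy=fence"]
    )
    # Everything before the first key=value argument is the ticket, the
    # optional role and the resource id; the key=value tail becomes the
    # options dict.
    assert ticket == "T1"
    assert resource_role == "Master"
    assert resource_id == "resourceA"
    assert options == {"loss-policy": "fence"}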
pcs-0.10.4/pcs/cli/file/000077500000000000000000000000001356771603100146455ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/file/__init__.py000066400000000000000000000000001356771603100167440ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/file/metadata.py000066400000000000000000000017071356771603100170040ustar00rootroot00000000000000import os.path from pcs.common import file_type_codes as code from pcs.common.file import FileMetadata _metadata = { code.BOOTH_CONFIG: lambda path: FileMetadata( file_type_code=code.BOOTH_CONFIG, path=path, owner_user_name=None, owner_group_name=None, permissions=None, is_binary=False, ), code.BOOTH_KEY: lambda path: FileMetadata( file_type_code=code.BOOTH_KEY, path=path, owner_user_name=None, owner_group_name=None, permissions=0o600, is_binary=True, ), code.PCS_KNOWN_HOSTS: lambda: FileMetadata( file_type_code=code.PCS_KNOWN_HOSTS, path=os.path.join(os.path.expanduser("~/.pcs"), "known-hosts"), owner_user_name=None, owner_group_name=None, permissions=0o600, is_binary=False, ) } def for_file_type(file_type_code, *args, **kwargs): return _metadata[file_type_code](*args, **kwargs) pcs-0.10.4/pcs/cli/resource/000077500000000000000000000000001356771603100155555ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/resource/__init__.py000066400000000000000000000000001356771603100176540ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/resource/parse_args.py000066400000000000000000000165721356771603100202700ustar00rootroot00000000000000from pcs.cli.common.parse_args import group_by_keywords, prepare_options from pcs.cli.common.errors import CmdLineInputError, HINT_SYNTAX_CHANGE def parse_create_simple(arg_list): groups = group_by_keywords( arg_list, set(["op", "meta"]), implicit_first_group_key="options", group_repeated_keywords=["op"], ) parts = { "meta": prepare_options(groups.get("meta", [])), "options": prepare_options(groups.get("options", [])), "op": [ prepare_options(op) for op in build_operations(groups.get("op", [])) ], } return parts def parse_create(arg_list): groups = group_by_keywords( arg_list, set(["op", "meta", "clone", "promotable", "bundle"]), implicit_first_group_key="options", group_repeated_keywords=["op"], only_found_keywords=True, ) try: parts = { "meta": prepare_options(groups.get("meta", [])), "options": prepare_options(groups.get("options", [])), "op": [ prepare_options(op) for op in build_operations(groups.get("op", [])) ], } if "clone" in groups: parts["clone"] = prepare_options(groups["clone"]) if "promotable" in groups: parts["promotable"] = prepare_options(groups["promotable"]) if "bundle" in groups: parts["bundle"] = groups["bundle"] except CmdLineInputError as e: # Print error messages which point users to the changes section in pcs # manpage. # To be removed in the next significant version. 
if e.message == "missing value of 'master' option": raise CmdLineInputError(message=e.message, hint=HINT_SYNTAX_CHANGE) raise e return parts def _parse_bundle_groups(arg_list): """ Commandline options: no options """ repeatable_keyword_list = ["port-map", "storage-map"] keyword_list = ["meta", "container", "network"] + repeatable_keyword_list groups = group_by_keywords( arg_list, set(keyword_list), group_repeated_keywords=repeatable_keyword_list, only_found_keywords=True, ) for keyword in keyword_list: if keyword not in groups: continue if keyword in repeatable_keyword_list: for repeated_section in groups[keyword]: if not repeated_section: raise CmdLineInputError( "No {0} options specified".format(keyword) ) else: if not groups[keyword]: raise CmdLineInputError( "No {0} options specified".format(keyword) ) return groups def parse_bundle_create_options(arg_list): """ Commandline options: no options """ groups = _parse_bundle_groups(arg_list) container_options = groups.get("container", []) container_type = "" if container_options and "=" not in container_options[0]: container_type = container_options.pop(0) parts = { "container_type": container_type, "container": prepare_options(container_options), "network": prepare_options(groups.get("network", [])), "port_map": [ prepare_options(port_map) for port_map in groups.get("port-map", []) ], "storage_map": [ prepare_options(storage_map) for storage_map in groups.get("storage-map", []) ], "meta": prepare_options(groups.get("meta", [])) } return parts def parse_bundle_reset_options(arg_list): """ Commandline options: no options """ groups = _parse_bundle_groups(arg_list) container_options = groups.get("container", []) parts = { "container": prepare_options(container_options), "network": prepare_options(groups.get("network", [])), "port_map": [ prepare_options(port_map) for port_map in groups.get("port-map", []) ], "storage_map": [ prepare_options(storage_map) for storage_map in groups.get("storage-map", []) ], "meta": prepare_options(groups.get("meta", [])) } return parts def _split_bundle_map_update_op_and_options( map_arg_list, result_parts, map_name ): """ Commandline options: no options """ if len(map_arg_list) < 2: raise _bundle_map_update_not_valid(map_name) op, options = map_arg_list[0], map_arg_list[1:] if op == "add": result_parts["add"].append(prepare_options(options)) elif op in {"delete", "remove"}: result_parts["remove"].extend(options) else: raise _bundle_map_update_not_valid(map_name) def _bundle_map_update_not_valid(map_name): """ Commandline options: no options """ return CmdLineInputError( ( "When using '{map}' you must specify either 'add' and options or " "either of 'delete' or 'remove' and id(s)" ).format(map=map_name) ) def parse_bundle_update_options(arg_list): """ Commandline options: no options """ groups = _parse_bundle_groups(arg_list) port_map = {"add": [], "remove": []} for map_group in groups.get("port-map", []): _split_bundle_map_update_op_and_options( map_group, port_map, "port-map" ) storage_map = {"add": [], "remove": []} for map_group in groups.get("storage-map", []): _split_bundle_map_update_op_and_options( map_group, storage_map, "storage-map" ) parts = { "container": prepare_options(groups.get("container", [])), "network": prepare_options(groups.get("network", [])), "port_map_add": port_map["add"], "port_map_remove": port_map["remove"], "storage_map_add": storage_map["add"], "storage_map_remove": storage_map["remove"], "meta": prepare_options(groups.get("meta", [])) } return parts def 
build_operations(op_group_list): """ Return a list of dicts. Each dict represents one operation. list of list op_group_list contains items that have parameters after "op" (so item can contain multiple operations) for example: [ [monitor timeout=1 start timeout=2], [monitor timeout=3 interval=10], ] """ operation_list = [] for op_group in op_group_list: #empty operation is not allowed if not op_group: raise __not_enough_parts_in_operation() #every operation group needs to start with operation name if "=" in op_group[0]: raise __every_operation_needs_name() for arg in op_group: if "=" not in arg: operation_list.append(["name={0}".format(arg)]) else: operation_list[-1].append(arg) #every operation needs at least name and one option #there can be more than one operation in op_group: check is after processing if any([len(operation) < 2 for operation in operation_list]): raise __not_enough_parts_in_operation() return operation_list def __not_enough_parts_in_operation(): return CmdLineInputError( "When using 'op' you must specify an operation name" " and at least one option" ) def __every_operation_needs_name(): return CmdLineInputError( "When using 'op' you must specify an operation name after 'op'" ) pcs-0.10.4/pcs/cli/resource/relations.py000066400000000000000000000160011356771603100201250ustar00rootroot00000000000000from typing import ( Any, Iterable, List, Mapping, Sequence, Union, ) from pcs.common.pacemaker.resource.relations import ( RelationEntityDto, ResourceRelationDto, ResourceRelationType, ) from pcs.cli.common.console_report import format_optional from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import InputModifiers from pcs.cli.common.printable_tree import ( tree_to_lines, PrintableTreeNode, ) def show_resource_relations_cmd( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file * --full - show constraint ids and resource types """ modifiers.ensure_only_supported("-f", "--full") if len(argv) != 1: raise CmdLineInputError() tree = ResourcePrintableNode.from_dto( ResourceRelationDto.from_dict( lib.resource.get_resource_relations_tree(argv[0]) ) ) for line in tree_to_lines(tree, verbose=modifiers.get("--full")): print(line) class ResourceRelationBase(PrintableTreeNode): def __init__( self, relation_entity: RelationEntityDto, members: Sequence["ResourceRelationBase"], is_leaf: bool, ): self._relation_entity = relation_entity self._members = members self._is_leaf = is_leaf @property def is_leaf(self) -> bool: return self._is_leaf @property def relation_entity(self) -> RelationEntityDto: return self._relation_entity @property def members(self) -> Sequence["ResourceRelationBase"]: return self._members @property def detail(self) -> Sequence[str]: raise NotImplementedError() def get_title(self, verbose: bool) -> str: raise NotImplementedError() class ResourcePrintableNode(ResourceRelationBase): @classmethod def from_dto( cls, resource_dto: ResourceRelationDto ) -> "ResourcePrintableNode": def _relation_comparator(item: ResourceRelationBase) -> str: type_priorities = ( ResourceRelationType.INNER_RESOURCES, ResourceRelationType.OUTER_RESOURCE, ResourceRelationType.ORDER, ResourceRelationType.ORDER_SET, ) priority_map = { _type: value for value, _type in enumerate(type_priorities) } return "{_type}_{_id}".format( _type=priority_map.get( # Hardcoded number 9 is intentional. If there is more than # 10 items, it would be required to also prepend zeros for # lower numbers. 
E.g: if there is 100 options, it should # starts as 000, 001, ... item.relation_entity.type, 9 # type: ignore ), _id=item.relation_entity.id ) return cls( resource_dto.relation_entity, sorted( [ RelationPrintableNode.from_dto(member_dto) for member_dto in resource_dto.members ], key=_relation_comparator, ), resource_dto.is_leaf ) def get_title(self, verbose: bool) -> str: rsc_type = self._relation_entity.type metadata = self._relation_entity.metadata if rsc_type == "primitive": rsc_type = "{_class}{_provider}{_type}".format( _class=format_optional(metadata.get("class"), "{}:"), _provider=format_optional(metadata.get("provider"), "{}:"), _type=metadata.get("type"), ) detail = f" (resource: {rsc_type})" if verbose else "" return f"{self._relation_entity.id}{detail}" @property def detail(self) -> Sequence[str]: return [] class RelationPrintableNode(ResourceRelationBase): @classmethod def from_dto( cls, relation_dto: ResourceRelationDto ) -> "RelationPrintableNode": return cls( relation_dto.relation_entity, sorted( [ ResourcePrintableNode.from_dto(member_dto) for member_dto in relation_dto.members ], key=lambda item: item.relation_entity.id, ), relation_dto.is_leaf ) def get_title(self, verbose: bool) -> str: rel_type_map: Mapping[Union[str, ResourceRelationType], str] = { ResourceRelationType.ORDER: "order", ResourceRelationType.ORDER_SET: "order set", ResourceRelationType.INNER_RESOURCES: "inner resource(s)", ResourceRelationType.OUTER_RESOURCE: "outer resource", } detail = ( " ({})".format(self._relation_entity.metadata.get("id")) if verbose else "" ) return "{type}{detail}".format( type=rel_type_map.get(self._relation_entity.type, ""), detail=detail, ) @property def detail(self) -> Sequence[str]: ent = self._relation_entity if ent.type is ResourceRelationType.ORDER: return _order_metadata_to_str(ent.metadata) if ent.type is ResourceRelationType.ORDER_SET: return _order_set_metadata_to_str(ent.metadata) if ( ent.type is ResourceRelationType.INNER_RESOURCES and len(ent.members) > 1 ): return ["members: {}".format(" ".join(ent.members))] return [] def _order_metadata_to_str(metadata: Mapping[str, Any]) -> Sequence[str]: return [ "{action1} {resource1} then {action2} {resource2}".format( action1=metadata["first-action"], resource1=metadata["first"], action2=metadata["then-action"], resource2=metadata["then"], ) ] + _order_common_metadata_to_str(metadata) def _order_set_metadata_to_str(metadata: Mapping[str, Any]) -> Sequence[str]: result = [] for res_set in metadata["sets"]: result.append(" set {resources}{options}".format( resources=" ".join(res_set["members"]), options=_resource_set_options_to_str(res_set["metadata"]), )) return _order_common_metadata_to_str(metadata) + result def _resource_set_options_to_str(metadata: Mapping[str, Any]) -> str: supported_keys = ( "sequential", "require-all", "ordering", "action", "role", "kind", "score", ) result = _filter_supported_keys(metadata, supported_keys) return f" ({result})" if result else "" def _filter_supported_keys( data: Mapping[str, Any], supported_keys: Iterable[str] ) -> str: return " ".join([ f"{key}={value}" for key, value in sorted(data.items()) if key in supported_keys ]) def _order_common_metadata_to_str(metadata: Mapping[str, Any]) -> List[str]: result = _filter_supported_keys( metadata, ("symmetrical", "kind", "require-all", "score") ) return [result] if result else [] 
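Both ResourcePrintableNode and RelationPrintableNode above implement the
PrintableTreeNode contract from pcs.cli.common.printable_tree. A minimal
sketch (illustrative only, not part of the pcs sources) of that contract and
the output shape tree_to_lines produces; DemoNode and its data are invented
for the example:

    from pcs.cli.common.printable_tree import PrintableTreeNode, tree_to_lines

    class DemoNode(PrintableTreeNode):
        def __init__(self, title, members=(), detail=(), leaf=False):
            self._title = title
            self._members = list(members)
            self._detail = list(detail)
            self._leaf = leaf

        @property
        def members(self):
            return self._members

        @property
        def detail(self):
            return self._detail

        @property
        def is_leaf(self):
            return self._leaf

        def get_title(self, verbose):
            return self._title

    tree = DemoNode("resourceA", members=[
        DemoNode(
            "order",
            members=[DemoNode("resourceB")],
            detail=["start resourceA then start resourceB"],
        ),
    ])
    for line in tree_to_lines(tree):
        print(line)
    # prints roughly:
    # resourceA
    # `- order
    #    |  start resourceA then start resourceB
    #    `- resourceB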
pcs-0.10.4/pcs/cli/routing/000077500000000000000000000000001356771603100154155ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/routing/__init__.py000066400000000000000000000000001356771603100175140ustar00rootroot00000000000000pcs-0.10.4/pcs/cli/routing/acl.py000066400000000000000000000025401356771603100165270ustar00rootroot00000000000000from pcs import ( acl, usage, ) from pcs.cli.common.routing import create_router acl_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.acl(argv), "show": acl.show_acl_config, "enable": acl.acl_enable, "disable": acl.acl_disable, "role": create_router( { "create": acl.role_create, "delete": acl.role_delete, "remove": acl.role_delete, "assign": acl.role_assign, "unassign": acl.role_unassign, }, ["acl", "role"], ), "user": create_router( { "create": acl.user_create, "delete": acl.user_delete, "remove": acl.user_delete, }, ["acl", "user"] ), "group": create_router( { "create": acl.group_create, "delete": acl.group_delete, "remove": acl.group_delete, }, ["acl", "group"] ), "permission": create_router( { "add": acl.permission_add, "delete": acl.run_permission_delete, "remove": acl.run_permission_delete, }, ["acl", "permission"] ), }, ["acl"], default_cmd="show" ) pcs-0.10.4/pcs/cli/routing/alert.py000066400000000000000000000016241356771603100171010ustar00rootroot00000000000000from pcs import ( alert, usage, ) from pcs.cli.common.routing import create_router alert_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.alert(argv), "create": alert.alert_add, "update": alert.alert_update, "delete": alert.alert_remove, "remove": alert.alert_remove, "config": alert.print_alert_config, "show": alert.print_alert_config, "recipient": create_router( { "help": lambda lib, argv, modifiers: usage.alert(["recipient"]), "add": alert.recipient_add, "update": alert.recipient_update, "delete": alert.recipient_remove, "remove": alert.recipient_remove, }, ["alert", "recipient"], ), "get_all_alerts": alert.print_alerts_in_json, }, ["alert"], default_cmd="config", ) pcs-0.10.4/pcs/cli/routing/booth.py000066400000000000000000000024411356771603100171030ustar00rootroot00000000000000from pcs import usage from pcs.cli.booth import command from pcs.cli.common.routing import create_router from pcs.resource import resource_remove, resource_restart booth_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.booth(argv), "config": command.config_show, "setup": command.config_setup, "destroy": command.config_destroy, "ticket": create_router( { "help": lambda lib, argv, modifiers: usage.booth(["ticket"]), "add": command.config_ticket_add, "delete": command.config_ticket_remove, "remove": command.config_ticket_remove, "grant": command.ticket_grant, "revoke": command.ticket_revoke, }, ["booth", "ticket"] ), "create": command.create_in_cluster, "delete": command.get_remove_from_cluster(resource_remove), "remove": command.get_remove_from_cluster(resource_remove), "restart": command.get_restart(resource_restart), "sync": command.sync, "pull": command.pull, "enable": command.enable, "disable": command.disable, "start": command.start, "stop": command.stop, "status": command.status, }, ["booth"] ) pcs-0.10.4/pcs/cli/routing/client.py000066400000000000000000000002621356771603100172450ustar00rootroot00000000000000from pcs import client from pcs.cli.common.routing import create_router client_cmd = create_router( { "local-auth": client.local_auth_cmd, }, ["client"], ) 
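All of the routing tables in this package are built from the create_router
helper shown earlier. A small usage sketch (illustrative only, not part of
the pcs sources); the "demo" command name and the lambda handlers are made
up:

    from pcs.cli.common.routing import create_router

    demo_cmd = create_router(
        {
            "status": lambda lib, argv, modifiers: print("status:", argv),
            "start": lambda lib, argv, modifiers: print("start:", argv),
        },
        ["demo"],
        default_cmd="status",
    )

    # demo_cmd(lib, ["start", "node1"], modifiers) dispatches to the "start"
    # handler with argv == ["node1"]; demo_cmd(lib, [], modifiers) falls back
    # to default_cmd; an unknown sub-command raises CmdLineInputError, which
    # create_router turns into a usage message for "pcs demo".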
pcs-0.10.4/pcs/cli/routing/cluster.py000066400000000000000000000070771356771603100174630ustar00rootroot00000000000000from pcs import ( cluster, pcsd, resource, status, usage, ) import pcs.cli.cluster.command as cluster_command from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.routing import create_router cluster_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.cluster(argv), "setup": cluster.cluster_setup, "sync": create_router( { "corosync": cluster.sync_nodes, }, ["cluster", "sync"], default_cmd="corosync", ), "status": status.cluster_status, "pcsd-status": status.cluster_pcsd_status, "certkey": pcsd.pcsd_certkey, "auth": cluster.cluster_auth_cmd, "start": cluster.cluster_start_cmd, "stop": cluster.cluster_stop_cmd, "kill": cluster.kill_cluster, "enable": cluster.cluster_enable_cmd, "disable": cluster.cluster_disable_cmd, "cib": cluster.get_cib, "cib-push": cluster.cluster_push, "cib-upgrade": cluster.cluster_cib_upgrade_cmd, "edit": cluster.cluster_edit, "link": create_router( { "add": cluster.link_add, "delete": cluster.link_remove, "remove": cluster.link_remove, "update": cluster.link_update, }, ["cluster", "link"] ), "node": create_router( { "add": cluster.node_add, "add-guest": cluster_command.node_add_guest, "add-outside": cluster.node_add_outside_cluster, "add-remote": cluster_command.node_add_remote, "clear": cluster_command.node_clear, "delete": cluster.node_remove, "delete-guest": cluster_command.node_remove_guest, "delete-remote": cluster_command.create_node_remove_remote( resource.resource_remove ), "remove": cluster.node_remove, "remove-guest": cluster_command.node_remove_guest, "remove-remote": cluster_command.create_node_remove_remote( resource.resource_remove ), }, ["cluster", "node"] ), "uidgid": cluster.cluster_uidgid, "corosync": cluster.cluster_get_corosync_conf, "reload": cluster.cluster_reload, "destroy": cluster.cluster_destroy, "verify": cluster.cluster_verify, "report": cluster.cluster_report, "remove_nodes_from_cib": cluster.remove_nodes_from_cib, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. 
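        # For illustration only (hypothetical session): each entry below
        # routes to raise_command_replaced, so e.g. `pcs cluster quorum` is
        # expected to fail with a pointer to its replacement, along the
        # lines of:
        #   Error: This command has been replaced with 'pcs quorum'.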
"quorum": lambda lib, argv, modifiers: raise_command_replaced( "pcs quorum" ), "remote-node": create_router( { "add": lambda lib, argv, modifiers: raise_command_replaced( "pcs cluster node add-guest", ), "remove": lambda lib, argv, modifiers: raise_command_replaced( "pcs cluster node delete-guest", "pcs cluster node remove-guest", ), }, ["cluster", "node"] ), "standby": lambda lib, argv, modifiers: raise_command_replaced( "pcs node standby" ), "unstandby": lambda lib, argv, modifiers: raise_command_replaced( "pcs node unstandby" ), }, ["cluster"] ) pcs-0.10.4/pcs/cli/routing/config.py000066400000000000000000000022071356771603100172350ustar00rootroot00000000000000from pcs import ( config, usage, ) from pcs.cli.common.routing import create_router config_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.config(argv), "show": config.config_show, "backup": config.config_backup, "restore": config.config_restore, "checkpoint": create_router( { "list": config.config_checkpoint_list, "view": config.config_checkpoint_view, "restore": config.config_checkpoint_restore, "diff": config.config_checkpoint_diff, }, ["config", "checkpoint"], default_cmd="list" ), "import-cman": config.config_import_cman, "export": create_router( { "pcs-commands": config.config_export_pcs_commands, "pcs-commands-verbose": lambda lib, argv, modifiers: config.config_export_pcs_commands( lib, argv, modifiers, verbose=True ) }, ["config", "export"] ) }, ["config"], default_cmd="show", ) pcs-0.10.4/pcs/cli/routing/constraint.py000066400000000000000000000030171356771603100201540ustar00rootroot00000000000000from pcs import ( constraint, usage, ) from pcs.cli.common.routing import create_router import pcs.cli.constraint_colocation.command as colocation_command from pcs.cli.constraint_ticket import command as ticket_command constraint_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.constraint(argv), "location": constraint.constraint_location_cmd, "order": constraint.constraint_order_cmd, "ticket": create_router( { "set": ticket_command.create_with_set, "add": ticket_command.add, "delete": ticket_command.remove, "remove": ticket_command.remove, "show": ticket_command.show, }, ["constraint", "ticket"], default_cmd="show" ), "colocation": create_router( { "add": constraint.colocation_add, "remove": constraint.colocation_rm, "delete": constraint.colocation_rm, "set": colocation_command.create_with_set, "show": colocation_command.show, }, ["constraint", "colocation"], default_cmd="show" ), "remove": constraint.constraint_rm, "delete": constraint.constraint_rm, "show": constraint.constraint_show, "list": constraint.constraint_show, "ref": constraint.constraint_ref, "rule": constraint.constraint_rule, }, ["constraint"], default_cmd="list" ) pcs-0.10.4/pcs/cli/routing/host.py000066400000000000000000000004231356771603100167430ustar00rootroot00000000000000from pcs import ( host, usage, ) from pcs.cli.common.routing import create_router host_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.host(argv), "auth": host.auth_cmd, "deauth": host.deauth_cmd, }, ["host"] ) pcs-0.10.4/pcs/cli/routing/node.py000066400000000000000000000012751356771603100167210ustar00rootroot00000000000000from functools import partial from pcs import ( node, usage, ) from pcs.cli.common.routing import create_router node_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.node(argv), "maintenance": partial(node.node_maintenance_cmd, enable=True), "unmaintenance": partial(node.node_maintenance_cmd, 
enable=False), "standby": partial(node.node_standby_cmd, enable=True), "unstandby": partial(node.node_standby_cmd, enable=False), "attribute": node.node_attribute_cmd, "utilization": node.node_utilization_cmd, # pcs-to-pcsd use only "pacemaker-status": node.node_pacemaker_status, }, ["node"] ) pcs-0.10.4/pcs/cli/routing/pcsd.py000066400000000000000000000013131356771603100167160ustar00rootroot00000000000000from pcs import ( pcsd, usage, ) from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.routing import create_router pcsd_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.pcsd(argv), "deauth": pcsd.pcsd_deauth, "certkey": pcsd.pcsd_certkey, "sync-certificates": pcsd.pcsd_sync_certs, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. "clear-auth": lambda lib, argv, modifiers: raise_command_replaced( "pcs host deauth", "pcs pcsd deauth" ), }, ["pcsd"] ) pcs-0.10.4/pcs/cli/routing/prop.py000066400000000000000000000007541356771603100167550ustar00rootroot00000000000000from pcs import ( prop, usage, ) from pcs.cli.common.routing import create_router property_cmd = create_router( { "help": lambda _lib, _argv, _modifiers: usage.property(_argv), "set": prop.set_property, "unset": prop.unset_property, "list": prop.list_property, "show": prop.list_property, "get_cluster_properties_definition": prop.print_cluster_properties_definition, }, ["property"], default_cmd="list" ) pcs-0.10.4/pcs/cli/routing/qdevice.py000066400000000000000000000020641356771603100174110ustar00rootroot00000000000000from pcs import ( qdevice, usage, ) from pcs.cli.common.routing import create_router qdevice_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.qdevice(argv), "status": qdevice.qdevice_status_cmd, "setup": qdevice.qdevice_setup_cmd, "destroy": qdevice.qdevice_destroy_cmd, "start": qdevice.qdevice_start_cmd, "stop": qdevice.qdevice_stop_cmd, "kill": qdevice.qdevice_kill_cmd, "enable": qdevice.qdevice_enable_cmd, "disable": qdevice.qdevice_disable_cmd, # following commands are internal use only, called from pcsd "sign-net-cert-request": qdevice.qdevice_sign_net_cert_request_cmd, "net-client": create_router( { "setup": qdevice.qdevice_net_client_setup_cmd, "import-certificate": qdevice.qdevice_net_client_import_certificate_cmd, "destroy": qdevice.qdevice_net_client_destroy, }, ["qdevice", "net-client"] ), }, ["qdevice"] ) pcs-0.10.4/pcs/cli/routing/quorum.py000066400000000000000000000023271356771603100173230ustar00rootroot00000000000000from pcs import ( quorum, usage, ) from pcs.cli.common.routing import create_router quorum_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.quorum(argv), "config": quorum.quorum_config_cmd, "expected-votes": quorum.quorum_expected_votes_cmd, "status": quorum.quorum_status_cmd, "device": create_router( { "add": quorum.quorum_device_add_cmd, "heuristics": create_router( { "delete": quorum.quorum_device_heuristics_remove_cmd, "remove": quorum.quorum_device_heuristics_remove_cmd, }, ["quorum", "device", "heuristics"] ), "delete": quorum.quorum_device_remove_cmd, "remove": quorum.quorum_device_remove_cmd, "status": quorum.quorum_device_status_cmd, "update": quorum.quorum_device_update_cmd, }, ["quorum", "device"] ), # TODO switch to new architecture "unblock": quorum.quorum_unblock_cmd, "update": quorum.quorum_update_cmd, }, ["quorum"], default_cmd="config", ) 
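# Illustrative sketch (hypothetical, not part of pcs): the node router above
# (and the resource router that follows) bind a distinguishing keyword
# argument onto one shared handler with functools.partial, e.g.
# "maintenance"/"unmaintenance" both calling node.node_maintenance_cmd with
# enable=True/False. A standalone model of that pattern:
if __name__ == "__main__":
    from functools import partial

    def _toggle_cmd(lib, argv, modifiers, enable=True):
        # the router always invokes handlers as handler(lib, argv, modifiers);
        # partial() pre-binds the keyword that tells the two verbs apart
        del lib, modifiers
        print(("enabling" if enable else "disabling"), argv)

    _routes = {
        "on": partial(_toggle_cmd, enable=True),
        "off": partial(_toggle_cmd, enable=False),
    }
    _routes["off"](None, ["node1"], None)  # prints: disabling ['node1']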
pcs-0.10.4/pcs/cli/routing/resource.py000066400000000000000000000103721356771603100176210ustar00rootroot00000000000000from functools import partial from pcs import ( resource, usage, ) from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.routing import create_router from pcs.cli.resource.relations import show_resource_relations_cmd resource_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.resource(argv), "list": resource.resource_list_available, "describe": resource.resource_list_options, "create": resource.resource_create, "move": resource.resource_move, "ban": resource.resource_ban, "clear": resource.resource_unmove_unban, "standards": resource.resource_standards, "providers": resource.resource_providers, "agents": resource.resource_agents, "update": resource.resource_update, "meta": resource.resource_meta, "delete": resource.resource_remove_cmd, "remove": resource.resource_remove_cmd, # TODO remove, deprecated command # replaced with 'resource status' and 'resource config' "show": resource.resource_show, "status": resource.resource_status, "config": resource.resource_config, "group": create_router( { "add": resource.resource_group_add_cmd, "list": resource.resource_group_list, "remove": resource.resource_group_rm_cmd, "delete": resource.resource_group_rm_cmd, }, ["resource", "group"], ), "ungroup": resource.resource_group_rm_cmd, "clone": resource.resource_clone, "promotable": partial(resource.resource_clone, promotable=True), "unclone": resource.resource_clone_master_remove, "enable": resource.resource_enable_cmd, "disable": resource.resource_disable_cmd, "safe-disable": resource.resource_safe_disable_cmd, "restart": resource.resource_restart, "debug-start": partial( resource.resource_force_action, action="debug-start" ), "debug-stop": partial( resource.resource_force_action, action="debug-stop" ), "debug-promote": partial( resource.resource_force_action, action="debug-promote" ), "debug-demote": partial( resource.resource_force_action, action="debug-demote" ), "debug-monitor": partial( resource.resource_force_action, action="debug-monitor" ), "manage": resource.resource_manage_cmd, "unmanage": resource.resource_unmanage_cmd, "failcount": resource.resource_failcount, "op": create_router( { "defaults": resource.resource_op_defaults_cmd, "add": resource.resource_op_add_cmd, "remove": resource.resource_op_delete_cmd, "delete": resource.resource_op_delete_cmd, }, ["resource", "op"] ), "defaults": resource.resource_defaults_cmd, "cleanup": resource.resource_cleanup, "refresh": resource.resource_refresh, "relocate": create_router( { "show": resource.resource_relocate_show_cmd, "dry-run": resource.resource_relocate_dry_run_cmd, "run": resource.resource_relocate_run_cmd, "clear": resource.resource_relocate_clear_cmd, }, ["resource", "relocate"] ), "utilization": resource.resource_utilization_cmd, "bundle": create_router( { "create": resource.resource_bundle_create_cmd, "reset": resource.resource_bundle_reset_cmd, "update": resource.resource_bundle_update_cmd, }, ["resource", "bundle"] ), # internal use only "get_resource_agent_info": resource.get_resource_agent_info, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. 
"master": lambda lib, argv, modifiers: raise_command_replaced( "pcs resource promotable" ), "relations": show_resource_relations_cmd, }, ["resource"], default_cmd="status" ) pcs-0.10.4/pcs/cli/routing/status.py000066400000000000000000000022411356771603100173110ustar00rootroot00000000000000from pcs import ( status, usage, ) from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.routing import create_router from pcs.qdevice import qdevice_status_cmd from pcs.quorum import quorum_status_cmd from pcs.resource import resource_status from pcs.cli.booth.command import status as booth_status_cmd status_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.status(argv), "booth": booth_status_cmd, "corosync": status.corosync_status, "cluster": status.cluster_status, "nodes": status.nodes_status, "pcsd": status.cluster_pcsd_status, "qdevice": qdevice_status_cmd, "quorum": quorum_status_cmd, "resources": resource_status, "xml": status.xml_status, "status": status.full_status, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. "groups": lambda lib, argv, modifiers: raise_command_replaced( "pcs resource group list" ), }, ["status"], default_cmd="status", ) pcs-0.10.4/pcs/cli/routing/stonith.py000066400000000000000000000057041356771603100174650ustar00rootroot00000000000000from pcs import ( resource, stonith, usage, ) from pcs.cli.common.routing import create_router stonith_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.stonith(argv), "list": stonith.stonith_list_available, "describe": stonith.stonith_list_options, "create": stonith.stonith_create, "update": resource.resource_update, "delete": resource.resource_remove_cmd, "remove": resource.resource_remove_cmd, # TODO remove, deprecated command # replaced with 'stonith status' and 'stonith config' "show": stonith.stonith_show_cmd, "status": stonith.stonith_status_cmd, "config": stonith.stonith_config_cmd, "level": create_router( { "add": stonith.stonith_level_add_cmd, "clear": stonith.stonith_level_clear_cmd, "config": stonith.stonith_level_config_cmd, "remove": stonith.stonith_level_remove_cmd, "delete": stonith.stonith_level_remove_cmd, "verify": stonith.stonith_level_verify_cmd, }, ["stonith", "level"], default_cmd="config" ), "fence": stonith.stonith_fence, "cleanup": resource.resource_cleanup, "refresh": resource.resource_refresh, "confirm": stonith.stonith_confirm, "sbd": create_router( { "enable": stonith.sbd_enable, "disable": stonith.sbd_disable, "status": stonith.sbd_status, "config": stonith.sbd_config, "device": create_router( { "setup": stonith.sbd_setup_block_device, "message": stonith.sbd_message, }, ["stonith", "sbd", "device"] ), "watchdog": create_router( { "list": stonith.sbd_watchdog_list, "test": stonith.sbd_watchdog_test, # internal use only "list_json": stonith.sbd_watchdog_list_json, }, ["stonith", "sbd", "watchdog"] ), # internal use only "local_config_in_json": stonith.local_sbd_config, }, ["stonith", "sbd"] ), "enable": resource.resource_enable_cmd, "disable": resource.resource_disable_cmd, "history": create_router( { "show": stonith.stonith_history_show_cmd, "cleanup": stonith.stonith_history_cleanup_cmd, "update": stonith.stonith_history_update_cmd, }, ["stonith", "history"], default_cmd="show" ), # internal use only "get_fence_agent_info": stonith.get_fence_agent_info, }, ["stonith"], default_cmd="status" ) 
pcs-0.10.4/pcs/client.py000066400000000000000000000013361356771603100150120ustar00rootroot00000000000000from pcs import settings, utils from pcs.cli.common.errors import CmdLineInputError def local_auth_cmd(lib, argv, modifiers): """ Options: * -u - username * -p - password * --request-timeout - timeout for HTTP requests """ del lib modifiers.ensure_only_supported("-u", "-p", "--request-timeout") if len(argv) > 1: raise CmdLineInputError() port = argv[0] if argv else settings.pcsd_default_port username, password = utils.get_user_and_pass() utils.auth_hosts( { "localhost": { "username": username, "password": password, "dest_list": [{"addr": "localhost", "port": port}] } } ) pcs-0.10.4/pcs/cluster.py000066400000000000000000001545771356771603100152350ustar00rootroot00000000000000# pylint: disable=too-many-lines import datetime import json import math import os import re import subprocess import sys import tempfile import time import xml.dom.minidom from pcs import ( settings, utils, ) from pcs.utils import parallel_for_nodes from pcs.cli.common import parse_args from pcs.cli.common.errors import ( CmdLineInputError, ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE, HINT_SYNTAX_CHANGE, msg_command_replaced, ) from pcs.cli.common.reports import process_library_reports, build_report_message from pcs.common import report_codes from pcs.common.node_communicator import ( HostNotFound, Request, RequestData, ) from pcs.common.reports import SimpleReportProcessor from pcs.common.tools import Version from pcs.lib import ( sbd as lib_sbd, reports, ) from pcs.lib.cib.tools import VERSION_FORMAT from pcs.lib.commands.remote_node import _destroy_pcmk_remote_env from pcs.lib.communication.nodes import CheckAuth from pcs.lib.communication.tools import ( run_and_raise, run as run_com_cmd, RunRemotelyBase, ) from pcs.lib.corosync import ( live as corosync_live, qdevice_net, ) from pcs.cli.common.console_report import error, warn from pcs.lib.errors import ( LibraryError, ReportItem, ReportItemSeverity, ) from pcs.lib.external import disable_service from pcs.lib.env import MIN_FEATURE_SET_VERSION_FOR_DIFF from pcs.lib.node import get_existing_nodes_names import pcs.lib.pacemaker.live as lib_pacemaker # pylint: disable=too-many-branches, too-many-statements def cluster_cib_upgrade_cmd(lib, argv, modifiers): """ Options: * -f - CIB file """ del lib modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() utils.cluster_upgrade() def cluster_disable_cmd(lib, argv, modifiers): """ Options: * --all - disable all cluster nodes * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified or --all has been used """ del lib modifiers.ensure_only_supported("--all", "--request-timeout") if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) disable_cluster_all() else: disable_cluster(argv) def cluster_enable_cmd(lib, argv, modifiers): """ Options: * --all - enable all cluster nodes * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified or --all has been used """ del lib modifiers.ensure_only_supported("--all", "--request-timeout") if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) enable_cluster_all() else: enable_cluster(argv) def cluster_stop_cmd(lib, argv, modifiers): """ Options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified * --pacemaker - 
stop pacemaker, only effective when no node has been specified * --corosync - stop corosync, only effective when no node has been specified * --all - stop all cluster nodes """ del lib modifiers.ensure_only_supported( "--wait", "--request-timeout", "--pacemaker", "--corosync", "--all", "--force", ) if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) stop_cluster_all() else: stop_cluster(argv) def cluster_start_cmd(lib, argv, modifiers): """ Options: * --wait * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified * --all - start all cluster nodes """ del lib modifiers.ensure_only_supported( "--wait", "--request-timeout", "--all", "--corosync_conf" ) if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) start_cluster_all() else: start_cluster(argv) def sync_nodes(lib, argv, modifiers): """ Options: * --request-timeout - timeout for HTTP requests """ del lib modifiers.ensure_only_supported("--request-timeout") if argv: raise CmdLineInputError() config = utils.getCorosyncConf() nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade(conf_text=config) ) if not nodes: report_list.append(reports.corosync_config_no_nodes_defined()) if report_list: process_library_reports(report_list) for node in nodes: utils.setCorosyncConfig(node, config) def start_cluster(argv): """ Commandline options: * --wait * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified """ wait = False wait_timeout = None if "--wait" in utils.pcs_options: wait_timeout = utils.validate_wait_get_timeout(False) wait = True if argv: nodes = set(argv) # unique start_cluster_nodes(nodes) if wait: wait_for_nodes_started(nodes, wait_timeout) return print("Starting Cluster...") service_list = ["corosync"] if utils.need_to_handle_qdevice_service(): service_list.append("corosync-qdevice") service_list.append("pacemaker") for service in service_list: output, retval = utils.start_service(service) if retval != 0: print(output) utils.err("unable to start {0}".format(service)) if wait: wait_for_nodes_started([], wait_timeout) def start_cluster_all(): """ Commandline options: * --wait * --request-timeout - timeout for HTTP requests """ wait = False wait_timeout = None if "--wait" in utils.pcs_options: wait_timeout = utils.validate_wait_get_timeout(False) wait = True all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append(reports.corosync_config_no_nodes_defined()) if report_list: process_library_reports(report_list) start_cluster_nodes(all_nodes) if wait: wait_for_nodes_started(all_nodes, wait_timeout) def start_cluster_nodes(nodes): """ Commandline options: * --request-timeout - timeout for HTTP requests """ # Large clusters take longer to start up, so we make the timeout longer # for every 8 nodes: # 1 - 8 nodes: 1 * timeout # 9 - 16 nodes: 2 * timeout # 17 - 24 nodes: 3 * timeout # and so on # Users can override this and set their own timeout by specifying # the --request-timeout option (see utils.sendHTTPRequest).
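    # Worked example of the scaling below (illustrative numbers, assuming the
    # stock default request timeout of 60 seconds):
    #   5 nodes  -> ceil(5/8.0)  = 1 -> timeout = 60s
    #   12 nodes -> ceil(12/8.0) = 2 -> timeout = 120s
    #   20 nodes -> ceil(20/8.0) = 3 -> timeout = 180s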
timeout = int( settings.default_request_timeout * math.ceil(len(nodes) / 8.0) ) utils.read_known_hosts_file() # cache known hosts node_errors = parallel_for_nodes( utils.startCluster, nodes, quiet=True, timeout=timeout ) if node_errors: utils.err( "unable to start all nodes\n" + "\n".join(node_errors.values()) ) def is_node_fully_started(node_status): """ Commandline options: no options """ return ( "online" in node_status and "pending" in node_status and node_status["online"] and not node_status["pending"] ) def wait_for_local_node_started(stop_at, interval): """ Commandline options: no options """ try: while True: time.sleep(interval) node_status = lib_pacemaker.get_local_node_status( utils.cmd_runner() ) if is_node_fully_started(node_status): return 0, "Started" if datetime.datetime.now() > stop_at: return 1, "Waiting timeout" except LibraryError as e: return 1, "Unable to get node status: {0}".format( "\n".join([build_report_message(item) for item in e.args]) ) def wait_for_remote_node_started(node, stop_at, interval): """ Commandline options: * --request-timeout - timeout for HTTP requests """ while True: time.sleep(interval) code, output = utils.getPacemakerNodeStatus(node) # HTTP error, permission denied or unable to auth # there is no point in trying again as it won't get magically fixed if code in [1, 3, 4]: return 1, output if code == 0: try: node_status = json.loads(output) if is_node_fully_started(node_status): return 0, "Started" except (ValueError, KeyError): # this won't get fixed either return 1, "Unable to get node status" if datetime.datetime.now() > stop_at: return 1, "Waiting timeout" def wait_for_nodes_started(node_list, timeout=None): """ Commandline options: * --request-timeout - timeout for HTTP request, effective only if node_list is not empty list """ timeout = 60 * 15 if timeout is None else timeout interval = 2 stop_at = datetime.datetime.now() + datetime.timedelta(seconds=timeout) print("Waiting for node(s) to start...") if not node_list: code, output = wait_for_local_node_started(stop_at, interval) if code != 0: utils.err(output) else: print(output) else: utils.read_known_hosts_file() # cache known hosts node_errors = parallel_for_nodes( wait_for_remote_node_started, node_list, stop_at, interval ) if node_errors: utils.err("unable to verify all nodes have started") def stop_cluster_all(): """ Commandline options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append(reports.corosync_config_no_nodes_defined()) if report_list: process_library_reports(report_list) stop_cluster_nodes(all_nodes) def stop_cluster_nodes(nodes): """ Commandline options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) unknown_nodes = set(nodes) - set(all_nodes) if unknown_nodes: if report_list: process_library_reports(report_list) utils.err( "nodes '%s' do not appear to exist in configuration" % "', '".join(sorted(unknown_nodes)) ) utils.read_known_hosts_file() # cache known hosts stopping_all = set(nodes) >= set(all_nodes) if "--force" not in utils.pcs_options and not stopping_all: error_list = [] for node in nodes: retval, data = utils.get_remote_quorumtool_output(node) if retval != 0: error_list.append(node + ": " + data) continue try: quorum_status = 
corosync_live.QuorumStatus.from_string(data) if not quorum_status.is_quorate: # Get quorum status from a quorate node, non-quorate nodes # may provide inaccurate info. If no node is quorate, there # is no quorum to be lost and therefore no error to be # reported. continue if quorum_status.stopping_nodes_cause_quorum_loss(nodes): utils.err( "Stopping the node(s) will cause a loss of the quorum" + ", use --force to override" ) else: # We have the info, no need to print errors error_list = [] break except corosync_live.QuorumStatusException: if not utils.is_node_offline_by_quorumtool_output(data): error_list.append(node + ": Unable to get quorum status") # else the node seems to be stopped already if error_list: utils.err( "Unable to determine whether stopping the nodes will cause " + "a loss of the quorum, use --force to override\n" + "\n".join(error_list) ) was_error = False node_errors = parallel_for_nodes( utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True ) accessible_nodes = [ node for node in nodes if node not in node_errors.keys() ] if node_errors: utils.err( "unable to stop all nodes\n" + "\n".join(node_errors.values()), exit_after_error=not accessible_nodes ) was_error = True for node in node_errors: print("{0}: Not stopping cluster - node is unreachable".format(node)) node_errors = parallel_for_nodes( utils.stopCorosync, accessible_nodes, quiet=True ) if node_errors: utils.err( "unable to stop all nodes\n" + "\n".join(node_errors.values()) ) if was_error: utils.err("unable to stop all nodes") def enable_cluster(argv): """ Commandline options: * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified """ if argv: enable_cluster_nodes(argv) return try: utils.enableServices() except LibraryError as e: process_library_reports(e.args) def disable_cluster(argv): """ Commandline options: * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified """ if argv: disable_cluster_nodes(argv) return try: utils.disableServices() except LibraryError as e: process_library_reports(e.args) def enable_cluster_all(): """ Commandline options: * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append(reports.corosync_config_no_nodes_defined()) if report_list: process_library_reports(report_list) enable_cluster_nodes(all_nodes) def disable_cluster_all(): """ Commandline options: * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append(reports.corosync_config_no_nodes_defined()) if report_list: process_library_reports(report_list) disable_cluster_nodes(all_nodes) def enable_cluster_nodes(nodes): """ Commandline options: * --request-timeout - timeout for HTTP requests """ error_list = utils.map_for_error_list(utils.enableCluster, nodes) if error_list: utils.err("unable to enable all nodes\n" + "\n".join(error_list)) def disable_cluster_nodes(nodes): """ Commandline options: * --request-timeout - timeout for HTTP requests """ error_list = utils.map_for_error_list(utils.disableCluster, nodes) if error_list: utils.err("unable to disable all nodes\n" + "\n".join(error_list)) def destroy_cluster(argv): """ Commandline options: * --request-timeout - timeout for HTTP requests """ if argv: utils.read_known_hosts_file() # cache known hosts # stop pacemaker and resources while cluster 
is still quorate nodes = argv node_errors = parallel_for_nodes( utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True ) # proceed with destroy regardless of errors # destroy will stop any remaining cluster daemons node_errors = parallel_for_nodes( utils.destroyCluster, nodes, quiet=True ) if node_errors: utils.err( "unable to destroy cluster\n" + "\n".join(node_errors.values()) ) def stop_cluster(argv): """ Commandline options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified * --pacemaker - stop pacemaker, only effective when no node has been specified """ if argv: stop_cluster_nodes(argv) return if "--force" not in utils.pcs_options: # corosync 3.0.1 and older: # - retval is 0 on success if a node is not in a partition with quorum # - retval is 1 on error OR on success if a node has quorum # corosync 3.0.2 and newer: # - retval is 0 on success if a node has quorum # - retval is 1 on error # - retval is 2 on success if a node is not in a partition with quorum output, dummy_retval = utils.run(["corosync-quorumtool", "-p", "-s"]) try: if ( corosync_live.QuorumStatus.from_string(output) .stopping_local_node_cause_quorum_loss() ): utils.err( "Stopping the node will cause a loss of the quorum" + ", use --force to override" ) except corosync_live.QuorumStatusException: if not utils.is_node_offline_by_quorumtool_output(output): utils.err( "Unable to determine whether stopping the node will cause " + "a loss of the quorum, use --force to override" ) # else the node seems to be stopped already, proceed to be sure stop_all = ( "--pacemaker" not in utils.pcs_options and "--corosync" not in utils.pcs_options ) if stop_all or "--pacemaker" in utils.pcs_options: stop_cluster_pacemaker() if stop_all or "--corosync" in utils.pcs_options: stop_cluster_corosync() def stop_cluster_pacemaker(): """ Commandline options: no options """ print("Stopping Cluster (pacemaker)...") output, retval = utils.stop_service("pacemaker") if retval != 0: print(output) utils.err("unable to stop pacemaker") def stop_cluster_corosync(): """ Commandline options: no options """ print("Stopping Cluster (corosync)...") service_list = [] if utils.need_to_handle_qdevice_service(): service_list.append("corosync-qdevice") service_list.append("corosync") for service in service_list: output, retval = utils.stop_service(service) if retval != 0: print(output) utils.err("unable to stop {0}".format(service)) def kill_cluster(lib, argv, modifiers): """ Options: no options """ del lib if argv: raise CmdLineInputError() modifiers.ensure_only_supported() dummy_output, dummy_retval = kill_local_cluster_services() # if dummy_retval != 0: # print "Error: unable to execute killall -9" # print output # sys.exit(1) def kill_local_cluster_services(): """ Commandline options: no options """ all_cluster_daemons = [ # Daemons taken from cluster-clean script in pacemaker "pacemaker-attrd", "pacemaker-based", "pacemaker-controld", "pacemaker-execd", "pacemaker-fenced", "pacemaker-remoted", "pacemaker-schedulerd", "pacemakerd", "dlm_controld", "gfs_controld", # Corosync daemons "corosync-qdevice", "corosync", ] return utils.run(["/usr/bin/killall", "-9"] + all_cluster_daemons) def cluster_push(lib, argv, modifiers): """ Options: * --wait * --config - push only configuration section of CIB * -f - CIB file """ # pylint: disable=too-many-locals, del lib modifiers.ensure_only_supported("--wait", "--config", "-f") if len(argv) > 2: raise 
CmdLineInputError() filename = None scope = None timeout = None diff_against = None if modifiers.get("--wait"): timeout = utils.validate_wait_get_timeout() for arg in argv: if "=" not in arg: filename = arg else: arg_name, arg_value = arg.split("=", 1) if arg_name == "scope": if modifiers.get("--config"): utils.err("Cannot use both scope and --config") if not utils.is_valid_cib_scope(arg_value): utils.err("invalid CIB scope '%s'" % arg_value) else: scope = arg_value elif arg_name == "diff-against": diff_against = arg_value else: raise CmdLineInputError() if modifiers.get("--config"): scope = "configuration" if diff_against and scope: utils.err("Cannot use both scope and diff-against") if not filename: raise CmdLineInputError() try: new_cib_dom = xml.dom.minidom.parse(filename) if scope and not new_cib_dom.getElementsByTagName(scope): utils.err( "unable to push cib, scope '%s' not present in new cib" % scope ) except (EnvironmentError, xml.parsers.expat.ExpatError) as e: utils.err("unable to parse new cib: %s" % e) if diff_against: try: original_cib = xml.dom.minidom.parse(diff_against) except (EnvironmentError, xml.parsers.expat.ExpatError) as e: utils.err("unable to parse original cib: %s" % e) def unable_to_diff(reason): return error( "unable to diff against original cib '{0}': {1}" .format(diff_against, reason) ) cib_element_list = original_cib.getElementsByTagName("cib") if len(cib_element_list) != 1: raise unable_to_diff("there is not exactly one 'cib' element") crm_feature_set = cib_element_list[0].getAttribute("crm_feature_set") if not crm_feature_set: raise unable_to_diff( "the 'cib' element is missing 'crm_feature_set' value" ) match = re.match(VERSION_FORMAT, crm_feature_set) if not match: raise unable_to_diff( "the attribute 'crm_feature_set' of the element 'cib' has an" " invalid value: '{0}'".format(crm_feature_set) ) crm_feature_set_version = Version( int(match.group("major")), int(match.group("minor")), int(match.group("rev")) if match.group("rev") else None ) if crm_feature_set_version < MIN_FEATURE_SET_VERSION_FOR_DIFF: raise unable_to_diff( ( "the 'crm_feature_set' version is '{0}'" " but at least version '{1}' is required" ).format( crm_feature_set_version, MIN_FEATURE_SET_VERSION_FOR_DIFF, ) ) runner = utils.cmd_runner() command = [ os.path.join(settings.pacemaker_binaries, "crm_diff"), "--original", diff_against, "--new", filename, "--no-version" ] patch, stderr, retval = runner.run(command) # 0 (CRM_EX_OK) - success with no difference # 1 (CRM_EX_ERROR) - success with difference # 64 (CRM_EX_USAGE) - usage error # 65 (CRM_EX_DATAERR) - XML fragments not parseable if retval > 1: utils.err("unable to diff the CIBs:\n" + stderr) if retval == 0: print( "The new CIB is the same as the original CIB, nothing to push." 
) sys.exit(0) command = [ os.path.join(settings.pacemaker_binaries, "cibadmin"), "--patch", "--xml-pipe" ] output, stderr, retval = runner.run(command, patch) if retval != 0: utils.err("unable to push cib\n" + stderr + output) else: command = ["cibadmin", "--replace", "--xml-file", filename] if scope: command.append("--scope=%s" % scope) output, retval = utils.run(command) if retval != 0: utils.err("unable to push cib\n" + output) print("CIB updated") if not modifiers.is_specified("--wait"): return cmd = ["crm_resource", "--wait"] if timeout: cmd.extend(["--timeout", str(timeout)]) output, retval = utils.run(cmd) if retval != 0: msg = [] if retval == settings.pacemaker_wait_timeout_status: msg.append("waiting timeout") if output: msg.append("\n" + output) utils.err("\n".join(msg).strip()) def cluster_edit(lib, argv, modifiers): """ Options: * --config - edit configuration section of CIB * -f - CIB file * --wait """ modifiers.ensure_only_supported("--config", "--wait", "-f") if 'EDITOR' in os.environ: if len(argv) > 1: raise CmdLineInputError() scope = None scope_arg = "" for arg in argv: if "=" not in arg: raise CmdLineInputError() arg_name, arg_value = arg.split("=", 1) if arg_name == "scope" and not modifiers.get("--config"): if not utils.is_valid_cib_scope(arg_value): utils.err("invalid CIB scope '%s'" % arg_value) else: scope_arg = arg scope = arg_value else: raise CmdLineInputError() if modifiers.get("--config"): scope = "configuration" # Leave scope_arg empty as cluster_push will pick up a --config # option from utils.pcs_options scope_arg = "" editor = os.environ['EDITOR'] tempcib = tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs") cib = utils.get_cib(scope) tempcib.write(cib) tempcib.flush() try: subprocess.call([editor, tempcib.name]) except OSError: utils.err("unable to open file with $EDITOR: " + editor) tempcib.seek(0) newcib = "".join(tempcib.readlines()) if newcib == cib: print("CIB not updated, no changes detected") else: cluster_push( lib, [arg for arg in [tempcib.name, scope_arg] if arg], modifiers.get_subset("--wait", "--config", "-f"), ) else: utils.err("$EDITOR environment variable is not set") def get_cib(lib, argv, modifiers): """ Options: * --config show configuration section of CIB * -f - CIB file """ del lib modifiers.ensure_only_supported("--config", "-f") if len(argv) > 2: raise CmdLineInputError() filename = None scope = None for arg in argv: if "=" not in arg: filename = arg else: arg_name, arg_value = arg.split("=", 1) if arg_name == "scope" and not modifiers.get("--config"): if not utils.is_valid_cib_scope(arg_value): utils.err("invalid CIB scope '%s'" % arg_value) else: scope = arg_value else: raise CmdLineInputError() if modifiers.get("--config"): scope = "configuration" if not filename: print(utils.get_cib(scope).rstrip()) else: try: cib_file = open(filename, 'w') output = utils.get_cib(scope) if output != "": cib_file.write(output) else: utils.err("No data in the CIB") except IOError as e: utils.err( "Unable to write to file '%s', %s" % (filename, e.strerror) ) class RemoteAddNodes(RunRemotelyBase): def __init__(self, report_processor, target, data): super().__init__(report_processor) self._target = target self._data = data self._success = False def get_initial_request_list(self): return [ Request( self._target, RequestData( "remote/cluster_add_nodes", [("data_json", json.dumps(self._data))], ) ) ] def _process_response(self, response): node_label = response.request.target.label report = self._get_response_report(response) if report is not None: 
self._report(report) return try: output = json.loads(response.data) for report_dict in output["report_list"]: self._report(ReportItem.from_dict(report_dict)) if output["status"] == "success": self._success = True elif output["status"] != "error": print("Error: {}".format(output["status_msg"])) except (KeyError, json.JSONDecodeError): self._report( reports.invalid_response_format( node_label, severity=ReportItemSeverity.WARNING ) ) def on_complete(self): return self._success def node_add_outside_cluster(lib, argv, modifiers): """ Options: * --wait - wait until the new node starts up, effective only when --start is specified * --start - start new node * --enable - enable new node * --force - treat validation issues and not resolvable addresses as warnings instead of errors * --skip-offline - skip unreachable nodes * --no-watchdog-validation - do not validate watchdogs * --request-timeout - HTTP request timeout """ del lib modifiers.ensure_only_supported( "--wait", "--start", "--enable", "--force", "--skip-offline", "--no-watchdog-validation", "--request-timeout", ) if len(argv) < 2: raise CmdLineInputError( "Usage: pcs cluster node add-outside " "[addr=]... [watchdog=] " "[device=]... [--start [--wait[=]]] [--enable] " "[--no-watchdog-validation]" ) cluster_node, *argv = argv node_dict = _parse_add_node(argv) force_flags = [] if modifiers.get("--force"): force_flags.append(report_codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(report_codes.SKIP_OFFLINE_NODES) cmd_data = dict( nodes=[node_dict], wait=modifiers.get("--wait"), start=modifiers.get("--start"), enable=modifiers.get("--enable"), no_watchdog_validation=modifiers.get("--no-watchdog-validation"), force_flags=force_flags, ) lib_env = utils.get_lib_env() report_processor = SimpleReportProcessor(lib_env.report_processor) target_factory = lib_env.get_node_target_factory() report_list, target_list = target_factory.get_target_list_with_reports( [cluster_node], skip_non_existing=False, allow_skip=False, ) report_processor.report_list(report_list) if report_processor.has_errors: raise LibraryError() com_cmd = RemoteAddNodes(report_processor, target_list[0], cmd_data) was_successful = run_com_cmd(lib_env.get_node_communicator(), com_cmd) if not was_successful: raise LibraryError() def node_remove(lib, argv, modifiers): """ Options: * --force - continue even though the action may cause quorum loss * --skip-offline - skip unreachable nodes * --request-timeout - HTTP request timeout """ modifiers.ensure_only_supported( "--force", "--skip-offline", "--request-timeout", ) if not argv: raise CmdLineInputError() force_flags = [] if modifiers.get("--force"): force_flags.append(report_codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(report_codes.SKIP_OFFLINE_NODES) lib.cluster.remove_nodes(argv, force_flags=force_flags) def cluster_uidgid(lib, argv, modifiers, silent_list=False): """ Options: no options """ # pylint: disable=too-many-locals, del lib modifiers.ensure_only_supported() if not argv: found = False uid_gid_files = os.listdir(settings.corosync_uidgid_dir) for ug_file in uid_gid_files: uid_gid_dict = utils.read_uid_gid_file(ug_file) if "uid" in uid_gid_dict or "gid" in uid_gid_dict: line = "UID/GID: uid=" if "uid" in uid_gid_dict: line += uid_gid_dict["uid"] line += " gid=" if "gid" in uid_gid_dict: line += uid_gid_dict["gid"] print(line) found = True if not found and not silent_list: print("No uidgids configured") return command = argv.pop(0) uid = "" gid = "" if command in {"add", "delete",
"remove", "rm"} and argv: for arg in argv: if arg.find('=') == -1: utils.err( "uidgid options must be of the form uid= gid=" ) (key, value) = arg.split('=', 1) if key not in {"uid", "gid"}: utils.err("%s is not a valid key, you must use uid or gid" %key) if key == "uid": uid = value if key == "gid": gid = value if uid == "" and gid == "": utils.err("you must set either uid or gid") if command == "add": utils.write_uid_gid_file(uid, gid) elif command in {"delete", "remove", "rm"}: if command == "rm": sys.stderr.write( "'pcs cluster uidgid rm' has been deprecated, use 'pcs " "cluster uidgid delete' or 'pcs cluster uidgid remove' " "instead\n" ) file_removed = utils.remove_uid_gid_file(uid, gid) if not file_removed: utils.err( "no uidgid files with uid=%s and gid=%s found" % (uid, gid) ) else: # The hint is defined to print error messages which point users to the # changes section in pcs manpage. # To be removed in the next significant version. raise CmdLineInputError(hint=( msg_command_replaced( "pcs cluster uidgid delete", "pcs cluster uidgid remove", ) if command == "rm" else None )) def cluster_get_corosync_conf(lib, argv, modifiers): """ Options: * --request-timeout - timeout for HTTP requests, effetive only when at least one node has been specified """ del lib modifiers.ensure_only_supported("--request-timeout") if len(argv) > 1: raise CmdLineInputError() if not argv: print(utils.getCorosyncConf().rstrip()) return node = argv[0] retval, output = utils.getCorosyncConfig(node) if retval != 0: utils.err(output) else: print(output.rstrip()) def cluster_reload(lib, argv, modifiers): """ Options: no options """ del lib modifiers.ensure_only_supported() if len(argv) != 1 or argv[0] != "corosync": raise CmdLineInputError() output, retval = utils.reloadCorosync() if retval != 0 or "invalid option" in output: utils.err(output.rstrip()) print("Corosync reloaded") # Completely tear down the cluster & remove config files # Code taken from cluster-clean script in pacemaker def cluster_destroy(lib, argv, modifiers): """ Options: * --all - destroy cluster on all cluster nodes => destroy whole cluster * --request-timeout - timeout of HTTP requests, effective only with --all """ # pylint: disable=bare-except del lib modifiers.ensure_only_supported("--all", "--request-timeout") if argv: raise CmdLineInputError() if modifiers.get("--all"): # load data cib = None lib_env = utils.get_lib_env() try: cib = lib_env.get_cib() except LibraryError as e: warn( "Unable to load CIB to get guest and remote nodes from it, " "those nodes will not be deconfigured." ) corosync_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not corosync_nodes: report_list.append(reports.corosync_config_no_nodes_defined()) if report_list: process_library_reports(report_list) # destroy remote and guest nodes if cib is not None: try: all_remote_nodes, report_list = get_existing_nodes_names( cib=cib ) if report_list: process_library_reports(report_list) if all_remote_nodes: _destroy_pcmk_remote_env( lib_env, all_remote_nodes, skip_offline_nodes=True, allow_fails=True ) except LibraryError as e: process_library_reports(e.args) # destroy full-stack nodes destroy_cluster(corosync_nodes) else: print("Shutting down pacemaker/corosync services...") for service in ["pacemaker", "corosync-qdevice", "corosync"]: # Returns an error if a service is not running. It is safe to # ignore it since we want it not to be running anyways. 
utils.stop_service(service) print("Killing any remaining services...") kill_local_cluster_services() try: utils.disableServices() except: # previously errors were suppressed in here, let's keep it that way # for now pass try: disable_service(utils.cmd_runner(), lib_sbd.get_sbd_service_name()) except: # it's not a big deal if sbd disable fails pass print("Removing all cluster configuration files...") dummy_output, dummy_retval = utils.run([ "/bin/rm", "-f", settings.corosync_conf_file, settings.corosync_authkey_file, settings.pacemaker_authkey_file, ]) state_files = [ "cib-*", "cib.*", "cib.xml*", "core.*", "cts.*", "hostcache", "pe*.bz2", ] for name in state_files: dummy_output, dummy_retval = utils.run([ "/usr/bin/find", "/var/lib/pacemaker", "-name", name, "-exec", "/bin/rm", "-f", "{}", ";" ]) try: qdevice_net.client_destroy() except: # errors from deleting other files are suppressed as well # we do not want to fail if qdevice was not set up pass def cluster_verify(lib, argv, modifiers): """ Options: * -f - CIB file * --full - more verbose output """ modifiers.ensure_only_supported("-f", "--full") if argv: raise CmdLineInputError() lib.cluster.verify(verbose=modifiers.get("--full")) def cluster_report(lib, argv, modifiers): """ Options: * --force - overwrite existing file * --from - timestamp * --to - timestamp """ del lib modifiers.ensure_only_supported("--force", "--from", "--to") if len(argv) != 1: raise CmdLineInputError() outfile = argv[0] dest_outfile = outfile + ".tar.bz2" if os.path.exists(dest_outfile): if not modifiers.get("--force"): utils.err( dest_outfile + " already exists, use --force to overwrite" ) else: try: os.remove(dest_outfile) except OSError as e: utils.err( "Unable to remove " + dest_outfile + ": " + e.strerror ) crm_report_opts = [] crm_report_opts.append("-f") if modifiers.is_specified("--from"): crm_report_opts.append(modifiers.get("--from")) if modifiers.is_specified("--to"): crm_report_opts.append("-t") crm_report_opts.append(modifiers.get("--to")) else: yesterday = datetime.datetime.now() - datetime.timedelta(1) crm_report_opts.append(yesterday.strftime("%Y-%m-%d %H:%M")) crm_report_opts.append(outfile) output, retval = utils.run([settings.crm_report] + crm_report_opts) if ( retval != 0 and ( "ERROR: Cannot determine nodes; specify --nodes or --single-node" in output ) ): utils.err("cluster is not configured on this node") newoutput = "" for line in output.split("\n"): if ( line.startswith("cat:") or line.startswith("grep") or line.startswith("tail") ): continue if "We will attempt to remove" in line: continue if "-p option" in line: continue if "However, doing" in line: continue if "to diagnose" in line: continue if "--dest" in line: line = line.replace("--dest", "") newoutput = newoutput + line + "\n" if retval != 0: utils.err(newoutput) print(newoutput) def send_local_configs( node_name_list, clear_local_cluster_permissions=False, force=False ): """ Commandline options: * --request-timeout - timeout of HTTP requests """ # pylint: disable=bare-except pcsd_data = { "nodes": node_name_list, "force": force, "clear_local_cluster_permissions": clear_local_cluster_permissions, } err_msgs = [] output, retval = utils.run_pcsdcli("send_local_configs", pcsd_data) if retval == 0 and output["status"] == "ok" and output["data"]: try: for node_name in node_name_list: node_response = output["data"][node_name] if node_response["status"] == "notauthorized": err_msgs.append( ( "Unable to authenticate to {0}, try running 'pcs " "host auth {0}'" ).format(node_name) ) if 
node_response["status"] not in ["ok", "not_supported"]: err_msgs.append( "Unable to set pcsd configs on {0}".format(node_name) ) except: err_msgs.append("Unable to communicate with pcsd") else: err_msgs.append("Unable to set pcsd configs") return err_msgs def cluster_auth_cmd(lib, argv, modifiers): """ Options: * --corosync_conf - corosync.conf file * --request-timeout - timeout of HTTP requests * -u - username * -p - password """ # pylint: disable=too-many-locals, del lib modifiers.ensure_only_supported( "--corosync_conf", "--request-timeout", "-u", "-p" ) if argv: # The hint is defined to print error messages which point users to the # changes section in pcs manpage. # To be removed in the next significant version. raise CmdLineInputError(hint=HINT_SYNTAX_CHANGE) lib_env = utils.get_lib_env() target_factory = lib_env.get_node_target_factory() cluster_node_list = lib_env.get_corosync_conf().get_nodes() cluster_node_names = [] missing_name = False for node in cluster_node_list: if node.name: cluster_node_names.append(node.name) else: missing_name = True if missing_name: print( "Warning: Skipping nodes which do not have their name defined in " "corosync.conf, use the 'pcs host auth' command to authenticate " "them" ) target_list = [] not_authorized_node_name_list = [] for node_name in cluster_node_names: try: target_list.append(target_factory.get_target(node_name)) except HostNotFound: print("{}: Not authorized".format(node_name)) not_authorized_node_name_list.append(node_name) com_cmd = CheckAuth(lib_env.report_processor) com_cmd.set_targets(target_list) not_authorized_node_name_list.extend(run_and_raise( lib_env.get_node_communicator(), com_cmd )) if not_authorized_node_name_list: print("Nodes to authorize: {}".format( ", ".join(not_authorized_node_name_list) )) username, password = utils.get_user_and_pass() not_auth_node_list = [] for node_name in not_authorized_node_name_list: for node in cluster_node_list: if node.name == node_name: if node.addrs_plain(): not_auth_node_list.append(node) else: print( f"{node.name}: No addresses defined in " "corosync.conf, use the 'pcs host auth' command to " "authenticate the node" ) nodes_to_auth_data = { node.name: dict( username=username, password=password, dest_list=[dict( addr=node.addrs_plain()[0], port=settings.pcsd_default_port, )], ) for node in not_auth_node_list } utils.auth_hosts(nodes_to_auth_data) else: print("Sending cluster config files to the nodes...") msgs = send_local_configs(cluster_node_names, force=True) for msg in msgs: print("Warning: {0}".format(msg)) def _parse_node_options( node, options, additional_options=(), additional_repeatable_options=() ): """ Commandline options: no options """ # pylint: disable=invalid-name ADDR_OPT_KEYWORD = "addr" supported_options = {ADDR_OPT_KEYWORD} | set(additional_options) repeatable_options = {ADDR_OPT_KEYWORD} | set(additional_repeatable_options) parsed_options = parse_args.prepare_options(options, repeatable_options) unknown_options = set(parsed_options.keys()) - supported_options if unknown_options: raise CmdLineInputError( "Unknown options '{}' for node '{}'".format( "', '".join(sorted(unknown_options)), node ) ) parsed_options["name"] = node if ADDR_OPT_KEYWORD in parsed_options: parsed_options["addrs"] = parsed_options[ADDR_OPT_KEYWORD] del parsed_options[ADDR_OPT_KEYWORD] return parsed_options TRANSPORT_KEYWORD = "transport" TRANSPORT_DEFAULT_SECTION = "__default__" LINK_KEYWORD = "link" def _parse_transport(transport_args): """ Commandline options: no options """ if not 
transport_args: raise CmdLineInputError( "{} type not defined".format(TRANSPORT_KEYWORD.capitalize()) ) transport_type, *transport_options = transport_args keywords = {"compression", "crypto", LINK_KEYWORD} parsed_options = parse_args.group_by_keywords( transport_options, keywords, implicit_first_group_key=TRANSPORT_DEFAULT_SECTION, group_repeated_keywords=[LINK_KEYWORD], ) options = { section: parse_args.prepare_options(parsed_options[section]) for section in keywords | {TRANSPORT_DEFAULT_SECTION} if section != LINK_KEYWORD } options[LINK_KEYWORD] = [ parse_args.prepare_options(link_options) for link_options in parsed_options[LINK_KEYWORD] ] return transport_type, options def cluster_setup(lib, argv, modifiers): """ Options: * --wait - only effective when used with --start * --start - start cluster * --enable - enable cluster * --force - some validation issues and unresolvable addresses are treated as warnings * --no-keys-sync - do not create and distribute pcsd ssl cert and key, corosync and pacemaker authkeys """ modifiers.ensure_only_supported( "--wait", "--start", "--enable", "--force", "--no-keys-sync", # The hint is defined to print error messages which point users to the # changes section in pcs manpage. # To be removed in the next significant version. hint_syntax_changed=modifiers.is_specified("--name") ) # pylint: disable=invalid-name if len(argv) < 2: raise CmdLineInputError() cluster_name, *argv = argv keywords = [TRANSPORT_KEYWORD, "totem", "quorum"] parsed_args = parse_args.group_by_keywords( argv, keywords, implicit_first_group_key="nodes", keyword_repeat_allowed=False, only_found_keywords=True, ) nodes = [ _parse_node_options(node, options) for node, options in parse_args.split_list_by_any_keywords( parsed_args["nodes"], "node name", ).items() ] transport_type = None transport_options = {} if TRANSPORT_KEYWORD in parsed_args: transport_type, transport_options = _parse_transport( parsed_args[TRANSPORT_KEYWORD] ) force_flags = [] if modifiers.get("--force"): force_flags.append(report_codes.FORCE) lib.cluster.setup( cluster_name, nodes, transport_type=transport_type, transport_options=transport_options.get(TRANSPORT_DEFAULT_SECTION, {}), link_list=transport_options.get(LINK_KEYWORD, []), compression_options=transport_options.get("compression", {}), crypto_options=transport_options.get("crypto", {}), totem_options=parse_args.prepare_options(parsed_args.get("totem", [])), quorum_options=parse_args.prepare_options( parsed_args.get("quorum", []) ), wait=modifiers.get("--wait"), start=modifiers.get("--start"), enable=modifiers.get("--enable"), no_keys_sync=modifiers.get("--no-keys-sync"), force_flags=force_flags, ) def _parse_add_node(argv): # pylint: disable=invalid-name DEVICE_KEYWORD = "device" WATCHDOG_KEYWORD = "watchdog" hostname, *argv = argv node_dict = _parse_node_options( hostname, argv, additional_options={DEVICE_KEYWORD, WATCHDOG_KEYWORD}, additional_repeatable_options={DEVICE_KEYWORD} ) if DEVICE_KEYWORD in node_dict: node_dict[f"{DEVICE_KEYWORD}s"] = node_dict[DEVICE_KEYWORD] del node_dict[DEVICE_KEYWORD] return node_dict def node_add(lib, argv, modifiers): """ Options: * --wait - wait until the new node starts up, effective only when --start is specified * --start - start new node * --enable - enable new node * --force - treat validation issues and not resolvable addresses as warnings instead of errors * --skip-offline - skip unreachable nodes * --no-watchdog-validation - do not validate watchdogs * --request-timeout - HTTP request timeout """
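    # Worked example (hypothetical values) of the _parse_add_node() call
    # below: an argv of
    #   ["node3", "addr=10.0.0.3", "addr=192.168.0.3",
    #    "watchdog=/dev/watchdog", "device=/dev/sdb1"]
    # is expected to be turned into
    #   {"name": "node3", "addrs": ["10.0.0.3", "192.168.0.3"],
    #    "watchdog": "/dev/watchdog", "devices": ["/dev/sdb1"]}
    # ("addr" and "device" are repeatable and get pluralized; "watchdog" is
    # single-valued).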
    modifiers.ensure_only_supported(
        "--wait", "--start", "--enable", "--force", "--skip-offline",
        "--no-watchdog-validation", "--request-timeout",
    )
    if not argv:
        raise CmdLineInputError()
    node_dict = _parse_add_node(argv)
    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(report_codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(report_codes.SKIP_OFFLINE_NODES)
    lib.cluster.add_nodes(
        nodes=[node_dict],
        wait=modifiers.get("--wait"),
        start=modifiers.get("--start"),
        enable=modifiers.get("--enable"),
        no_watchdog_validation=modifiers.get("--no-watchdog-validation"),
        force_flags=force_flags,
    )


def remove_nodes_from_cib(lib, argv, modifiers):
    """
    Options: no options
    """
    modifiers.ensure_only_supported()
    if not argv:
        raise CmdLineInputError("No nodes specified")
    lib.cluster.remove_nodes_from_cib(argv)


def link_add(lib, argv, modifiers):
    """
    Options:
      * --force - treat validation issues and unresolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    if not argv:
        raise CmdLineInputError()
    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(report_codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(report_codes.SKIP_OFFLINE_NODES)
    parsed = parse_args.group_by_keywords(
        argv,
        {"options"},
        implicit_first_group_key="nodes",
        keyword_repeat_allowed=False
    )
    lib.cluster.add_link(
        parse_args.prepare_options(parsed["nodes"]),
        parse_args.prepare_options(parsed["options"]),
        force_flags=force_flags,
    )


def link_remove(lib, argv, modifiers):
    """
    Options:
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported("--request-timeout", "--skip-offline")
    if not argv:
        raise CmdLineInputError()
    force_flags = []
    if modifiers.get("--skip-offline"):
        force_flags.append(report_codes.SKIP_OFFLINE_NODES)
    lib.cluster.remove_links(argv, force_flags=force_flags)


def link_update(lib, argv, modifiers):
    """
    Options:
      * --force - treat validation issues and unresolvable addresses as
        warnings instead of errors
      * --skip-offline - skip unreachable nodes
      * --request-timeout - HTTP request timeout
    """
    modifiers.ensure_only_supported(
        "--force", "--request-timeout", "--skip-offline"
    )
    if len(argv) < 2:
        raise CmdLineInputError()
    force_flags = []
    if modifiers.get("--force"):
        force_flags.append(report_codes.FORCE)
    if modifiers.get("--skip-offline"):
        force_flags.append(report_codes.SKIP_OFFLINE_NODES)
    linknumber = argv[0]
    parsed = parse_args.group_by_keywords(
        argv[1:],
        {"options"},
        implicit_first_group_key="nodes",
        keyword_repeat_allowed=False
    )
    lib.cluster.update_link(
        linknumber,
        parse_args.prepare_options(parsed["nodes"]),
        parse_args.prepare_options(parsed["options"]),
        force_flags=force_flags,
    )
pcs-0.10.4/pcs/common/000077500000000000000000000000001356771603100144475ustar00rootroot00000000000000pcs-0.10.4/pcs/common/__init__.py000066400000000000000000000000001356771603100165460ustar00rootroot00000000000000pcs-0.10.4/pcs/common/fencing_topology.py000066400000000000000000000001341356771603100203640ustar00rootroot00000000000000TARGET_TYPE_NODE = "node"
TARGET_TYPE_REGEXP = "regexp"
TARGET_TYPE_ATTRIBUTE = "attribute"
pcs-0.10.4/pcs/common/file.py000066400000000000000000000146131356771603100157450ustar00rootroot00000000000000from collections import namedtuple
import fcntl
import os
import shutil

from pcs.common.tools import format_os_error

# TODO add logging (logger / debug reports ?) to the RawFile class; be aware
# the class is used both in pcs.cli and pcs.lib packages

FileMetadata = namedtuple(
    "FileMetadata",
    [
        "file_type_code",
        "path",
        "owner_user_name",
        "owner_group_name",
        "permissions",
        "is_binary",
    ]
)


class RawFileError(Exception):
    # So far there has been no need to have a separate exception for each
    # action. Actions must be passed in a report and we certainly do not want
    # a separate report for each action.
    ACTION_CHMOD = "chmod"
    ACTION_CHOWN = "chown"
    ACTION_READ = "read"
    ACTION_REMOVE = "remove"
    ACTION_WRITE = "write"

    def __init__(self, metadata, action, reason=""):
        """
        FileMetadata metadata -- describes the file involved in the error
        string action -- possible values enumerated in RawFileError
        string reason -- plain text error details
        """
        super().__init__()
        self.metadata = metadata
        self.action = action
        self.reason = reason


class FileAlreadyExists(RawFileError):
    def __init__(self, metadata):
        """
        FileMetadata metadata -- describes the file involved in the error
        """
        super().__init__(metadata, RawFileError.ACTION_WRITE)


class RawFileInterface():
    def __init__(self, metadata):
        """
        FileMetadata metadata -- describes the file and provides its metadata
        """
        self.__metadata = metadata

    @property
    def metadata(self):
        return self.__metadata

    def exists(self):
        """
        Return True if file exists, False otherwise
        """
        raise NotImplementedError()

    def read(self):
        """
        Return content of the file as bytes
        """
        raise NotImplementedError()

    def write(self, file_data, can_overwrite=False):
        """
        Write file_data to the file

        bytes file_data -- data to be written
        bool can_overwrite -- raise if False and the file already exists
        """
        raise NotImplementedError()


class RawFile(RawFileInterface):
    def exists(self):
        # Returns False if the file is not accessible, does not raise.
        return os.path.exists(self.metadata.path)

    def read(self):
        try:
            mode = "rb" if self.metadata.is_binary else "r"
            with open(self.metadata.path, mode) as my_file:
                # the lock is released when the file gets closed on leaving the
                # with statement
                fcntl.flock(my_file.fileno(), fcntl.LOCK_SH)
                content = my_file.read()
                return (
                    content if self.metadata.is_binary
                    else content.encode("utf-8")
                )
        except OSError as e:
            # A specific exception if the file does not exist is not needed,
            # anyone can and should check that using the exists method.
            raise RawFileError(
                self.metadata, RawFileError.ACTION_READ, format_os_error(e)
            )

    def write(self, file_data, can_overwrite=False):
        try:
            mode = "{write_mode}{binary_mode}".format(
                write_mode="w" if can_overwrite else "x",
                binary_mode="b" if self.metadata.is_binary else "",
            )
            # It seems pylint cannot process constructing the mode variable and
            # gives a false positive.
            # pylint: disable=bad-open-mode
            with open(self.metadata.path, mode) as my_file:
                # the lock is released when the file gets closed on leaving the
                # with statement
                fcntl.flock(my_file.fileno(), fcntl.LOCK_EX)
                # Set the ownership and permissions to cover the case when we
                # just created the file. If the file already existed, make sure
                # the ownership and permissions are correct before writing any
                # data into it.
if ( self.metadata.owner_user_name is not None or self.metadata.owner_group_name is not None ): try: shutil.chown( self.metadata.path, self.metadata.owner_user_name, self.metadata.owner_group_name, ) except LookupError as e: raise RawFileError( self.metadata, RawFileError.ACTION_CHOWN, str(e) ) except OSError as e: raise RawFileError( self.metadata, RawFileError.ACTION_CHOWN, format_os_error(e) ) if self.metadata.permissions is not None: try: os.chmod(my_file.fileno(), self.metadata.permissions) except OSError as e: raise RawFileError( self.metadata, RawFileError.ACTION_CHMOD, format_os_error(e) ) # Write file data my_file.write( file_data if self.metadata.is_binary else file_data.decode("utf-8") ) except FileExistsError as e: raise FileAlreadyExists(self.metadata) except OSError as e: raise RawFileError( self.metadata, RawFileError.ACTION_WRITE, format_os_error(e) ) def remove(self, fail_if_file_not_found=True): get_raw_file_error = lambda e: RawFileError( self.metadata, RawFileError.ACTION_REMOVE, format_os_error(e) ) try: os.remove(self.metadata.path) except FileNotFoundError as e: if fail_if_file_not_found: raise get_raw_file_error(e) except OSError as e: raise get_raw_file_error(e) def backup(self): # TODO implement raise NotImplementedError() pcs-0.10.4/pcs/common/file_type_codes.py000066400000000000000000000005561356771603100201640ustar00rootroot00000000000000BOOTH_CONFIG = "BOOTH_CONFIG" BOOTH_KEY = "BOOTH_KEY" CIB = "CIB" COROSYNC_AUTHKEY = "COROSYNC_AUTHKEY" COROSYNC_CONF = "COROSYNC_CONF" PACEMAKER_AUTHKEY = "PACEMAKER_AUTHKEY" PCSD_ENVIRONMENT_CONFIG = "PCSD_ENVIRONMENT_CONFIG" PCSD_SSL_CERT = "PCSD_SSL_CERT" PCSD_SSL_KEY = "PCSD_SSL_KEY" PCS_KNOWN_HOSTS = "PCS_KNOWN_HOSTS" PCS_SETTINGS_CONF = "PCS_SETTINGS_CONF" pcs-0.10.4/pcs/common/host.py000066400000000000000000000021111356771603100157710ustar00rootroot00000000000000from collections import namedtuple from pcs import settings Destination = namedtuple("Destination", ["addr", "port"]) class PcsKnownHost( namedtuple("KnownHost", ["name", "token", "dest_list"]) ): @classmethod def from_known_host_file_dict(cls, name, known_host_dict): dest_list = [ Destination(conn["addr"], conn["port"]) for conn in known_host_dict["dest_list"] ] if not dest_list: raise KeyError("no destination defined") return cls(name, token=known_host_dict["token"], dest_list=dest_list) def to_known_host_dict(self): return ( self.name, dict( token=self.token, dest_list=[ dict( addr=dest.addr, port=dest.port, ) for dest in self.dest_list ] ) ) @property def dest(self): if self.dest_list: return self.dest_list[0] return Destination(self.name, settings.pcsd_default_port) pcs-0.10.4/pcs/common/interface/000077500000000000000000000000001356771603100164075ustar00rootroot00000000000000pcs-0.10.4/pcs/common/interface/__init__.py000066400000000000000000000000001356771603100205060ustar00rootroot00000000000000pcs-0.10.4/pcs/common/interface/dto.py000066400000000000000000000010771356771603100175540ustar00rootroot00000000000000from typing import ( Any, Mapping, Type, TypeVar, ) class DataTransferObject: def to_dict(self) -> Mapping[str, Any]: raise NotImplementedError() @classmethod def from_dict(cls, payload: Mapping[str, Any]) -> "DataTransferObject": raise NotImplementedError() class ImplementsToDto: def to_dto(self) -> DataTransferObject: raise NotImplementedError() T = TypeVar("T") class ImplementsFromDto: @classmethod def from_dto(cls: Type[T], dto_obj: DataTransferObject) -> T: raise NotImplementedError() 
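# A minimal sketch (not part of the pcs API) of how the interfaces above are
# meant to be combined; the ExampleDto class and its "name" field are
# hypothetical and serve only as an illustration:
#
#     class ExampleDto(DataTransferObject):
#         def __init__(self, name: str) -> None:
#             self.name = name
#
#         def to_dict(self) -> Mapping[str, Any]:
#             return dict(name=self.name)
#
#         @classmethod
#         def from_dict(cls, payload: Mapping[str, Any]) -> "ExampleDto":
#             return cls(payload["name"])
#
# ExampleDto.from_dict(ExampleDto("node1").to_dict()) then round-trips the
# data; RelationEntityDto in pcs/common/pacemaker/resource/relations.py
# implements the same contract.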
pcs-0.10.4/pcs/common/node_communicator.py000066400000000000000000000426121356771603100205330ustar00rootroot00000000000000import base64
import io
import re
from collections import namedtuple
from urllib.parse import urlencode

# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see the libcurl tutorial
# for more info.
try:
    import signal
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
    pass

from pcs import settings
from pcs.common import pcs_pycurl as pycurl
from pcs.common.host import Destination


def _find_value_for_possible_keys(value_dict, possible_key_list):
    for key in possible_key_list:
        if key in value_dict:
            return value_dict[key]
    return None


class HostNotFound(Exception):
    def __init__(self, name):
        super(HostNotFound, self).__init__()
        self.name = name


class NodeTargetFactory():
    def __init__(self, known_hosts):
        self._known_hosts = known_hosts

    def get_target(self, host_name):
        known_host = self._known_hosts.get(host_name)
        if known_host is None:
            raise HostNotFound(host_name)
        return RequestTarget.from_known_host(known_host)

    def get_target_from_hostname(self, hostname):
        try:
            return self.get_target(hostname)
        except HostNotFound:
            return RequestTarget(hostname)


class RequestData(
    namedtuple("RequestData", ["action", "structured_data", "data"])
):
    """
    This class represents an action and the data associated with it which
    will be sent in a request
    """
    def __new__(cls, action, structured_data=()):
        """
        string action -- action to perform
        list structured_data -- list of tuples, data to send with specified
            action
        """
        return super(RequestData, cls).__new__(
            cls, action, structured_data, urlencode(structured_data)
        )


class RequestTarget(namedtuple(
    "RequestTarget", ["label", "token", "dest_list"]
)):
    """
    This class represents a target (host) for a request to be performed on
    """
    def __new__(cls, label, token=None, dest_list=()):
        if not dest_list:
            dest_list = [
                Destination(label, settings.pcsd_default_port)
            ]
        return super(RequestTarget, cls).__new__(
            cls, label, token=token, dest_list=list(dest_list),
        )

    @classmethod
    def from_known_host(cls, known_host):
        return cls(
            known_host.name,
            token=known_host.token,
            dest_list=known_host.dest_list,
        )

    @property
    def first_addr(self):
        # __new__ ensures there is always at least one item in self.dest_list
        return self.dest_list[0].addr


class Request():
    """
    This class represents a request. Using RequestTarget, it provides an
    interface for getting the next available host to make the request on.
    """
    def __init__(self, request_target, request_data):
        """
        RequestTarget request_target
        RequestData request_data
        """
        self._target = request_target
        self._data = request_data
        self._current_dest_iterator = iter(self._target.dest_list)
        self._current_dest = None
        self.next_dest()

    def next_dest(self):
        """
        Move to the next available host connection. Raises StopIteration when
        there is no connection to use.
        """
        self._current_dest = next(self._current_dest_iterator)

    @property
    def url(self):
        """
        URL representing the request using the current host.
        """
        addr = self.dest.addr
        port = self.dest.port
        return "https://{host}:{port}/{request}".format(
            host="[{0}]".format(addr) if ":" in addr else addr,
            port=(port if port else settings.pcsd_default_port),
            request=self._data.action
        )

    @property
    def dest(self):
        return self._current_dest

    @property
    def host_label(self):
        return self._target.label

    @property
    def target(self):
        return self._target

    @property
    def data(self):
        return self._data.data

    @property
    def action(self):
        return self._data.action

    @property
    def cookies(self):
        cookies = {}
        if self._target.token:
            cookies["token"] = self._target.token
        return cookies

    def __repr__(self):
        return str("Request({0}, {1})").format(self._target, self._data)


class Response():
    """
    This class represents a response to a request; the request is available
    as an instance property.
    """
    def __init__(self, handle, was_connected, errno=None, error_msg=None):
        self._handle = handle
        self._was_connected = was_connected
        self._errno = errno
        self._error_msg = error_msg
        self._data = None
        self._debug = None

    @classmethod
    def connection_successful(cls, handle):
        """
        Returns a Response instance that is marked as successfully connected.

        pycurl.Curl handle -- curl easy handle whose connection was successful
        """
        return cls(handle, True)

    @classmethod
    def connection_failure(cls, handle, errno, error_msg):
        """
        Returns a Response instance that is marked as not successfully
        connected.

        pycurl.Curl handle -- curl easy handle whose connection failed
        int errno -- error number
        string error_msg -- text description of error
        """
        return cls(handle, False, errno, error_msg)

    @property
    def request(self):
        return self._handle.request_obj

    @property
    def handle(self):
        return self._handle

    @property
    def was_connected(self):
        return self._was_connected

    @property
    def errno(self):
        return self._errno

    @property
    def error_msg(self):
        return self._error_msg

    @property
    def data(self):
        if self._data is None:
            self._data = self._handle.output_buffer.getvalue().decode("utf-8")
        return self._data

    @property
    def debug(self):
        if self._debug is None:
            self._debug = self._handle.debug_buffer.getvalue().decode("utf-8")
        return self._debug

    @property
    def response_code(self):
        if not self.was_connected:
            return None
        return self._handle.getinfo(pycurl.RESPONSE_CODE)

    def __repr__(self):
        return str(
            "Response({0} data='{1}' was_connected={2} errno='{3}'"
            " error_msg='{4}' response_code='{5}')"
        ).format(
            self.request,
            self.data,
            self.was_connected,
            self.errno,
            self.error_msg,
            self.response_code,
        )


class NodeCommunicatorFactory():
    def __init__(self, communicator_logger, user, groups, request_timeout):
        self._logger = communicator_logger
        self._user = user
        self._groups = groups
        self._request_timeout = request_timeout

    def get_communicator(self, request_timeout=None):
        return self.get_simple_communicator(request_timeout=request_timeout)

    def get_simple_communicator(self, request_timeout=None):
        timeout = request_timeout if request_timeout else self._request_timeout
        return Communicator(
            self._logger, self._user, self._groups, request_timeout=timeout
        )

    def get_multiaddress_communicator(self, request_timeout=None):
        timeout = request_timeout if request_timeout else self._request_timeout
        return MultiaddressCommunicator(
            self._logger, self._user, self._groups, request_timeout=timeout
        )


class Communicator():
    """
    This class provides a simple interface for making parallel requests.
    Instances of this class are not thread-safe! They are intended to be used
    only in a single thread. Use a unique instance for each thread.
    """
    curl_multi_select_timeout_default = 0.8 # in seconds

    def __init__(self, communicator_logger, user, groups, request_timeout=None):
        self._logger = communicator_logger
        self._auth_cookies = _get_auth_cookies(user, groups)
        self._request_timeout = (
            request_timeout
            if request_timeout is not None
            else settings.default_request_timeout
        )
        self._multi_handle = pycurl.CurlMulti()
        self._is_running = False
        # This is used just for storing references to curl easy handles.
        # We need to have references for all the handles, so they are not
        # cleaned up by the garbage collector.
        self._easy_handle_list = []

    def add_requests(self, request_list):
        """
        Add requests to the queue to be processed. It is possible to call this
        method before getting the generator using the start_loop method and
        also while getting responses from the generator. Requests are not
        performed right after calling this method, but only when the generator
        returned by the start_loop method is in progress (has returned at
        least one response and has not raised StopIteration).

        list request_list -- Request objects to add to the queue
        """
        for request in request_list:
            handle = _create_request_handle(
                request,
                self._auth_cookies,
                self._request_timeout,
            )
            self._easy_handle_list.append(handle)
            self._multi_handle.add_handle(handle)
            if self._is_running:
                self._logger.log_request_start(request)

    def start_loop(self):
        """
        Returns a generator. When the generator is invoked, all requests in
        the queue (added by the add_requests method) will be invoked in
        parallel, and the generator will then return responses for these
        requests. It is possible to add new requests to the queue while the
        generator is in progress. The generator will stop (raise
        StopIteration) after all requests (including those added after the
        creation of the generator) are processed.

        WARNING: do not use multiple generator instances (of one Communicator
        instance) when one of them has not finished (raised StopIteration)
        yet. It will cause an AssertionError.

        USAGE:
        com = Communicator(...)
        com.add_requests([
            Request(...), ...
        ])
        for response in com.start_loop():
            # do something with response
            # if needed, add some new requests to the queue
            com.add_requests([Request(...)])
        """
        if self._is_running:
            raise AssertionError("Method start_loop already running")
        self._is_running = True
        for handle in self._easy_handle_list:
            self._logger.log_request_start(handle.request_obj)

        finished_count = 0
        while finished_count < len(self._easy_handle_list):
            self.__multi_perform()
            self.__wait_for_multi_handle()
            response_list = self.__get_all_ready_responses()
            for response in response_list:
                # free up memory for next usage of this Communicator instance
                self._multi_handle.remove_handle(response.handle)
                self._logger.log_response(response)
                yield response
                # if something was added to the queue in the meantime, run it
                # immediately, so we don't need to wait until all responses
                # have been processed
                self.__multi_perform()
            finished_count += len(response_list)
        self._easy_handle_list = []
        self._is_running = False

    def __get_all_ready_responses(self):
        response_list = []
        repeat = True
        while repeat:
            num_queued, ok_list, err_list = self._multi_handle.info_read()
            response_list.extend(
                [Response.connection_successful(handle) for handle in ok_list]
                +
                [
                    Response.connection_failure(handle, errno, error_msg)
                    for handle, errno, error_msg in err_list
                ]
            )
            repeat = num_queued > 0
        return response_list

    def __multi_perform(self):
        # run all internal operations required by libcurl
        status, num_to_process = self._multi_handle.perform()
        # if perform returns E_CALL_MULTI_PERFORM it requires calling perform
        # once again right away
        while status == pycurl.E_CALL_MULTI_PERFORM:
            status, num_to_process = self._multi_handle.perform()
        return num_to_process

    def __wait_for_multi_handle(self):
        # try to wait until there is something to do for us
        need_to_wait = True
        while need_to_wait:
            timeout = self._multi_handle.timeout()
            if timeout == 0:
                # if timeout == 0 then there is something to process already
                return
            timeout = (
                timeout / 1000.0
                if timeout > 0
                # curl doesn't have a timeout set, so we can use our default
                else self.curl_multi_select_timeout_default
            )
            # when the value returned from select is -1, it timed out, so we
            # keep waiting
            need_to_wait = (self._multi_handle.select(timeout) == -1)


class MultiaddressCommunicator(Communicator):
    """
    Class with the same interface as Communicator. Unlike Communicator, it
    takes advantage of multiple hosts in RequestTarget. So if it is not
    possible to connect to a target using the first hostname, it will use the
    next one until a connection is successful or there are no hosts left.
    """
    def start_loop(self):
        for response in super(MultiaddressCommunicator, self).start_loop():
            if response.was_connected:
                yield response
                continue
            try:
                previous_dest = response.request.dest
                response.request.next_dest()
                self._logger.log_retry(response, previous_dest)
                self.add_requests([response.request])
            except StopIteration:
                self._logger.log_no_more_addresses(response)
                yield response


class CommunicatorLoggerInterface():
    def log_request_start(self, request):
        raise NotImplementedError()

    def log_response(self, response):
        raise NotImplementedError()

    def log_retry(self, response, previous_dest):
        raise NotImplementedError()

    def log_no_more_addresses(self, response):
        raise NotImplementedError()


def _get_auth_cookies(user, group_list):
    """
    Returns input parameters in a dictionary which is prepared to be converted
    to a cookie string.

    string user -- CIB user
    string group_list -- CIB user groups
    """
    # Let's be safe about characters in variables (they can come from env)
    # and do base64. We cannot do it for CIB_user, however, to stay backward
    # compatible, so we at least remove disallowed characters.
    cookies = {}
    if user:
        cookies["CIB_user"] = re.sub(r"[^!-~]", "", user).replace(";", "")
    if group_list:
        # cookies require a string but b64encode returns bytes, so decode it...
        cookies["CIB_user_groups"] = base64.b64encode(
            # python3 requires the value to be bytes not str
            " ".join(group_list).encode("utf-8")
        ).decode("utf-8")
    return cookies


def _create_request_handle(request, cookies, timeout):
    """
    Returns a Curl object (easy handle) which is set up with the specified
    parameters.

    Request request -- request specification
    dict cookies -- cookies to add to request
    int timeout -- request timeout
    """
    # it is not possible to take this callback out of this function because of
    # the curl API
    def __debug_callback(data_type, debug_data):
        # pylint: disable=no-member
        prefixes = {
            pycurl.DEBUG_TEXT: b"* ",
            pycurl.DEBUG_HEADER_IN: b"< ",
            pycurl.DEBUG_HEADER_OUT: b"> ",
            pycurl.DEBUG_DATA_IN: b"<< ",
            pycurl.DEBUG_DATA_OUT: b">> ",
        }
        if data_type in prefixes:
            debug_output.write(prefixes[data_type])
            debug_output.write(debug_data)
            if not debug_data.endswith(b"\n"):
                debug_output.write(b"\n")

    output = io.BytesIO()
    debug_output = io.BytesIO()
    cookies.update(request.cookies)
    handle = pycurl.Curl()
    handle.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)
    handle.setopt(pycurl.TIMEOUT, timeout)
    handle.setopt(pycurl.URL, request.url.encode("utf-8"))
    handle.setopt(pycurl.WRITEFUNCTION, output.write)
    handle.setopt(pycurl.VERBOSE, 1)
    handle.setopt(pycurl.DEBUGFUNCTION, __debug_callback)
    handle.setopt(pycurl.SSL_VERIFYHOST, 0)
    handle.setopt(pycurl.SSL_VERIFYPEER, 0)
    handle.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading
    handle.setopt(pycurl.HTTPHEADER, ["Expect: "])
    if cookies:
        handle.setopt(
            pycurl.COOKIE, _dict_to_cookies(cookies).encode("utf-8")
        )
    if request.data:
        handle.setopt(
            pycurl.COPYPOSTFIELDS, request.data.encode("utf-8")
        )
    # add references for the request object and output buffers to the handle,
    # so later we don't need to match these objects when they are returned
    # from pycurl after they've been processed
    # similar usage is in pycurl example:
    # https://github.com/pycurl/pycurl/blob/REL_7_19_0_3/examples/retriever-multi.py
    handle.request_obj = request
    handle.output_buffer = output
    handle.debug_buffer = debug_output
    return handle


def _dict_to_cookies(cookies_dict):
    return ";".join([
        "{0}={1}".format(key, value)
        for key, value in sorted(cookies_dict.items())
    ])
pcs-0.10.4/pcs/common/pacemaker/000077500000000000000000000000001356771603100163775ustar00rootroot00000000000000pcs-0.10.4/pcs/common/pacemaker/__init__.py000066400000000000000000000000001356771603100204760ustar00rootroot00000000000000pcs-0.10.4/pcs/common/pacemaker/resource/000077500000000000000000000000001356771603100202265ustar00rootroot00000000000000pcs-0.10.4/pcs/common/pacemaker/resource/__init__.py000066400000000000000000000000001356771603100223250ustar00rootroot00000000000000pcs-0.10.4/pcs/common/pacemaker/resource/relations.py000066400000000000000000000053541356771603100226070ustar00rootroot00000000000000from enum import auto
from typing import (
    cast,
    Any,
    Mapping,
    Sequence,
    Union,
)

from pcs.common.interface.dto import DataTransferObject
from pcs.common.tools import AutoNameEnum


class ResourceRelationType(AutoNameEnum):
    ORDER = auto()
    ORDER_SET = auto()
    INNER_RESOURCES = auto()
    OUTER_RESOURCE = auto()


class RelationEntityDto(DataTransferObject):
    # Note: mypy doesn't understand recursive NamedTuple types, therefore this
    # class cannot inherit
NamedTuple def __init__( self, id_: str, type_: Union[ResourceRelationType, str], members: Sequence[str], metadata: Mapping[str, Any], ): # pylint: disable=invalid-name self.id = id_ self.type = type_ self.members = members self.metadata = metadata def __eq__(self, other: object) -> bool: return ( isinstance(other, self.__class__) and self.to_dict() == cast(RelationEntityDto, other).to_dict() ) def to_dict(self) -> Mapping[str, Any]: return dict( id=self.id, type=self.type, members=self.members, metadata=self.metadata, ) @classmethod def from_dict(cls, payload: Mapping[str, Any]) -> "RelationEntityDto": obj_type = payload["type"] try: obj_type = ResourceRelationType(obj_type) except ValueError: # if obj_type is not valid ResourceRelationType, it is resource # type as string such as 'primitive', 'clone', ... pass return cls( payload["id"], obj_type, payload["members"], payload["metadata"], ) class ResourceRelationDto(DataTransferObject): # Note: mypy doesn't understand recursive NamedTuple types, therefore this # class cannot inherit NamedTuple def __init__( self, relation_entity: RelationEntityDto, members: Sequence["ResourceRelationDto"], is_leaf: bool, ): self.relation_entity = relation_entity self.members = members self.is_leaf = is_leaf def to_dict(self) -> Mapping[str, Any]: return dict( relation_entity=self.relation_entity.to_dict(), members=[member.to_dict() for member in self.members], is_leaf=self.is_leaf, ) @classmethod def from_dict(cls, payload: Mapping[str, Any]) -> "ResourceRelationDto": return cls( RelationEntityDto.from_dict(payload["relation_entity"]), [ ResourceRelationDto.from_dict(member_data) for member_data in payload["members"] ], payload["is_leaf"], ) pcs-0.10.4/pcs/common/pcs_pycurl.py000066400000000000000000000014521356771603100172060ustar00rootroot00000000000000import sys # pylint: disable=wildcard-import, unused-wildcard-import from pycurl import * # This package defines constants which are not present in some older versions # of pycurl but pcs needs to use them required_constants = { "PROTOCOLS": 181, "PROTO_HTTPS": 2, "E_OPERATION_TIMEDOUT": 28, # these are types of debug messages # see https://curl.haxx.se/libcurl/c/CURLOPT_DEBUGFUNCTION.html "DEBUG_TEXT": 0, "DEBUG_HEADER_IN": 1, "DEBUG_HEADER_OUT": 2, "DEBUG_DATA_IN": 3, "DEBUG_DATA_OUT": 4, "DEBUG_SSL_DATA_IN": 5, "DEBUG_SSL_DATA_OUT": 6, "DEBUG_END": 7, } __current_module = sys.modules[__name__] for constant, value in required_constants.items(): if not hasattr(__current_module, constant): setattr(__current_module, constant, value) pcs-0.10.4/pcs/common/report_codes.py000066400000000000000000000527671356771603100175320ustar00rootroot00000000000000# pylint: disable=line-too-long # force categories FORCE = "FORCE" FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE" FORCE_ALREADY_IN_CLUSTER = "FORCE_ALREADY_IN_CLUSTER" FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY" FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB" FORCE_REMOVE_MULTIPLE_NODES = "FORCE_REMOVE_MULTIPLE_NODES" FORCE_CONSTRAINT_DUPLICATE = "FORCE_CONSTRAINT_DUPLICATE" FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE" FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE" FORCE_LOAD_NODES_FROM_CIB = "FORCE_LOAD_NODES_FROM_CIB" FORCE_LOAD_THRESHOLD = "FORCE_LOAD_THRESHOLD" FORCE_METADATA_ISSUE = "FORCE_METADATA_ISSUE" FORCE_NODE_ADDRESSES_UNRESOLVABLE = "FORCE_NODE_ADDRESSES_UNRESOLVABLE" FORCE_NODE_DOES_NOT_EXIST = "FORCE_NODE_DOES_NOT_EXIST" FORCE_OPTIONS = "FORCE_OPTIONS" 
FORCE_QDEVICE_MODEL = "FORCE_QDEVICE_MODEL" FORCE_QDEVICE_USED = "FORCE_QDEVICE_USED" FORCE_QUORUM_LOSS = "FORCE_QUORUM_LOSS" FORCE_STONITH_RESOURCE_DOES_NOT_EXIST = "FORCE_STONITH_RESOURCE_DOES_NOT_EXIST" FORCE_NOT_SUITABLE_COMMAND = "FORCE_NOT_SUITABLE_COMMAND" FORCE_CLEAR_CLUSTER_NODE = "FORCE_CLEAR_CLUSTER_NODE" FORCE_RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE = "FORCE_RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE" SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES" SKIP_FILE_DISTRIBUTION_ERRORS = "SKIP_FILE_DISTRIBUTION_ERRORS" SKIP_ACTION_ON_NODES_ERRORS = "SKIP_ACTION_ON_NODES_ERRORS" SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG" AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = "AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE" AGENT_NAME_GUESS_FOUND_NONE = "AGENT_NAME_GUESS_FOUND_NONE" AGENT_NAME_GUESSED = "AGENT_NAME_GUESSED" BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT' BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION" BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB" BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = "BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP" BOOTH_CONFIG_ACCEPTED_BY_NODE = "BOOTH_CONFIG_ACCEPTED_BY_NODE" BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR = "BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR" BOOTH_CONFIG_DISTRIBUTION_STARTED = "BOOTH_CONFIG_DISTRIBUTION_STARTED" BOOTH_CONFIG_IS_USED = "BOOTH_CONFIG_IS_USED" BOOTH_CONFIG_UNEXPECTED_LINES = "BOOTH_CONFIG_UNEXPECTED_LINES" BOOTH_DAEMON_STATUS_ERROR = "BOOTH_DAEMON_STATUS_ERROR" BOOTH_EVEN_PEERS_NUM = "BOOTH_EVEN_PEERS_NUM" BOOTH_FETCHING_CONFIG_FROM_NODE = "BOOTH_FETCHING_CONFIG_FROM_NODE" BOOTH_INVALID_NAME = "BOOTH_INVALID_NAME" BOOTH_LACK_OF_SITES = "BOOTH_LACK_OF_SITES" BOOTH_MULTIPLE_TIMES_IN_CIB = "BOOTH_MULTIPLE_TIMES_IN_CIB" BOOTH_NOT_EXISTS_IN_CIB = "BOOTH_NOT_EXISTS_IN_CIB" BOOTH_PEERS_STATUS_ERROR = "BOOTH_PEERS_STATUS_ERROR" BOOTH_TICKET_DOES_NOT_EXIST = "BOOTH_TICKET_DOES_NOT_EXIST" BOOTH_TICKET_DUPLICATE = "BOOTH_TICKET_DUPLICATE" BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID" BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED" BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR" BOOTH_UNSUPPORTED_FILE_LOCATION = "BOOTH_UNSUPPORTED_FILE_LOCATION" CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = "CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE" CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED = "CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED" CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP = "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP" CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP = "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP" CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP = "CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP" CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE = "CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE" CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF = "CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF" CANNOT_GROUP_RESOURCE_NO_RESOURCES = "CANNOT_GROUP_RESOURCE_NO_RESOURCES" CANNOT_GROUP_RESOURCE_WRONG_TYPE = "CANNOT_GROUP_RESOURCE_WRONG_TYPE" CANNOT_MOVE_RESOURCE_BUNDLE = "CANNOT_MOVE_RESOURCE_BUNDLE" CANNOT_MOVE_RESOURCE_CLONE = "CANNOT_MOVE_RESOURCE_CLONE" CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = "CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE" CANNOT_MOVE_RESOURCE_PROMOTABLE_NOT_MASTER = "CANNOT_MOVE_RESOURCE_PROMOTABLE_NOT_MASTER" CANNOT_MOVE_RESOURCE_STOPPED_NO_NODE_SPECIFIED = "CANNOT_MOVE_RESOURCE_STOPPED_NO_NODE_SPECIFIED" CANNOT_REMOVE_ALL_CLUSTER_NODES = "CANNOT_REMOVE_ALL_CLUSTER_NODES" CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = "CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE" 
CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET = "CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET" CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET = "CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET" CIB_ACL_TARGET_ALREADY_EXISTS = "CIB_ACL_TARGET_ALREADY_EXISTS" CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS" CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID" CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION" CIB_DIFF_ERROR = "CIB_DIFF_ERROR" CIB_FENCING_LEVEL_ALREADY_EXISTS = "CIB_FENCING_LEVEL_ALREADY_EXISTS" CIB_FENCING_LEVEL_DOES_NOT_EXIST = "CIB_FENCING_LEVEL_DOES_NOT_EXIST" CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT" CIB_LOAD_ERROR = "CIB_LOAD_ERROR" CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION = "CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION" CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING" CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET = "CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET" CIB_PUSH_ERROR = "CIB_PUSH_ERROR" CIB_SAVE_TMP_ERROR = "CIB_SAVE_TMP_ERROR" CIB_SIMULATE_ERROR = "CIB_SIMULATE_ERROR" CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED" CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION" CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL" CLUSTER_DESTROY_STARTED = "CLUSTER_DESTROY_STARTED" CLUSTER_DESTROY_SUCCESS = "CLUSTER_DESTROY_SUCCESS" CLUSTER_ENABLE_STARTED = "CLUSTER_ENABLE_STARTED" CLUSTER_ENABLE_SUCCESS = "CLUSTER_ENABLE_SUCCESS" CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES" CLUSTER_SETUP_SUCCESS = "CLUSTER_SETUP_SUCCESS" CLUSTER_START_STARTED = "CLUSTER_START_STARTED" CLUSTER_START_SUCCESS = "CLUSTER_START_SUCCESS" CLUSTER_WILL_BE_DESTROYED = "CLUSTER_WILL_BE_DESTROYED" LIVE_ENVIRONMENT_NOT_CONSISTENT = "LIVE_ENVIRONMENT_NOT_CONSISTENT" LIVE_ENVIRONMENT_REQUIRED = "LIVE_ENVIRONMENT_REQUIRED" LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE = "LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE" COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK = "COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK" COROSYNC_BAD_NODE_ADDRESSES_COUNT = "COROSYNC_BAD_NODE_ADDRESSES_COUNT" COROSYNC_CONFIG_ACCEPTED_BY_NODE = "COROSYNC_CONFIG_ACCEPTED_BY_NODE" COROSYNC_CONFIG_CANNOT_SAVE_INVALID_NAMES_VALUES = "COROSYNC_CONFIG_CANNOT_SAVE_INVALID_NAMES_VALUES" COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR" COROSYNC_CONFIG_DISTRIBUTION_STARTED = "COROSYNC_CONFIG_DISTRIBUTION_STARTED" COROSYNC_CONFIG_MISSING_NAMES_OF_NODES = "COROSYNC_CONFIG_MISSING_NAMES_OF_NODES" COROSYNC_CONFIG_NO_NODES_DEFINED = "COROSYNC_CONFIG_NO_NODES_DEFINED" COROSYNC_CONFIG_RELOADED = "COROSYNC_CONFIG_RELOADED" COROSYNC_CONFIG_RELOAD_ERROR = "COROSYNC_CONFIG_RELOAD_ERROR" COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE = "COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE" COROSYNC_IP_VERSION_MISMATCH_IN_LINKS = "COROSYNC_IP_VERSION_MISMATCH_IN_LINKS" COROSYNC_CANNOT_ADD_REMOVE_LINKS_BAD_TRANSPORT = "COROSYNC_CANNOT_ADD_REMOVE_LINKS_BAD_TRANSPORT" COROSYNC_CANNOT_ADD_REMOVE_LINKS_NO_LINKS_SPECIFIED = "COROSYNC_CANNOT_ADD_REMOVE_LINKS_NO_LINKS_SPECIFIED" COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS = "COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS" COROSYNC_LINK_ALREADY_EXISTS_CANNOT_ADD = "COROSYNC_LINK_ALREADY_EXISTS_CANNOT_ADD" COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE = "COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE" COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_UPDATE = "COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_UPDATE" COROSYNC_LINK_NUMBER_DUPLICATION = "COROSYNC_LINK_NUMBER_DUPLICATION" 
COROSYNC_NODE_ADDRESS_COUNT_MISMATCH = "COROSYNC_NODE_ADDRESS_COUNT_MISMATCH" COROSYNC_NODE_CONFLICT_CHECK_SKIPPED = "COROSYNC_NODE_CONFLICT_CHECK_SKIPPED" COROSYNC_NODES_MISSING = "COROSYNC_NODES_MISSING" COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED" COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE" COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" COROSYNC_QUORUM_ATB_CANNOT_BE_DISABLED_DUE_TO_SBD = "COROSYNC_QUORUM_ATB_CANNOT_BE_DISABLED_DUE_TO_SBD" COROSYNC_QUORUM_ATB_WILL_BE_ENABLED_DUE_TO_SBD = "COROSYNC_QUORUM_ATB_WILL_BE_ENABLED_DUE_TO_SBD" COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR" COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC = "COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC" COROSYNC_QUORUM_LOSS_UNABLE_TO_CHECK = "COROSYNC_QUORUM_LOSS_UNABLE_TO_CHECK" COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR" COROSYNC_QUORUM_WILL_BE_LOST = "COROSYNC_QUORUM_WILL_BE_LOST" COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" COROSYNC_TOO_MANY_LINKS_OPTIONS = "COROSYNC_TOO_MANY_LINKS_OPTIONS" COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS = "COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS" CRM_MON_ERROR = "CRM_MON_ERROR" DEFAULTS_CAN_BE_OVERRIDEN = "DEFAULTS_CAN_BE_OVERRIDEN" DEPRECATED_OPTION = "DEPRECATED_OPTION" DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST" EMPTY_ID = "EMPTY_ID" FENCE_HISTORY_COMMAND_ERROR = "FENCE_HISTORY_COMMAND_ERROR" FENCE_HISTORY_NOT_SUPPORTED = "FENCE_HISTORY_NOT_SUPPORTED" FILES_DISTRIBUTION_SKIPPED = "FILES_DISTRIBUTION_SKIPPED" FILES_DISTRIBUTION_STARTED = "FILES_DISTRIBUTION_STARTED" FILES_REMOVE_FROM_NODES_STARTED = "FILES_REMOVE_FROM_NODES_STARTED" FILES_REMOVE_FROM_NODES_SKIPPED = "FILES_REMOVE_FROM_NODES_SKIPPED" FILE_ALREADY_EXISTS = "FILE_ALREADY_EXISTS" FILE_DISTRIBUTION_ERROR = "FILE_DISTRIBUTION_ERROR" FILE_DISTRIBUTION_SUCCESS = "FILE_DISTRIBUTION_SUCCESS" FILE_IO_ERROR = "FILE_IO_ERROR" FILE_REMOVE_FROM_NODE_ERROR = "FILE_REMOVE_FROM_NODE_ERROR" FILE_REMOVE_FROM_NODE_SUCCESS = "FILE_REMOVE_FROM_NODE_SUCCESS" HOST_NOT_FOUND = "HOST_NOT_FOUND" HOST_ALREADY_AUTHORIZED = "HOST_ALREADY_AUTHORIZED" HOST_ALREADY_IN_CLUSTER_CONFIG = "HOST_ALREADY_IN_CLUSTER_CONFIG" HOST_ALREADY_IN_CLUSTER_SERVICES = "HOST_ALREADY_IN_CLUSTER_SERVICES" ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS' ID_BELONGS_TO_UNEXPECTED_TYPE = "ID_BELONGS_TO_UNEXPECTED_TYPE" ID_NOT_FOUND = 'ID_NOT_FOUND' INVALID_CIB_CONTENT = "INVALID_CIB_CONTENT" INVALID_ID = "INVALID_ID" INVALID_OPTIONS = "INVALID_OPTIONS" INVALID_USERDEFINED_OPTIONS = "INVALID_USERDEFINED_OPTIONS" INVALID_OPTION_TYPE = "INVALID_OPTION_TYPE" INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE" INVALID_RESOURCE_AGENT_NAME = 'INVALID_RESOURCE_AGENT_NAME' INVALID_RESPONSE_FORMAT = "INVALID_RESPONSE_FORMAT" INVALID_SCORE = "INVALID_SCORE" INVALID_STONITH_AGENT_NAME = "INVALID_STONITH_AGENT_NAME" INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE" MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS" MULTIPLE_RESULTS_FOUND = "MULTIPLE_RESULTS_FOUND" MUTUALLY_EXCLUSIVE_OPTIONS = "MUTUALLY_EXCLUSIVE_OPTIONS" NODE_ADDRESSES_ALREADY_EXIST = "NODE_ADDRESSES_ALREADY_EXIST" NODE_ADDRESSES_CANNOT_BE_EMPTY = "NODE_ADDRESSES_CANNOT_BE_EMPTY" NODE_ADDRESSES_DUPLICATION = "NODE_ADDRESSES_DUPLICATION" NODE_ADDRESSES_UNRESOLVABLE = "NODE_ADDRESSES_UNRESOLVABLE" 
NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL" NODE_COMMUNICATION_DEBUG_INFO = "NODE_COMMUNICATION_DEBUG_INFO" NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR" NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED" NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED" NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT" NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND" NODE_COMMUNICATION_ERROR_TIMED_OUT = "NODE_COMMUNICATION_ERROR_TIMED_OUT" NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED" NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED" NODE_COMMUNICATION_NO_MORE_ADDRESSES = "NODE_COMMUNICATION_NO_MORE_ADDRESSES" NODE_COMMUNICATION_PROXY_IS_SET = "NODE_COMMUNICATION_PROXY_IS_SET" NODE_COMMUNICATION_RETRYING = "NODE_COMMUNICATION_RETRYING" NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED" NODE_NAMES_ALREADY_EXIST = "NODE_NAMES_ALREADY_EXIST" NODE_NAMES_DUPLICATION = "NODE_NAMES_DUPLICATION" NODE_NOT_FOUND = "NODE_NOT_FOUND" NODE_REMOVE_IN_PACEMAKER_FAILED = "NODE_REMOVE_IN_PACEMAKER_FAILED" NONE_HOST_FOUND = "NONE_HOST_FOUND" NODE_USED_AS_TIE_BREAKER = "NODE_USED_AS_TIE_BREAKER" NODES_TO_REMOVE_UNREACHABLE = "NODES_TO_REMOVE_UNREACHABLE" NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = "NODE_TO_CLEAR_IS_STILL_IN_CLUSTER" OMITTING_NODE = "OMITTING_NODE" OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = "OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT" PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND" PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF" PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_AFTER_OPENING_BRACE = "PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_AFTER_OPENING_BRACE" PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_BEFORE_OR_AFTER_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_BEFORE_OR_AFTER_CLOSING_BRACE" PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE = "PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE" PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE" PARSE_ERROR_COROSYNC_CONF_MISSING_SECTION_NAME_BEFORE_OPENING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_SECTION_NAME_BEFORE_OPENING_BRACE" PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE" PARSE_ERROR_JSON_FILE = "PARSE_ERROR_JSON_FILE" PCSD_VERSION_TOO_OLD = "PCSD_VERSION_TOO_OLD" PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED = "PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED" PCSD_SSL_CERT_AND_KEY_SET_SUCCESS = "PCSD_SSL_CERT_AND_KEY_SET_SUCCESS" PREREQUISITE_OPTION_MUST_BE_ENABLED_AS_WELL = "PREREQUISITE_OPTION_MUST_BE_ENABLED_AS_WELL" PREREQUISITE_OPTION_MUST_BE_DISABLED = "PREREQUISITE_OPTION_MUST_BE_DISABLED" PREREQUISITE_OPTION_MUST_NOT_BE_SET = "PREREQUISITE_OPTION_MUST_NOT_BE_SET" PREREQUISITE_OPTION_IS_MISSING = "PREREQUISITE_OPTION_IS_MISSING" QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED" QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED" QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE" QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED" QDEVICE_CERTIFICATE_REMOVAL_STARTED = "QDEVICE_CERTIFICATE_REMOVAL_STARTED" QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE" QDEVICE_CERTIFICATE_IMPORT_ERROR = "QDEVICE_CERTIFICATE_IMPORT_ERROR" QDEVICE_CERTIFICATE_SIGN_ERROR = 
"QDEVICE_CERTIFICATE_SIGN_ERROR" QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR" QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS" QDEVICE_GET_STATUS_ERROR = "QDEVICE_GET_STATUS_ERROR" QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR" QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS" QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED" QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED" QDEVICE_NOT_RUNNING = "QDEVICE_NOT_RUNNING" QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED" QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS" REQUIRED_OPTIONS_ARE_MISSING = "REQUIRED_OPTIONS_ARE_MISSING" REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING = "REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING" RESOURCE_BAN_PCMK_ERROR = "RESOURCE_BAN_PCMK_ERROR" RESOURCE_BAN_PCMK_SUCCESS = "RESOURCE_BAN_PCMK_SUCCESS" RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE = "RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE" RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE = "RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE" RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR" RESOURCE_DOES_NOT_RUN = "RESOURCE_DOES_NOT_RUN" RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES = "RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES" RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE = 'RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE' RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE = "RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE" RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE = "RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE" RESOURCE_IS_GUEST_NODE_ALREADY = "RESOURCE_IS_GUEST_NODE_ALREADY" RESOURCE_IS_UNMANAGED = "RESOURCE_IS_UNMANAGED" RESOURCE_MANAGED_NO_MONITOR_ENABLED = "RESOURCE_MANAGED_NO_MONITOR_ENABLED" RESOURCE_MOVE_PCMK_ERROR = "RESOURCE_MOVE_PCMK_ERROR" RESOURCE_MOVE_PCMK_SUCCESS = "RESOURCE_MOVE_PCMK_SUCCESS" RESOURCE_OPERATION_INTERVAL_ADAPTED = "RESOURCE_OPERATION_INTERVAL_ADAPTED" RESOURCE_OPERATION_INTERVAL_DUPLICATION = "RESOURCE_OPERATION_INTERVAL_DUPLICATION" RESOURCE_REFRESH_ERROR = "RESOURCE_REFRESH_ERROR" RESOURCE_REFRESH_TOO_TIME_CONSUMING = 'RESOURCE_REFRESH_TOO_TIME_CONSUMING' RESOURCE_RUNNING_ON_NODES = "RESOURCE_RUNNING_ON_NODES" RESOURCE_UNMOVE_UNBAN_PCMK_ERROR = "RESOURCE_UNMOVE_UNBAN_PCMK_ERROR" RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS = "RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS" RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = "RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED" RUN_EXTERNAL_PROCESS_ERROR = "RUN_EXTERNAL_PROCESS_ERROR" RUN_EXTERNAL_PROCESS_FINISHED = "RUN_EXTERNAL_PROCESS_FINISHED" RUN_EXTERNAL_PROCESS_STARTED = "RUN_EXTERNAL_PROCESS_STARTED" SBD_CHECK_STARTED = "SBD_CHECK_STARTED" SBD_CHECK_SUCCESS = "SBD_CHECK_SUCCESS" SBD_CONFIG_ACCEPTED_BY_NODE = "SBD_CONFIG_ACCEPTED_BY_NODE" SBD_CONFIG_DISTRIBUTION_STARTED = "SBD_CONFIG_DISTRIBUTION_STARTED" SBD_DEVICE_DOES_NOT_EXIST = "SBD_DEVICE_DOES_NOT_EXIST" SBD_DEVICE_DUMP_ERROR = "SBD_DEVICE_DUMP_ERROR" SBD_DEVICE_INITIALIZATION_ERROR = "SBD_DEVICE_INITIALIZATION_ERROR" SBD_DEVICE_INITIALIZATION_STARTED = "SBD_DEVICE_INITIALIZATION_STARTED" SBD_DEVICE_INITIALIZATION_SUCCESS = "SBD_DEVICE_INITIALIZATION_SUCCESS" SBD_DEVICE_IS_NOT_BLOCK_DEVICE = "SBD_DEVICE_IS_NOT_BLOCK_DEVICE" SBD_DEVICE_LIST_ERROR = "SBD_DEVICE_LIST_ERROR" SBD_DEVICE_MESSAGE_ERROR = "SBD_DEVICE_MESSAGE_ERROR" SBD_DEVICE_PATH_NOT_ABSOLUTE = "SBD_DEVICE_PATH_NOT_ABSOLUTE" SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED" SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED" SBD_LIST_WATCHDOG_ERROR = "SBD_LIST_WATCHDOG_ERROR" SBD_NO_DEVICE_FOR_NODE = "SBD_NO_DEVICE_FOR_NODE" 
SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED" SBD_NOT_USED_CANNOT_SET_SBD_OPTIONS = "SBD_NOT_USED_CANNOT_SET_SBD_OPTIONS" SBD_TOO_MANY_DEVICES_FOR_NODE = "SBD_TOO_MANY_DEVICES_FOR_NODE" SBD_WITH_DEVICES_NOT_USED_CANNOT_SET_DEVICE = "SBD_WITH_DEVICES_NOT_USED_CANNOT_SET_DEVICE" SBD_WATCHDOG_NOT_SUPPORTED = "SBD_WATCHDOG_NOT_SUPPORTED" SBD_WATCHDOG_VALIDATION_INACTIVE = "SBD_WATCHDOG_VALIDATION_INACTIVE" SBD_WATCHDOG_TEST_ERROR = "SBD_WATCHDOG_TEST_ERROR" SBD_WATCHDOG_TEST_MULTUPLE_DEVICES = "SBD_WATCHDOG_TEST_MULTUPLE_DEVICES" SBD_WATCHDOG_TEST_FAILED = "SBD_WATCHDOG_TEST_FAILED" SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR" SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED" SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS" SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR" SERVICE_ENABLE_STARTED = "SERVICE_ENABLE_STARTED" SERVICE_ENABLE_SKIPPED = "SERVICE_ENABLE_SKIPPED" SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS" SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR" SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS" SERVICE_NOT_INSTALLED = "SERVICE_NOT_INSTALLED" SERVICE_START_ERROR = "SERVICE_START_ERROR" SERVICE_START_SKIPPED = "SERVICE_START_SKIPPED" SERVICE_START_STARTED = "SERVICE_START_STARTED" SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS" SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR" SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED" SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS" SERVICE_VERSION_MISMATCH = "SERVICE_VERSION_MISMATCH" STONITH_RESOURCES_DO_NOT_EXIST = "STONITH_RESOURCES_DO_NOT_EXIST" SERVICE_COMMANDS_ON_NODES_STARTED = "SERVICE_COMMANDS_ON_NODES_STARTED" SERVICE_COMMANDS_ON_NODES_SKIPPED = "SERVICE_COMMANDS_ON_NODES_SKIPPED" SERVICE_COMMAND_ON_NODE_ERROR = "SERVICE_COMMAND_ON_NODE_ERROR" SERVICE_COMMAND_ON_NODE_SUCCESS = "SERVICE_COMMAND_ON_NODE_SUCCESS" SYSTEM_WILL_RESET = "SYSTEM_WILL_RESET" TMP_FILE_WRITE = "TMP_FILE_WRITE" UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE = "UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE" UNABLE_TO_CONNECT_TO_ALL_REMAINING_NODE = "UNABLE_TO_CONNECT_TO_ALL_REMAINING_NODE" UNABLE_TO_DETERMINE_USER_UID = "UNABLE_TO_DETERMINE_USER_UID" UNABLE_TO_DETERMINE_GROUP_GID = "UNABLE_TO_DETERMINE_GROUP_GID" UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA' UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG" UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG" UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS" UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE = "UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE" WATCHDOG_INVALID = "WATCHDOG_INVALID" UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS" USE_COMMAND_NODE_ADD_REMOTE = "USE_COMMAND_NODE_ADD_REMOTE" USE_COMMAND_NODE_ADD_GUEST = "USE_COMMAND_NODE_ADD_GUEST" USE_COMMAND_NODE_REMOVE_GUEST = "USE_COMMAND_NODE_REMOVE_GUEST" USING_KNOWN_HOST_ADDRESS_FOR_HOST = "USING_KNOWN_HOST_ADDRESS_FOR_HOST" USING_DEFAULT_WATCHDOG = "USING_DEFAULT_WATCHDOG" WAIT_FOR_IDLE_ERROR = "WAIT_FOR_IDLE_ERROR" WAIT_FOR_IDLE_NOT_LIVE_CLUSTER = "WAIT_FOR_IDLE_NOT_LIVE_CLUSTER" WAIT_FOR_IDLE_NOT_SUPPORTED = "WAIT_FOR_IDLE_NOT_SUPPORTED" WAIT_FOR_IDLE_TIMED_OUT = "WAIT_FOR_IDLE_TIMED_OUT" WAIT_FOR_NODE_STARTUP_ERROR = "WAIT_FOR_NODE_STARTUP_ERROR" WAIT_FOR_NODE_STARTUP_STARTED = "WAIT_FOR_NODE_STARTUP_STARTED" WAIT_FOR_NODE_STARTUP_TIMED_OUT = "WAIT_FOR_NODE_STARTUP_TIMED_OUT" WAIT_FOR_NODE_STARTUP_WITHOUT_START = "WAIT_FOR_NODE_STARTUP_WITHOUT_START" WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND" 
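# A minimal sketch of how these codes are typically consumed by the CLI
# layer; the flow below mirrors commands such as node_add in this release and
# is an illustration only, not part of this module:
#
#     from pcs.common import report_codes
#
#     force_flags = []
#     if modifiers.get("--force"):
#         force_flags.append(report_codes.FORCE)
#     if modifiers.get("--skip-offline"):
#         force_flags.append(report_codes.SKIP_OFFLINE_NODES)
#     lib.cluster.add_nodes(nodes=[node_dict], force_flags=force_flags)
#
# Library commands receiving these flags then treat the matching validation
# errors as warnings (see the --force option docstrings in the cluster
# commands above).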
pcs-0.10.4/pcs/common/reports.py000066400000000000000000000027541356771603100165270ustar00rootroot00000000000000import abc

from pcs.lib.errors import ReportItem, ReportItemList


class SimpleReportProcessorInterface(abc.ABC):
    def report(self, report_item: ReportItem) -> ReportItemList:
        return self.report_list([report_item])

    @abc.abstractmethod
    def report_list(self, report_list: ReportItemList) -> ReportItemList:
        raise NotImplementedError()


class SimpleReportProcessor(SimpleReportProcessorInterface):
    """
    This class is a wrapper for a report processor class and at the same time
    implements the interface of a simple report processor. It extends that
    interface with an easy way of checking whether any errors have been
    reported.
    """
    def __new__(cls, report_processor: SimpleReportProcessorInterface):
        if isinstance(report_processor, cls):
            # There is no point in wrapping the same object multiple times
            return report_processor
        self = super().__new__(cls)
        self.__init__(report_processor)
        return self

    def __init__(self, report_processor: SimpleReportProcessorInterface):
        self._report_processor = report_processor
        self._error_list: ReportItemList = []

    def report_list(self, report_list: ReportItemList) -> ReportItemList:
        error_list = self._report_processor.report_list(report_list)
        self._error_list.extend(error_list)
        return error_list

    @property
    def has_errors(self) -> bool:
        return bool(self._error_list)

    def clear_errors(self) -> None:
        self._error_list = []
pcs-0.10.4/pcs/common/ssl.py000066400000000000000000000020721356771603100156230ustar00rootroot00000000000000import time

from OpenSSL import crypto


def cert_date_format(timestamp):
    return str.encode(time.strftime("%Y%m%d%H%M%SZ", time.gmtime(timestamp)))


def generate_key(length=3072):
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, length)
    return key


def generate_cert(key, server_name):
    now = time.time()
    cert = crypto.X509()
    subject = cert.get_subject()
    subject.countryName = "US"
    subject.stateOrProvinceName = "MN"
    subject.localityName = "Minneapolis"
    subject.organizationName = "pcsd"
    subject.organizationalUnitName = "pcsd"
    subject.commonName = server_name
    cert.set_version(2)
    cert.set_serial_number(int(now*1000))
    cert.set_notBefore(cert_date_format(now))
    cert.set_notAfter(cert_date_format(now + 60*60*24*365*10)) # 10 years
    cert.set_issuer(subject)
    cert.set_pubkey(key)
    cert.sign(key, 'sha256')
    return cert


def dump_cert(certificate):
    return crypto.dump_certificate(crypto.FILETYPE_PEM, certificate)


def dump_key(key):
    return crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
pcs-0.10.4/pcs/common/system.py000066400000000000000000000003321356771603100163430ustar00rootroot00000000000000import os.path
from functools import lru_cache

SYSTEMD_PATHS = [
    '/run/systemd/system',
    '/var/run/systemd/system',
]

@lru_cache()
def is_systemd():
    return any([os.path.isdir(path) for path in SYSTEMD_PATHS])
pcs-0.10.4/pcs/common/tools.py000066400000000000000000000064141356771603100161660ustar00rootroot00000000000000from collections import namedtuple
from enum import Enum
import threading
from typing import (
    Iterable,
    List,
    Mapping,
    Optional,
)

from lxml import etree


def run_parallel(worker, data_list):
    thread_list = []
    for args, kwargs in data_list:
        thread = threading.Thread(target=worker, args=args, kwargs=kwargs)
        thread.daemon = True
        thread_list.append(thread)
        thread.start()
    for thread in thread_list:
        thread.join()


def format_environment_error(e):
    return format_os_error(e)


def format_os_error(e):
    if e.filename:
        return "{0}: '{1}'".format(e.strerror, e.filename)
    return e.strerror


def indent(line_list: Iterable[str], indent_step: int = 2) -> List[str]:
    """
    return a line list where each line of input is prefixed by N spaces

    line_list -- original lines
    indent_step -- count of spaces for line prefix
    """
    return [
        "{0}{1}".format(" " * indent_step, line) if line else line
        for line in line_list
    ]


def format_list(
    item_list: Iterable[str],
    optional_transformations: Optional[Mapping[str, str]] = None
) -> str:
    if not optional_transformations:
        optional_transformations = {}
    return ", ".join(sorted([
        "'{0}'".format(optional_transformations.get(item, item))
        for item in item_list
    ]))


def join_multilines(strings):
    return "\n".join([a.strip() for a in strings if a.strip()])


def xml_fromstring(xml):
    # If the xml contains an encoding declaration such as:
    # <?xml version="1.0" encoding="UTF-8"?>
    # we get an exception in python3:
    # ValueError: Unicode strings with encoding declaration are not supported.
    # Please use bytes input or XML fragments without declaration.
    # So we encode the string to bytes.
    return etree.fromstring(
        xml.encode("utf-8"),
        # it raises on a huge xml without the flag huge_tree=True
        # see https://bugzilla.redhat.com/show_bug.cgi?id=1506864
        etree.XMLParser(huge_tree=True)
    )


class AutoNameEnum(str, Enum):
    def _generate_next_value_(name, start, count, last_values):
        # pylint: disable=no-self-argument
        del start, count, last_values
        return name


class Version(namedtuple("Version", ["major", "minor", "revision"])):
    def __new__(cls, major, minor=None, revision=None):
        return super(Version, cls).__new__(cls, major, minor, revision)

    @property
    def as_full_tuple(self):
        return (
            self.major,
            self.minor if self.minor is not None else 0,
            self.revision if self.revision is not None else 0,
        )

    def normalize(self):
        return self.__class__(*self.as_full_tuple)

    def __str__(self):
        return ".".join([str(x) for x in self if x is not None])

    def __lt__(self, other):
        return self.as_full_tuple < other.as_full_tuple

    def __le__(self, other):
        return self.as_full_tuple <= other.as_full_tuple

    def __eq__(self, other):
        return self.as_full_tuple == other.as_full_tuple

    def __ne__(self, other):
        return self.as_full_tuple != other.as_full_tuple

    def __gt__(self, other):
        return self.as_full_tuple > other.as_full_tuple

    def __ge__(self, other):
        return self.as_full_tuple >= other.as_full_tuple
pcs-0.10.4/pcs/config.py000066400000000000000000001045421356771603100150030ustar00rootroot00000000000000import sys
import os
import os.path
import re
import datetime
from io import BytesIO
import tarfile
import json
from xml.dom.minidom import parse
import logging
import pwd
import grp
import tempfile
import time
import platform
import shutil
import difflib

try:
    import clufter.facts
    import clufter.format_manager
    import clufter.filter_manager
    import clufter.command_manager
    no_clufter = False
except ImportError:
    no_clufter = True

from pcs import (
    cluster,
    constraint,
    quorum,
    resource,
    settings,
    status,
    stonith,
    usage,
    utils,
    alert,
)
from pcs.cli.common import middleware
from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.common.reports import process_library_reports
from pcs.cli.constraint import command as constraint_command
from pcs.cli.constraint_colocation import (
    console_report as colocation_console_report,
)
from pcs.cli.constraint_order import console_report as order_console_report
from pcs.cli.constraint_ticket import console_report as ticket_console_report
from pcs.common.tools import indent
from pcs.lib.commands import quorum as lib_quorum
from pcs.lib.errors import LibraryError
from pcs.lib.external import is_service_running
from pcs.lib.node import
get_existing_nodes_names # pylint: disable=too-many-branches, too-many-locals, too-many-statements def config_show(lib, argv, modifiers): """ Options: * -f - CIB file, when getting cluster name on remote node (corosync.conf doesn't exist) * --corosync_conf - corosync.conf file """ modifiers.ensure_only_supported("-f", "--corosync_conf") if argv: raise CmdLineInputError() print("Cluster Name: %s" % utils.getClusterName()) status.nodes_status(lib, ["config"], modifiers.get_subset("-f")) print() print("\n".join(_config_show_cib_lines(lib))) if ( utils.hasCorosyncConf() and not modifiers.is_specified("-f") and not modifiers.is_specified("--corosync_conf") ): cluster.cluster_uidgid( lib, [], modifiers.get_subset(), silent_list=True ) if ( modifiers.is_specified("--corosync_conf") or utils.hasCorosyncConf() ): print() print("Quorum:") try: config = lib_quorum.get_config(utils.get_lib_env()) print("\n".join(indent(quorum.quorum_config_to_str(config)))) except LibraryError as e: process_library_reports(e.args) def _config_show_cib_lines(lib): """ Commandline options: * -f - CIB file """ # update of pcs_options will change output of constraint show utils.pcs_options["--full"] = 1 # get latest modifiers object after updating pcs_options modifiers = utils.get_input_modifiers() cib_xml = utils.get_cib() cib_etree = utils.get_cib_etree(cib_xml=cib_xml) cib_dom = utils.get_cib_dom(cib_xml=cib_xml) resource_lines = [] stonith_lines = [] for resource_el in cib_etree.find(".//resources"): is_stonith = ( "class" in resource_el.attrib and resource_el.attrib["class"] == "stonith" ) resource_el_lines = resource.resource_node_lines(resource_el) if is_stonith: stonith_lines += resource_el_lines else: resource_lines += resource_el_lines all_lines = [] all_lines.append("Resources:") all_lines.extend(indent(resource_lines, indent_step=1)) all_lines.append("") all_lines.append("Stonith Devices:") all_lines.extend(indent(stonith_lines, indent_step=1)) all_lines.append("Fencing Levels:") levels_lines = stonith.stonith_level_config_to_str( lib.fencing_topology.get_config() ) if levels_lines: all_lines.extend(indent(levels_lines, indent_step=2)) all_lines.append("") constraints_element = cib_dom.getElementsByTagName('constraints')[0] all_lines.extend( constraint.location_lines( constraints_element, showDetail=True, show_expired=True, verify_expiration=False ) ) all_lines.extend(constraint_command.show( "Ordering Constraints:", lib.constraint_order.show, order_console_report.constraint_plain, modifiers.get_subset("-f", "--full"), )) all_lines.extend(constraint_command.show( "Colocation Constraints:", lib.constraint_colocation.show, colocation_console_report.constraint_plain, modifiers.get_subset("-f", "--full"), )) all_lines.extend(constraint_command.show( "Ticket Constraints:", lib.constraint_ticket.show, ticket_console_report.constraint_plain, modifiers.get_subset("-f", "--full"), )) all_lines.append("") all_lines.extend(alert.alert_config_lines(lib)) all_lines.append("") all_lines.append("Resources Defaults:") all_lines.extend(indent( resource.show_defaults(cib_dom, "rsc_defaults"), indent_step=1 )) all_lines.append("Operations Defaults:") all_lines.extend(indent( resource.show_defaults(cib_dom, "op_defaults"), indent_step=1 )) all_lines.append("") all_lines.append("Cluster Properties:") properties = utils.get_set_properties() all_lines.extend(indent( [ "{0}: {1}".format(prop, val) for prop, val in sorted(properties.items()) ], indent_step=1 )) return all_lines def config_backup(lib, argv, modifiers): """ Options: * 
--force - overwrite file if already exists """ del lib modifiers.ensure_only_supported("--force") if len(argv) > 1: usage.config(["backup"]) sys.exit(1) outfile_name = None if argv: outfile_name = argv[0] if not outfile_name.endswith(".tar.bz2"): outfile_name += ".tar.bz2" tar_data = config_backup_local() if outfile_name: ok, message = utils.write_file( outfile_name, tar_data, permissions=0o600, binary=True ) if not ok: utils.err(message) else: # in python3 stdout accepts str so we need to use buffer sys.stdout.buffer.write(tar_data) def config_backup_local(): """ Commandline options: no options """ file_list = config_backup_path_list() tar_data = BytesIO() try: tarball = tarfile.open(fileobj=tar_data, mode="w|bz2") config_backup_add_version_to_tarball(tarball) for tar_path, path_info in file_list.items(): if ( not os.path.exists(path_info["path"]) and not path_info["required"] ): continue tarball.add(path_info["path"], tar_path) tarball.close() except (tarfile.TarError, EnvironmentError) as e: utils.err("unable to create tarball: %s" % e) tar = tar_data.getvalue() tar_data.close() return tar def config_restore(lib, argv, modifiers): """ Options: * --local - restore config only on local node * --request-timeout - timeout for HTTP requests, used only if --local was not defined or user is not root """ del lib modifiers.ensure_only_supported("--local", "--request-timeout") if len(argv) > 1: usage.config(["restore"]) sys.exit(1) infile_name = infile_obj = None if argv: infile_name = argv[0] if not infile_name: # in python3 stdin returns str so we need to use buffer infile_obj = BytesIO(sys.stdin.buffer.read()) if os.getuid() == 0: if modifiers.get("--local"): config_restore_local(infile_name, infile_obj) else: config_restore_remote(infile_name, infile_obj) else: new_argv = ['config', 'restore'] new_stdin = None if modifiers.get("--local"): new_argv.append('--local') if infile_name: new_argv.append(os.path.abspath(infile_name)) else: new_stdin = infile_obj.read() err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd( new_argv, new_stdin ) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) print(std_out) sys.stderr.write(std_err) sys.exit(exitcode) def config_restore_remote(infile_name, infile_obj): """ Commandline options: * --request-timeout - timeout for HTTP requests """ extracted = { "version.txt": "", "corosync.conf": "", } try: tarball = tarfile.open(infile_name, "r|*", infile_obj) while True: # next(tarball) does not work in python2.6 tar_member_info = tarball.next() if tar_member_info is None: break if tar_member_info.name in extracted: tar_member = tarball.extractfile(tar_member_info) extracted[tar_member_info.name] = tar_member.read() tar_member.close() tarball.close() except (tarfile.TarError, EnvironmentError) as e: utils.err("unable to read the tarball: %s" % e) config_backup_check_version(extracted["version.txt"]) node_list, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade( conf_text=extracted["corosync.conf"].decode("utf-8") ) ) if report_list: process_library_reports(report_list) if not node_list: utils.err("no nodes found in the tarball") err_msgs = [] for node in node_list: try: retval, output = utils.checkStatus(node) if retval != 0: err_msgs.append(output) continue _status = json.loads(output) if ( _status["corosync"] or _status["pacemaker"] or # not supported by older pcsd, do not fail if not present _status.get("pacemaker_remote", False) ): err_msgs.append( "Cluster is currently running on node %s. 
You need to stop " "the cluster in order to restore the configuration." % node ) continue except (ValueError, NameError, LookupError): err_msgs.append("unable to determine status of the node %s" % node) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) # Temporarily disable config files syncing thread in pcsd so it will not # rewrite restored files. 10 minutes should be enough time to restore. # If node returns HTTP 404 it does not support config syncing at all. for node in node_list: retval, output = utils.pauseConfigSyncing(node, 10 * 60) if not (retval == 0 or "(HTTP error: 404)" in output): utils.err(output) if infile_obj: infile_obj.seek(0) tarball_data = infile_obj.read() else: with open(infile_name, "rb") as tarball: tarball_data = tarball.read() error_list = [] for node in node_list: retval, error = utils.restoreConfig(node, tarball_data) if retval != 0: error_list.append(error) if error_list: utils.err("unable to restore all nodes\n" + "\n".join(error_list)) def config_restore_local(infile_name, infile_obj): """ Commandline options: no options """ if ( is_service_running(utils.cmd_runner(), "corosync") or is_service_running(utils.cmd_runner(), "pacemaker") or is_service_running(utils.cmd_runner(), "pacemaker_remote") ): utils.err( "Cluster is currently running on this node. You need to stop " "the cluster in order to restore the configuration." ) file_list = config_backup_path_list(with_uid_gid=True) tarball_file_list = [] version = None tmp_dir = None try: tarball = tarfile.open(infile_name, "r|*", infile_obj) while True: # next(tarball) does not work in python2.6 tar_member_info = tarball.next() if tar_member_info is None: break if tar_member_info.name == "version.txt": version_data = tarball.extractfile(tar_member_info) version = version_data.read() version_data.close() continue tarball_file_list.append(tar_member_info.name) tarball.close() required_file_list = [ tar_path for tar_path, path_info in file_list.items() if path_info["required"] ] missing = set(required_file_list) - set(tarball_file_list) if missing: utils.err( "unable to restore the cluster, missing files in backup: %s" % ", ".join(missing) ) config_backup_check_version(version) if infile_obj: infile_obj.seek(0) tarball = tarfile.open(infile_name, "r|*", infile_obj) while True: # next(tarball) does not work in python2.6 tar_member_info = tarball.next() if tar_member_info is None: break extract_info = None path = tar_member_info.name while path: if path in file_list: extract_info = file_list[path] break path = os.path.dirname(path) if not extract_info: continue path_full = None if hasattr(extract_info.get("pre_store_call"), '__call__'): extract_info["pre_store_call"]() if "rename" in extract_info and extract_info["rename"]: if tmp_dir is None: tmp_dir = tempfile.mkdtemp() tarball.extractall(tmp_dir, [tar_member_info]) path_full = extract_info["path"] shutil.move( os.path.join(tmp_dir, tar_member_info.name), path_full ) else: dir_path = os.path.dirname(extract_info["path"]) tarball.extractall(dir_path, [tar_member_info]) path_full = os.path.join(dir_path, tar_member_info.name) file_attrs = extract_info["attrs"] os.chmod(path_full, file_attrs["mode"]) os.chown(path_full, file_attrs["uid"], file_attrs["gid"]) tarball.close() except (tarfile.TarError, EnvironmentError, OSError) as e: utils.err("unable to restore the cluster: %s" % e) finally: if tmp_dir: shutil.rmtree(tmp_dir, ignore_errors=True) try: sig_path = os.path.join(settings.cib_dir, "cib.xml.sig") if os.path.exists(sig_path): 
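# Editor's note: pacemaker keeps a signature/digest of cib.xml in
# cib.xml.sig; after cib.xml is replaced from a backup the old signature
# would presumably no longer match, so the restore drops it here and lets
# pacemaker regenerate it on the next start.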
os.remove(sig_path) except EnvironmentError as e: utils.err("unable to remove %s: %s" % (sig_path, e)) def config_backup_path_list(with_uid_gid=False): """ Commandline options: no options NOTE: corosync.conf path may be altered using --corosync_conf """ corosync_attrs = { "mtime": int(time.time()), "mode": 0o644, "uname": "root", "gname": "root", "uid": 0, "gid": 0, } corosync_authkey_attrs = dict(corosync_attrs) corosync_authkey_attrs["mode"] = 0o400 cib_attrs = { "mtime": int(time.time()), "mode": 0o600, "uname": settings.pacemaker_uname, "gname": settings.pacemaker_gname, } if with_uid_gid: cib_attrs["uid"] = _get_uid(cib_attrs["uname"]) cib_attrs["gid"] = _get_gid(cib_attrs["gname"]) pcmk_authkey_attrs = dict(cib_attrs) pcmk_authkey_attrs["mode"] = 0o440 file_list = { "cib.xml": { "path": os.path.join(settings.cib_dir, "cib.xml"), "required": True, "attrs": dict(cib_attrs), }, "corosync_authkey": { "path": settings.corosync_authkey_file, "required": False, "attrs": corosync_authkey_attrs, "restore_procedure": None, "rename": True, }, "pacemaker_authkey": { "path": settings.pacemaker_authkey_file, "required": False, "attrs": pcmk_authkey_attrs, "restore_procedure": None, "rename": True, "pre_store_call": _ensure_etc_pacemaker_exists, }, "corosync.conf": { "path": settings.corosync_conf_file, "required": True, "attrs": dict(corosync_attrs), }, "uidgid.d": { "path": settings.corosync_uidgid_dir.rstrip("/"), "required": False, "attrs": dict(corosync_attrs), }, "pcs_settings.conf": { "path": settings.pcsd_settings_conf_location, "required": False, "attrs": { "mtime": int(time.time()), "mode": 0o644, "uname": "root", "gname": "root", "uid": 0, "gid": 0, }, } } return file_list def _get_uid(user_name): """ Commandline options: no options """ try: return pwd.getpwnam(user_name).pw_uid except KeyError: utils.err("Unable to determine uid of user '{0}'".format(user_name)) def _get_gid(group_name): """ Commandline options: no options """ try: return grp.getgrnam(group_name).gr_gid except KeyError: utils.err( "Unable to determine gid of group '{0}'".format(group_name) ) def _ensure_etc_pacemaker_exists(): """ Commandline options: no options """ dir_name = os.path.dirname(settings.pacemaker_authkey_file) if not os.path.exists(dir_name): os.mkdir(dir_name) os.chmod(dir_name, 0o750) os.chown( dir_name, _get_uid(settings.pacemaker_uname), _get_gid(settings.pacemaker_gname) ) def config_backup_check_version(version): """ Commandline options: no options """ try: version_number = int(version) supported_version = config_backup_version() if version_number > supported_version: utils.err( "Unsupported version of the backup, " "supported version is %d, backup version is %d" % (supported_version, version_number) ) if version_number < supported_version: print( "Warning: restoring from the backup version %d, " "current supported version is %s" % (version_number, supported_version) ) except TypeError: utils.err("Cannot determine version of the backup") def config_backup_add_version_to_tarball(tarball, version=None): """ Commandline options: no options """ ver = version if version is not None else str(config_backup_version()) return utils.tar_add_file_data(tarball, ver.encode("utf-8"), "version.txt") def config_backup_version(): """ Commandline options: no options """ return 1 def config_checkpoint_list(lib, argv, modifiers): """ Options: no options """ del lib modifiers.ensure_only_supported() if argv: raise CmdLineInputError() try: file_list = os.listdir(settings.cib_dir) except OSError as e: utils.err("unable to 
list checkpoints: %s" % e) cib_list = [] cib_name_re = re.compile(r"^cib-(\d+)\.raw$") for filename in file_list: match = cib_name_re.match(filename) if not match: continue file_path = os.path.join(settings.cib_dir, filename) try: if os.path.isfile(file_path): cib_list.append( (float(os.path.getmtime(file_path)), match.group(1)) ) except OSError: pass cib_list.sort() if not cib_list: print("No checkpoints available") return for cib_info in cib_list: print( "checkpoint %s: date %s" % (cib_info[1], datetime.datetime.fromtimestamp(round(cib_info[0]))) ) def _checkpoint_to_lines(lib, checkpoint_number): # backup current settings orig_usefile = utils.usefile orig_filename = utils.filename orig_middleware = lib.middleware_factory # configure old code to read the CIB from a file utils.usefile = True utils.filename = os.path.join( settings.cib_dir, "cib-%s.raw" % checkpoint_number ) # configure new code to read the CIB from a file lib.middleware_factory = orig_middleware._replace( cib=middleware.cib(utils.filename, utils.touch_cib_file) ) # export the CIB to text result = False, [] if os.path.isfile(utils.filename): result = True, _config_show_cib_lines(lib) # restore original settings utils.usefile = orig_usefile utils.filename = orig_filename lib.middleware_factory = orig_middleware return result def config_checkpoint_view(lib, argv, modifiers): """ Options: no options """ modifiers.ensure_only_supported() if len(argv) != 1: usage.config(["checkpoint view"]) sys.exit(1) loaded, lines = _checkpoint_to_lines(lib, argv[0]) if not loaded: utils.err("unable to read the checkpoint") print("\n".join(lines)) def config_checkpoint_diff(lib, argv, modifiers): """ Commandline options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 2: usage.config(["checkpoint diff"]) sys.exit(1) if argv[0] == argv[1]: utils.err("cannot diff a checkpoint against itself") errors = [] checkpoints_lines = [] for checkpoint in argv: if checkpoint == "live": lines = _config_show_cib_lines(lib) if not lines: errors.append("unable to read live configuration") else: checkpoints_lines.append(lines) else: loaded, lines = _checkpoint_to_lines(lib, checkpoint) if not loaded: errors.append( "unable to read checkpoint '{0}'".format(checkpoint) ) else: checkpoints_lines.append(lines) if errors: utils.err("\n".join(errors)) print("Differences between {0} (-) and {1} (+):".format(*[ "live configuration" if label == "live" else f"checkpoint {label}" for label in argv ])) print("\n".join([ line.rstrip() for line in difflib.Differ().compare( checkpoints_lines[0], checkpoints_lines[1] )] )) def config_checkpoint_restore(lib, argv, modifiers): """ Options: * -f - CIB file, a checkpoint will be restored into a specified file """ # pylint: disable=broad-except del lib modifiers.ensure_only_supported("-f") if len(argv) != 1: usage.config(["checkpoint restore"]) sys.exit(1) cib_path = os.path.join(settings.cib_dir, "cib-%s.raw" % argv[0]) try: snapshot_dom = parse(cib_path) except Exception as e: utils.err("unable to read the checkpoint: %s" % e) utils.replace_cib_configuration(snapshot_dom) def config_import_cman(lib, argv, modifiers): """ Options: * --force - skip checks, overwrite files * --interactive - interactive issue resolving * --request-timeout - effective only when output is not specified """ # pylint: disable=no-member del lib modifiers.ensure_only_supported( "--force", "--interactive", "--request-timeout", ) if no_clufter: utils.err( "Unable to perform a CMAN cluster conversion due to missing " "python-clufter 
package" ) clufter_supports_corosync3 = hasattr(clufter.facts, "cluster_pcs_camelback") # prepare convertor options cluster_conf = settings.cluster_conf_file dry_run_output = None output_format = "corosync.conf" dist = None invalid_args = False for arg in argv: if "=" in arg: name, value = arg.split("=", 1) if name == "input": cluster_conf = value elif name == "output": dry_run_output = value elif name == "output-format": if value in ( "corosync.conf", "pcs-commands", "pcs-commands-verbose", ): output_format = value else: invalid_args = True elif name == "dist": dist = value else: invalid_args = True else: invalid_args = True if ( output_format not in ("pcs-commands", "pcs-commands-verbose") and (dry_run_output and not dry_run_output.endswith(".tar.bz2")) ): dry_run_output += ".tar.bz2" if invalid_args or not dry_run_output: usage.config(["import-cman"]) sys.exit(1) debug = modifiers.get("--debug") force = modifiers.get("--force") interactive = modifiers.get("--interactive") if dist is not None: if not clufter_supports_corosync3: utils.err( "Unable to perform a CMAN cluster conversion due to clufter " "not supporting Corosync 3. Please, upgrade clufter packages." ) if not clufter.facts.cluster_pcs_camelback("linux", dist.split(",")): utils.err("dist does not match output-format") elif output_format == "corosync.conf": dist = _get_linux_dist() else: # for output-format=pcs-command[-verbose] dist = _get_linux_dist() clufter_args = { "input": str(cluster_conf), "cib": {"passin": "bytestring"}, "nocheck": force, "batch": True, "sys": "linux", "dist": dist, } if interactive: if "EDITOR" not in os.environ: utils.err("$EDITOR environment variable is not set") clufter_args["batch"] = False clufter_args["editor"] = os.environ["EDITOR"] if debug: logging.getLogger("clufter").setLevel(logging.DEBUG) if output_format == "corosync.conf": clufter_args["coro"] = {"passin": "struct"} cmd_name = "ccs2pcs-camelback" elif output_format in ("pcs-commands", "pcs-commands-verbose"): clufter_args["output"] = {"passin": "bytestring"} clufter_args["start_wait"] = "60" clufter_args["tmp_cib"] = "tmp-cib.xml" clufter_args["force"] = force clufter_args["text_width"] = "80" clufter_args["silent"] = True clufter_args["noguidance"] = True if output_format == "pcs-commands-verbose": clufter_args["text_width"] = "-1" clufter_args["silent"] = False clufter_args["noguidance"] = False if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")): cmd_name = "ccs2pcscmd-flatiron" elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")): cmd_name = "ccs2pcscmd-needle" elif ( clufter_supports_corosync3 and clufter.facts.cluster_pcs_camelback("linux", dist.split(",")) ): cmd_name = "ccs2pcscmd-camelback" else: utils.err( "unrecognized dist, try something recognized" + " (e. g. 
rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)" ) clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args) # run convertor run_clufter( cmd_name, clufter_args_obj, debug, force, "Error: unable to import cluster configuration" ) # save commands if output_format in ("pcs-commands", "pcs-commands-verbose"): ok, message = utils.write_file( dry_run_output, clufter_args_obj.output["passout"].decode() ) if not ok: utils.err(message) return # put new config files into tarball file_list = config_backup_path_list() for file_item in file_list.values(): file_item["attrs"]["uname"] = "root" file_item["attrs"]["gname"] = "root" file_item["attrs"]["uid"] = 0 file_item["attrs"]["gid"] = 0 file_item["attrs"]["mode"] = 0o600 tar_data = BytesIO() try: tarball = tarfile.open(fileobj=tar_data, mode="w|bz2") config_backup_add_version_to_tarball(tarball) utils.tar_add_file_data( tarball, clufter_args_obj.cib["passout"], "cib.xml", **file_list["cib.xml"]["attrs"] ) # put uidgid into separate files fmt_simpleconfig = clufter.format_manager.FormatManager.init_lookup( 'simpleconfig' ).plugins['simpleconfig'] corosync_struct = [] uidgid_list = [] for section in clufter_args_obj.coro["passout"][2]: if section[0] == "uidgid": uidgid_list.append(section[1]) else: corosync_struct.append(section) corosync_conf_data = fmt_simpleconfig( "struct", ("corosync", (), corosync_struct) )("bytestring") utils.tar_add_file_data( tarball, corosync_conf_data, "corosync.conf", **file_list["corosync.conf"]["attrs"] ) for uidgid in uidgid_list: uid = "" gid = "" for item in uidgid: if item[0] == "uid": uid = item[1] if item[0] == "gid": gid = item[1] filename = utils.get_uid_gid_file_name(uid, gid) uidgid_data = fmt_simpleconfig( "struct", ("corosync", (), [("uidgid", uidgid, None)]) )("bytestring") utils.tar_add_file_data( tarball, uidgid_data, "uidgid.d/" + filename, **file_list["uidgid.d"]["attrs"] ) tarball.close() except (tarfile.TarError, EnvironmentError) as e: utils.err("unable to create tarball: %s" % e) tar_data.seek(0) #save tarball / remote restore if dry_run_output: ok, message = utils.write_file( dry_run_output, tar_data.read(), permissions=0o600, binary=True ) if not ok: utils.err(message) else: config_restore_remote(None, tar_data) tar_data.close() def _get_linux_dist(): # pylint: disable=deprecated-method return ",".join(platform.linux_distribution(full_distribution_name=0)) def config_export_pcs_commands(lib, argv, modifiers, verbose=False): """ Options: * --force - skip checks, overwrite files * --interactive - interactive issue resolving * -f - CIB file * --corosync_conf """ del lib modifiers.ensure_only_supported( "--force", "--interactive", "-f", "--corosync_conf" ) if no_clufter: utils.err( "Unable to perform export due to missing python-clufter package" ) # parse options debug = modifiers.get("--debug") force = modifiers.get("--force") interactive = modifiers.get("--interactive") invalid_args = False output_file = None dist = None for arg in argv: if "=" in arg: name, value = arg.split("=", 1) if name == "output": output_file = value elif name == "dist": dist = value else: invalid_args = True else: invalid_args = True # check options if invalid_args: usage.config(["export pcs-commands"]) sys.exit(1) # complete optional options if dist is None: dist = _get_linux_dist() # prepare convertor options clufter_args = { "nocheck": force, "batch": True, "sys": "linux", "dist": dist, "coro": settings.corosync_conf_file, "start_wait": "60", "tmp_cib": "tmp-cib.xml", "force": force, "text_width": "80", 
"silent": True, "noguidance": True, } if output_file: clufter_args["output"] = {"passin": "bytestring"} else: clufter_args["output"] = "-" if interactive: if "EDITOR" not in os.environ: utils.err("$EDITOR environment variable is not set") clufter_args["batch"] = False clufter_args["editor"] = os.environ["EDITOR"] if debug: logging.getLogger("clufter").setLevel(logging.DEBUG) if utils.usefile: clufter_args["cib"] = os.path.abspath(utils.filename) else: clufter_args["cib"] = ("bytestring", utils.get_cib()) if verbose: clufter_args["text_width"] = "-1" clufter_args["silent"] = False clufter_args["noguidance"] = False clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args) cmd_name = "pcs2pcscmd-camelback" # run convertor run_clufter( cmd_name, clufter_args_obj, debug, force, "Error: unable to export cluster configuration" ) # save commands if not printed to stdout by clufter if output_file: # pylint: disable=no-member ok, message = utils.write_file( output_file, clufter_args_obj.output["passout"].decode() ) if not ok: utils.err(message) def run_clufter(cmd_name, cmd_args, debug, force, err_prefix): """ Commandline options: no options used but messages which include --force, --debug and --interactive are generated """ # pylint: disable=broad-except try: result = None cmd_manager = clufter.command_manager.CommandManager.init_lookup( cmd_name ) result = cmd_manager.commands[cmd_name](cmd_args) error_message = "" except Exception as e: error_message = str(e) if error_message or result != 0: hints = [] hints.append("--interactive to solve the issues manually") if not debug: hints.append("--debug to get more information") if not force: hints.append("--force to override") hints_string = "\nTry using %s." % ", ".join(hints) if hints else "" sys.stderr.write( err_prefix + (": %s" % error_message if error_message else "") + hints_string + "\n" ) sys.exit(1 if result is None else result) pcs-0.10.4/pcs/constraint.py000066400000000000000000001436621356771603100157310ustar00rootroot00000000000000import sys from collections import defaultdict from os.path import isfile import xml.dom.minidom from xml.dom.minidom import parseString from enum import Enum from pcs import ( rule as rule_utils, settings, usage, utils, ) from pcs.cli import ( constraint_colocation, constraint_order, ) from pcs.cli.common import parse_args from pcs.cli.common.console_report import warn from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.reports import process_library_reports import pcs.cli.constraint_colocation.command as colocation_command import pcs.cli.constraint_order.command as order_command from pcs.cli.constraint_ticket import command as ticket_command from pcs.common import report_codes from pcs.lib import reports from pcs.lib.cib.constraint import resource_set from pcs.lib.cib.constraint.order import ATTRIB as order_attrib from pcs.lib.node import get_existing_nodes_names from pcs.lib.pacemaker.values import ( RESOURCE_ROLES, sanitize_id, SCORE_INFINITY, ) # pylint: disable=too-many-branches, too-many-statements # pylint: disable=invalid-name, too-many-nested-blocks # pylint: disable=too-many-locals, too-many-lines OPTIONS_ACTION = resource_set.ATTRIB["action"] DEFAULT_ACTION = "start" DEFAULT_ROLE = "Started" OPTIONS_SYMMETRICAL = order_attrib["symmetrical"] OPTIONS_KIND = order_attrib["kind"] LOCATION_NODE_VALIDATION_SKIP_MSG = ( "Validation for node existence in the cluster will be skipped" ) CRM_RULE_MISSING_MSG = ( "Warning: crm_rule is not available, therefore expired 
constraints may be " "shown. Consider upgrading pacemaker.\n" ) RESOURCE_TYPE_RESOURCE = "resource" RESOURCE_TYPE_REGEXP = "regexp" RULE_IN_EFFECT = "in effect" RULE_EXPIRED = "expired" RULE_NOT_IN_EFFECT = "not yet in effect" RULE_UNKNOWN_STATUS = "unknown status" class CrmRuleReturnCode(Enum): IN_EFFECT = 0 EXPIRED = 110 TO_BE_IN_EFFECT = 111 def constraint_location_cmd(lib, argv, modifiers): if not argv: sub_cmd = "show" else: sub_cmd = argv.pop(0) try: if sub_cmd == "add": location_add(lib, argv, modifiers) elif sub_cmd in ["remove", "delete"]: location_remove(lib, argv, modifiers) elif sub_cmd == "show": location_show(lib, argv, modifiers) elif len(argv) >= 2: if argv[0] == "rule": location_rule(lib, [sub_cmd] + argv, modifiers) else: location_prefer(lib, [sub_cmd] + argv, modifiers) else: raise CmdLineInputError() except CmdLineInputError as e: utils.exit_on_cmdline_input_errror( e, "constraint", ["location", sub_cmd] ) def constraint_order_cmd(lib, argv, modifiers): if not argv: sub_cmd = "show" else: sub_cmd = argv.pop(0) try: if sub_cmd == "set": order_command.create_with_set(lib, argv, modifiers) elif sub_cmd in ["remove", "delete"]: order_rm(lib, argv, modifiers) elif sub_cmd == "show": order_command.show(lib, argv, modifiers) else: order_start(lib, [sub_cmd] + argv, modifiers) except CmdLineInputError as e: utils.exit_on_cmdline_input_errror( e, "constraint", ["order", sub_cmd] ) def constraint_show(lib, argv, modifiers): """ Options: * --all - print expired constraints * -f - CIB file * --full """ location_show(lib, argv, modifiers) order_command.show(lib, argv, modifiers.get_subset("--full", "-f")) colocation_command.show(lib, argv, modifiers.get_subset("--full", "-f")) ticket_command.show(lib, argv, modifiers.get_subset("--full", "-f")) def colocation_rm(lib, argv, modifiers): """ Options: * -f - CIB file """ del lib modifiers.ensure_only_supported("-f") elementFound = False if len(argv) < 2: raise CmdLineInputError() (dom, constraintsElement) = getCurrentConstraints() resource1 = argv[0] resource2 = argv[1] for co_loc in constraintsElement.getElementsByTagName('rsc_colocation')[:]: if ( co_loc.getAttribute("rsc") == resource1 and co_loc.getAttribute("with-rsc") == resource2 ): constraintsElement.removeChild(co_loc) elementFound = True if ( co_loc.getAttribute("rsc") == resource2 and co_loc.getAttribute("with-rsc") == resource1 ): constraintsElement.removeChild(co_loc) elementFound = True if elementFound: utils.replace_cib_configuration(dom) else: print("No matching resources found in ordering list") def _validate_constraint_resource(cib_dom, resource_id): resource_valid, resource_error, dummy_correct_id \ = utils.validate_constraint_resource(cib_dom, resource_id) if not resource_valid: utils.err(resource_error) # Syntax: colocation add [role] with [role] [score] [options] # possible commands: # with [score] [options] # with [score] [options] # with [score] [options] # with [score] [options] def colocation_add(lib, argv, modifiers): """ Options: * -f - CIB file * --force - allow constraint on any resource, allow duplicate constraints """ def _parse_score_options(argv): # When passed an array of arguments if the first argument doesn't have # an '=' then it's the score, otherwise they're all arguments. 
Return a # tuple with the score and array of name,value pairs """ Commandline options: no options """ if not argv: return SCORE_INFINITY, [] score = SCORE_INFINITY if "=" in argv[0] else argv.pop(0) # create a list of 2-tuples (name, value) arg_array = [ parse_args.split_option(arg, allow_empty_value=False) for arg in argv ] return score, arg_array def _validate_and_prepare_role(role): role_cleaned = role.lower().capitalize() if role_cleaned not in RESOURCE_ROLES: utils.err( "invalid role value '{0}', allowed values are: '{1}'".format( role, "', '".join(RESOURCE_ROLES) ) ) return role_cleaned del lib modifiers.ensure_only_supported("-f", "--force") if len(argv) < 3: raise CmdLineInputError() role1 = "" role2 = "" if argv[2] == "with": role1 = _validate_and_prepare_role(argv.pop(0)) resource1 = argv.pop(0) elif argv[1] == "with": resource1 = argv.pop(0) else: raise CmdLineInputError() if argv.pop(0) != "with": raise CmdLineInputError() if "with" in argv: raise CmdLineInputError( message="Multiple 'with's cannot be specified.", hint=( "Use the 'pcs constraint colocation set' command if you want " "to create a constraint for more than two resources." ), show_both_usage_and_message=True ) if not argv: raise CmdLineInputError() if len(argv) == 1: resource2 = argv.pop(0) else: if utils.is_score_or_opt(argv[1]): resource2 = argv.pop(0) else: role2 = _validate_and_prepare_role(argv.pop(0)) resource2 = argv.pop(0) score, nv_pairs = _parse_score_options(argv) cib_dom = utils.get_cib_dom() _validate_constraint_resource(cib_dom, resource1) _validate_constraint_resource(cib_dom, resource2) id_in_nvpairs = None for name, value in nv_pairs: if name == "id": id_valid, id_error = utils.validate_xml_id(value, 'constraint id') if not id_valid: utils.err(id_error) if utils.does_id_exist(cib_dom, value): utils.err( "id '%s' is already in use, please specify another one" % value ) id_in_nvpairs = True if not id_in_nvpairs: nv_pairs.append(( "id", utils.find_unique_id( cib_dom, "colocation-%s-%s-%s" % (resource1, resource2, score) ) )) (dom, constraintsElement) = getCurrentConstraints(cib_dom) # If one role is specified, the other should default to "started" if role1 != "" and role2 == "": role2 = DEFAULT_ROLE if role2 != "" and role1 == "": role1 = DEFAULT_ROLE element = dom.createElement("rsc_colocation") element.setAttribute("rsc", resource1) element.setAttribute("with-rsc", resource2) element.setAttribute("score", score) if role1 != "": element.setAttribute("rsc-role", role1) if role2 != "": element.setAttribute("with-rsc-role", role2) for nv_pair in nv_pairs: element.setAttribute(nv_pair[0], nv_pair[1]) if not modifiers.get("--force"): duplicates = colocation_find_duplicates(constraintsElement, element) if duplicates: utils.err( "duplicate constraint already exists, use --force to override\n" + "\n".join([ " " + constraint_colocation.console_report.constraint_plain( {"options": dict(dup.attributes.items())}, True ) for dup in duplicates ]) ) constraintsElement.appendChild(element) utils.replace_cib_configuration(dom) def colocation_find_duplicates(dom, constraint_el): """ Commandline options: no options """ def normalize(const_el): return ( const_el.getAttribute("rsc"), const_el.getAttribute("with-rsc"), const_el.getAttribute("rsc-role").capitalize() or DEFAULT_ROLE, const_el.getAttribute("with-rsc-role").capitalize() or DEFAULT_ROLE, ) normalized_el = normalize(constraint_el) return [ other_el for other_el in dom.getElementsByTagName("rsc_colocation") if not other_el.getElementsByTagName("resource_set") 
and constraint_el is not other_el and normalized_el == normalize(other_el) ] def order_rm(lib, argv, modifiers): """ Options: * -f - CIB file """ del lib modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() elementFound = False (dom, constraintsElement) = getCurrentConstraints() for resource in argv: for ord_loc in constraintsElement.getElementsByTagName('rsc_order')[:]: if ( ord_loc.getAttribute("first") == resource or ord_loc.getAttribute("then") == resource ): constraintsElement.removeChild(ord_loc) elementFound = True resource_refs_to_remove = [] for ord_set in constraintsElement.getElementsByTagName('resource_ref'): if ord_set.getAttribute("id") == resource: resource_refs_to_remove.append(ord_set) elementFound = True for res_ref in resource_refs_to_remove: res_set = res_ref.parentNode res_order = res_set.parentNode res_ref.parentNode.removeChild(res_ref) if not res_set.getElementsByTagName('resource_ref'): res_set.parentNode.removeChild(res_set) if not res_order.getElementsByTagName('resource_set'): res_order.parentNode.removeChild(res_order) if elementFound: utils.replace_cib_configuration(dom) else: utils.err("No matching resources found in ordering list") def order_start(lib, argv, modifiers): """ Options: * -f - CIB file * --force - allow constraint for any resource, allow duplicate constraints """ del lib modifiers.ensure_only_supported("-f", "--force") if len(argv) < 3: raise CmdLineInputError() first_action = DEFAULT_ACTION then_action = DEFAULT_ACTION action = argv[0] if action in OPTIONS_ACTION: first_action = action argv.pop(0) resource1 = argv.pop(0) if argv.pop(0) != "then": raise CmdLineInputError() if not argv: raise CmdLineInputError() action = argv[0] if action in OPTIONS_ACTION: then_action = action argv.pop(0) if not argv: raise CmdLineInputError() resource2 = argv.pop(0) order_options = [] if argv: order_options = order_options + argv[:] if "then" in order_options: raise CmdLineInputError( message="Multiple 'then's cannot be specified.", hint=( "Use the 'pcs constraint order set' command if you want to " "create a constraint for more than two resources." 
), show_both_usage_and_message=True ) order_options.append("first-action="+first_action) order_options.append("then-action="+then_action) _order_add(resource1, resource2, order_options, modifiers) def _order_add(resource1, resource2, options_list, modifiers): """ Commandline options: * -f - CIB file * --force - allow constraint for any resource, allow duplicate constraints """ cib_dom = utils.get_cib_dom() _validate_constraint_resource(cib_dom, resource1) _validate_constraint_resource(cib_dom, resource2) order_options = [] id_specified = False sym = None for arg in options_list: if arg == "symmetrical": sym = "true" elif arg == "nonsymmetrical": sym = "false" else: name, value = parse_args.split_option(arg, allow_empty_value=False) if name == "id": id_valid, id_error = utils.validate_xml_id( value, 'constraint id' ) if not id_valid: utils.err(id_error) if utils.does_id_exist(cib_dom, value): utils.err( "id '%s' is already in use, please specify another one" % value ) id_specified = True order_options.append((name, value)) elif name == "symmetrical": if value.lower() in OPTIONS_SYMMETRICAL: sym = value.lower() else: utils.err( "invalid symmetrical value '%s', allowed values are: %s" % (value, ", ".join(OPTIONS_SYMMETRICAL)) ) else: order_options.append((name, value)) if sym: order_options.append(("symmetrical", sym)) options = "" if order_options: options = " (Options: %s)" % " ".join([ "%s=%s" % (name, value) for name, value in order_options if name not in ("kind", "score") ]) scorekind = "kind: Mandatory" id_suffix = "mandatory" for opt in order_options: if opt[0] == "score": scorekind = "score: " + opt[1] id_suffix = opt[1] break if opt[0] == "kind": scorekind = "kind: " + opt[1] id_suffix = opt[1] break if not id_specified: order_id = "order-" + resource1 + "-" + resource2 + "-" + id_suffix order_id = utils.find_unique_id(cib_dom, order_id) order_options.append(("id", order_id)) (dom, constraintsElement) = getCurrentConstraints() element = dom.createElement("rsc_order") element.setAttribute("first", resource1) element.setAttribute("then", resource2) for order_opt in order_options: element.setAttribute(order_opt[0], order_opt[1]) constraintsElement.appendChild(element) if not modifiers.get("--force"): duplicates = order_find_duplicates(constraintsElement, element) if duplicates: utils.err( "duplicate constraint already exists, use --force to override\n" + "\n".join([ " " + constraint_order.console_report.constraint_plain( {"options": dict(dup.attributes.items())}, True ) for dup in duplicates ]) ) print( "Adding " + resource1 + " " + resource2 + " ("+scorekind+")" + options ) utils.replace_cib_configuration(dom) def order_find_duplicates(dom, constraint_el): """ Commandline options: no options """ def normalize(constraint_el): # pylint: disable=line-too-long return ( constraint_el.getAttribute("first"), constraint_el.getAttribute("then"), constraint_el.getAttribute("first-action").lower() or DEFAULT_ACTION, constraint_el.getAttribute("then-action").lower() or DEFAULT_ACTION, ) normalized_el = normalize(constraint_el) return [ other_el for other_el in dom.getElementsByTagName("rsc_order") if not other_el.getElementsByTagName("resource_set") and constraint_el is not other_el and normalized_el == normalize(other_el) ] # Show the currently configured location constraints by node or resource def location_show(lib, argv, modifiers): """ Options: * --all - print expired constraints * --full - print all details * -f - CIB file """ del lib modifiers.ensure_only_supported("-f", "--full", "--all") 
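# Editor's note on the accepted argument forms (a sketch, not upstream
# documentation): the first positional argument is expected to be "nodes"
# or "resources" and selects how the listing below is grouped; remaining
# arguments narrow the output. Assuming hypothetical node and resource
# names, both of these would be valid invocations:
#   pcs constraint location show nodes node1 node2 --full
#   pcs constraint location show resources d1 regexp%dummy.*
# Resource arguments may carry a "resource%" or "regexp%" type prefix,
# which parse_args.parse_typed_arg() resolves in the code below.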
by_node = False if argv and argv[0] == "nodes": by_node = True if len(argv) > 1: if by_node: valid_noderes = argv[1:] else: valid_noderes = [ parse_args.parse_typed_arg( arg, [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP], RESOURCE_TYPE_RESOURCE ) for arg in argv[1:] ] else: valid_noderes = [] (dummy_dom, constraintsElement) = getCurrentConstraints() print("\n".join(location_lines( constraintsElement, showDetail=modifiers.get("--full"), byNode=by_node, valid_noderes=valid_noderes, show_expired=modifiers.get("--all"), ))) def location_lines( constraintsElement, showDetail=False, byNode=False, valid_noderes=None, show_expired=False, verify_expiration=True ): """ Commandline options: no options """ all_lines = [] nodehashon = {} nodehashoff = {} rschashon = {} rschashoff = {} ruleshash = defaultdict(list) all_loc_constraints = constraintsElement.getElementsByTagName( 'rsc_location' ) cib = utils.get_cib() if not isfile(settings.crm_rule): if verify_expiration: sys.stderr.write(CRM_RULE_MISSING_MSG) verify_expiration = False all_lines.append("Location Constraints:") for rsc_loc in all_loc_constraints: if rsc_loc.hasAttribute("rsc-pattern"): lc_rsc_type = RESOURCE_TYPE_REGEXP lc_rsc_value = rsc_loc.getAttribute("rsc-pattern") lc_name = "Resource pattern: {0}".format(lc_rsc_value) else: lc_rsc_type = RESOURCE_TYPE_RESOURCE lc_rsc_value = rsc_loc.getAttribute("rsc") lc_name = "Resource: {0}".format(lc_rsc_value) lc_rsc = lc_rsc_type, lc_rsc_value, lc_name lc_id = rsc_loc.getAttribute("id") lc_node = rsc_loc.getAttribute("node") lc_score = rsc_loc.getAttribute("score") lc_role = rsc_loc.getAttribute("role") lc_resource_discovery = rsc_loc.getAttribute("resource-discovery") for child in rsc_loc.childNodes: if child.nodeType == child.ELEMENT_NODE and child.tagName == "rule": ruleshash[lc_rsc].append(child) # NEED TO FIX FOR GROUP LOCATION CONSTRAINTS (where there are children of # rsc_location) if lc_score == "": lc_score = "0" if lc_score == "INFINITY": positive = True elif lc_score == "-INFINITY": positive = False elif int(lc_score) >= 0: positive = True else: positive = False if positive: nodeshash = nodehashon rschash = rschashon else: nodeshash = nodehashoff rschash = rschashoff hash_element = { "id": lc_id, "rsc_type": lc_rsc_type, "rsc_value": lc_rsc_value, "rsc_label": lc_name, "node": lc_node, "score": lc_score, "role": lc_role, "resource-discovery": lc_resource_discovery, } if lc_node in nodeshash: nodeshash[lc_node].append(hash_element) else: nodeshash[lc_node] = [hash_element] if lc_rsc in rschash: rschash[lc_rsc].append(hash_element) else: rschash[lc_rsc] = [hash_element] nodelist = sorted(set(list(nodehashon.keys()) + list(nodehashoff.keys()))) rsclist = sorted( set(list(rschashon.keys()) + list(rschashoff.keys())), key=lambda item: ( { RESOURCE_TYPE_RESOURCE: 1, RESOURCE_TYPE_REGEXP: 0, }[item[0]], item[1] ) ) if byNode: for node in nodelist: if valid_noderes: if node not in valid_noderes: continue all_lines.append(" Node: " + node) nodehash_label = ( (nodehashon, " Allowed to run:"), (nodehashoff, " Not allowed to run:") ) all_lines += _hashtable_to_lines( nodehash_label, "rsc_label", node, showDetail ) all_lines += _show_location_rules( ruleshash, cib, show_detail=showDetail, show_expired=show_expired, verify_expiration=verify_expiration ) else: for rsc in rsclist: rsc_lines = [] if valid_noderes: if rsc[0:2] not in valid_noderes: continue rsc_lines.append(" {0}".format(rsc[2])) rschash_label = ( (rschashon, " Enabled on:"), (rschashoff, " Disabled on:"), ) rsc_lines += 
_hashtable_to_lines( rschash_label, "node", rsc, showDetail ) miniruleshash = {} miniruleshash[rsc] = ruleshash[rsc] rsc_lines += _show_location_rules( miniruleshash, cib, show_detail=showDetail, show_expired=show_expired, verify_expiration=verify_expiration, noheader=True, ) # Append to all_lines only if the resource has any constraints if len(rsc_lines) > 2: all_lines += rsc_lines return all_lines def _hashtable_to_lines(hash_label, hash_type, hash_key, show_detail): hash_lines = [] for hashtable, label in hash_label: if hash_key in hashtable: labeled_lines = [] for options in hashtable[hash_key]: # Skips nodeless constraints and prints nodes/resources if not options[hash_type]: continue line_parts = [" {0}{1}".format( "Node: " if hash_type == "node" else "", options[hash_type] )] line_parts.append(f"(score:{options['score']})") if options["role"]: line_parts.append(f"(role:{options['role']})") if options["resource-discovery"]: line_parts.append( "(resource-discovery={0})".format( options["resource-discovery"] ) ) if show_detail: line_parts.append(f"(id:{options['id']})") labeled_lines.append(" ".join(line_parts)) if labeled_lines: labeled_lines.insert(0, label) hash_lines += labeled_lines return hash_lines def _show_location_rules( ruleshash, cib, show_detail, show_expired=False, verify_expiration=True, noheader=False ): """ Commandline options: no options """ all_lines = [] constraint_options = {} for rsc in sorted( ruleshash.keys(), key=lambda item: ( { RESOURCE_TYPE_RESOURCE: 1, RESOURCE_TYPE_REGEXP: 0, }[item[0]], item[1] ) ): constrainthash = defaultdict(list) if not noheader: all_lines.append(" {0}".format(rsc[2])) for rule in ruleshash[rsc]: constraint_id = rule.parentNode.getAttribute("id") constrainthash[constraint_id].append(rule) constraint_options[constraint_id] = [] if rule.parentNode.getAttribute("resource-discovery"): constraint_options[constraint_id].append( "resource-discovery=%s" % rule.parentNode.getAttribute( "resource-discovery" ) ) for constraint_id in sorted(constrainthash.keys()): if ( constraint_id in constraint_options and constraint_options[constraint_id] ): constraint_option_info = ( " (" + " ".join(constraint_options[constraint_id]) + ")" ) else: constraint_option_info = "" rule_lines = [] # When expiration check is needed, starting value should be True and # when it's not, check is skipped so the initial value must be False # to print the constraint is_constraint_expired = verify_expiration for rule in constrainthash[constraint_id]: rule_status = RULE_UNKNOWN_STATUS if verify_expiration: rule_status = _get_rule_status(rule.getAttribute("id"), cib) if rule_status != RULE_EXPIRED: is_constraint_expired = False rule_lines.append(rule_utils.ExportDetailed().get_string( rule, rule_status == RULE_EXPIRED and show_expired, show_detail, indent=" " )) if not show_expired and is_constraint_expired: continue all_lines.append( " Constraint{0}: {1}{2}".format( " (expired)" if is_constraint_expired else "", constraint_id, constraint_option_info ) ) all_lines += rule_lines return all_lines def _verify_node_name(node, existing_nodes): report_list = [] if node not in existing_nodes: report_list.append(reports.node_not_found( node, forceable=report_codes.FORCE_NODE_DOES_NOT_EXIST )) return report_list def _verify_score(score): if not utils.is_score(score): utils.err( "invalid score '%s', use integer or INFINITY or -INFINITY" % score ) def _get_rule_status(rule_id, cib): _, _, retval = utils.cmd_runner().run( [settings.crm_rule, "--check", "--rule=" + rule_id, "-X-"], cib ) 
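# crm_rule communicates the rule state through its exit code, mirrored by
# the CrmRuleReturnCode enum above: 0 means the rule is in effect, 110 that
# it has expired, 111 that it is not yet in effect. Any other exit code
# (including a crm_rule failure) falls through to RULE_UNKNOWN_STATUS below.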
translation_map = { CrmRuleReturnCode.IN_EFFECT.value: RULE_IN_EFFECT, CrmRuleReturnCode.EXPIRED.value: RULE_EXPIRED, CrmRuleReturnCode.TO_BE_IN_EFFECT.value: RULE_NOT_IN_EFFECT, } return translation_map.get(retval, RULE_UNKNOWN_STATUS) def location_prefer(lib, argv, modifiers): """ Options: * --force - allow unknown options, allow constraint for any resource type * -f - CIB file """ modifiers.ensure_only_supported("--force", "-f") rsc = argv.pop(0) prefer_option = argv.pop(0) dummy_rsc_type, rsc_value = parse_args.parse_typed_arg( rsc, [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP], RESOURCE_TYPE_RESOURCE ) if prefer_option == "prefers": prefer = True elif prefer_option == "avoids": prefer = False else: raise CmdLineInputError() skip_node_check = False if modifiers.is_specified("-f") or modifiers.get("--force"): skip_node_check = True warn(LOCATION_NODE_VALIDATION_SKIP_MSG) else: lib_env = utils.get_lib_env() existing_nodes, report_list = get_existing_nodes_names( corosync_conf=lib_env.get_corosync_conf(), cib=lib_env.get_cib(), ) if report_list: process_library_reports(report_list) report_list = [] parameters_list = [] for nodeconf in argv: nodeconf_a = nodeconf.split("=", 1) node = nodeconf_a[0] if not skip_node_check: report_list += _verify_node_name(node, existing_nodes) if len(nodeconf_a) == 1: if prefer: score = "INFINITY" else: score = "-INFINITY" else: score = nodeconf_a[1] _verify_score(score) if not prefer: if score[0] == "-": score = score[1:] else: score = "-" + score parameters_list.append([ sanitize_id(f"location-{rsc_value}-{node}-{score}"), rsc, node, score ]) if report_list: process_library_reports(report_list) modifiers = modifiers.get_subset("--force", "-f") for parameters in parameters_list: location_add(lib, parameters, modifiers, skip_score_and_node_check=True) def location_add(lib, argv, modifiers, skip_score_and_node_check=False): """ Options: * --force - allow unknown options, allow constraint for any resource type * -f - CIB file """ del lib modifiers.ensure_only_supported("--force", "-f") if len(argv) < 4: raise CmdLineInputError() constraint_id = argv.pop(0) rsc_type, rsc_value = parse_args.parse_typed_arg( argv.pop(0), [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP], RESOURCE_TYPE_RESOURCE ) node = argv.pop(0) score = argv.pop(0) options = [] # For now we only allow setting resource-discovery if argv: for arg in argv: if '=' in arg: options.append(arg.split('=', 1)) else: raise CmdLineInputError(f"bad option '{arg}'") if ( options[-1][0] != "resource-discovery" and not modifiers.get("--force") ): utils.err( "bad option '%s', use --force to override" % options[-1][0] ) # Verify that specified node exists in the cluster and score is valid if not skip_score_and_node_check: if modifiers.is_specified("-f") or modifiers.get("--force"): warn(LOCATION_NODE_VALIDATION_SKIP_MSG) else: lib_env = utils.get_lib_env() existing_nodes, report_list = get_existing_nodes_names( corosync_conf=lib_env.get_corosync_conf(), cib=lib_env.get_cib(), ) report_list = _verify_node_name(node, existing_nodes) if report_list: process_library_reports(report_list) _verify_score(score) id_valid, id_error = utils.validate_xml_id(constraint_id, 'constraint id') if not id_valid: utils.err(id_error) required_version = None if [x for x in options if x[0] == "resource-discovery"]: required_version = 2, 2, 0 if rsc_type == RESOURCE_TYPE_REGEXP: required_version = 2, 6, 0 if required_version: dom = utils.cluster_upgrade_to_version(required_version) else: dom = utils.get_cib_dom() if rsc_type == 
RESOURCE_TYPE_RESOURCE: rsc_valid, rsc_error, dummy_correct_id = ( utils.validate_constraint_resource(dom, rsc_value) ) if not rsc_valid: utils.err(rsc_error) # Verify current constraint doesn't already exist # If it does we replace it with the new constraint dummy_dom, constraintsElement = getCurrentConstraints(dom) elementsToRemove = [] # If the id matches, or the rsc & node match, then we replace/remove for rsc_loc in constraintsElement.getElementsByTagName('rsc_location'): # pylint: disable=too-many-boolean-expressions if ( rsc_loc.getAttribute("id") == constraint_id or ( rsc_loc.getAttribute("node") == node and ( ( RESOURCE_TYPE_RESOURCE == rsc_type and rsc_loc.getAttribute("rsc") == rsc_value ) or ( RESOURCE_TYPE_REGEXP == rsc_type and rsc_loc.getAttribute("rsc-pattern") == rsc_value ) ) ) ): elementsToRemove.append(rsc_loc) for etr in elementsToRemove: constraintsElement.removeChild(etr) element = dom.createElement("rsc_location") element.setAttribute("id", constraint_id) if rsc_type == RESOURCE_TYPE_RESOURCE: element.setAttribute("rsc", rsc_value) elif rsc_type == RESOURCE_TYPE_REGEXP: element.setAttribute("rsc-pattern", rsc_value) element.setAttribute("node", node) element.setAttribute("score", score) for option in options: element.setAttribute(option[0], option[1]) constraintsElement.appendChild(element) utils.replace_cib_configuration(dom) def location_remove(lib, argv, modifiers): """ Options: * -f - CIB file """ # This code was originally merged in the location_add function and was # documented to take 1 or 4 arguments: # location remove <constraint id> [<resource id> <node> <score>] # However it has always ignored all arguments but constraint id. Therefore # this command / function has no use as it can be fully replaced by "pcs # constraint remove" which also removes constraints by id. For now I keep # things as they are but we should solve this when moving these functions # to pcs.lib. 
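# Editor's note, a hypothetical example of the equivalence described above:
# given a constraint created with
#   pcs constraint location add loc-d1 d1 node1 INFINITY
# both of the following invocations remove it, the second one being what
# makes this function redundant:
#   pcs constraint location remove loc-d1
#   pcs constraint remove loc-d1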
del lib modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() constraint_id = argv.pop(0) dom, constraintsElement = getCurrentConstraints() elementsToRemove = [] for rsc_loc in constraintsElement.getElementsByTagName('rsc_location'): if constraint_id == rsc_loc.getAttribute("id"): elementsToRemove.append(rsc_loc) if not elementsToRemove: utils.err("resource location id: " + constraint_id + " not found.") for etr in elementsToRemove: constraintsElement.removeChild(etr) utils.replace_cib_configuration(dom) def location_rule(lib, argv, modifiers): """ Options: * -f - CIB file * --force - allow constraint on any resource type, allow duplicate constraints """ del lib modifiers.ensure_only_supported("-f", "--force") if len(argv) < 3: usage.constraint(["location", "rule"]) sys.exit(1) rsc_type, rsc_value = parse_args.parse_typed_arg( argv.pop(0), [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP], RESOURCE_TYPE_RESOURCE ) argv.pop(0) # pop "rule" options, rule_argv = rule_utils.parse_argv( argv, { "constraint-id": None, "resource-discovery": None, } ) resource_discovery = ( "resource-discovery" in options and options["resource-discovery"] ) required_version = None if resource_discovery: required_version = 2, 2, 0 if rsc_type == RESOURCE_TYPE_REGEXP: required_version = 2, 6, 0 if required_version: dom = utils.cluster_upgrade_to_version(required_version) else: dom = utils.get_cib_dom() if rsc_type == RESOURCE_TYPE_RESOURCE: rsc_valid, rsc_error, dummy_correct_id = ( utils.validate_constraint_resource(dom, rsc_value) ) if not rsc_valid: utils.err(rsc_error) cib, constraints = getCurrentConstraints(dom) lc = cib.createElement("rsc_location") # If resource-discovery is specified, we use it with the rsc_location # element not the rule if resource_discovery: lc.setAttribute("resource-discovery", options.pop("resource-discovery")) constraints.appendChild(lc) if options.get("constraint-id"): id_valid, id_error = utils.validate_xml_id( options["constraint-id"], 'constraint id' ) if not id_valid: utils.err(id_error) if utils.does_id_exist(dom, options["constraint-id"]): utils.err( "id '%s' is already in use, please specify another one" % options["constraint-id"] ) lc.setAttribute("id", options["constraint-id"]) del options["constraint-id"] else: lc.setAttribute( "id", utils.find_unique_id(dom, sanitize_id("location-" + rsc_value)) ) if rsc_type == RESOURCE_TYPE_RESOURCE: lc.setAttribute("rsc", rsc_value) elif rsc_type == RESOURCE_TYPE_REGEXP: lc.setAttribute("rsc-pattern", rsc_value) rule_utils.dom_rule_add(lc, options, rule_argv) location_rule_check_duplicates(constraints, lc, modifiers.get("--force")) utils.replace_cib_configuration(cib) def location_rule_check_duplicates(dom, constraint_el, force): """ Commandline options: no options """ if not force: duplicates = location_rule_find_duplicates(dom, constraint_el) if duplicates: lines = [] for dup in duplicates: lines.append(" Constraint: %s" % dup.getAttribute("id")) for dup_rule in utils.dom_get_children_by_tag_name(dup, "rule"): lines.append(rule_utils.ExportDetailed().get_string( dup_rule, False, True, indent=" " )) utils.err( "duplicate constraint already exists, use --force to override\n" + "\n".join(lines) ) def location_rule_find_duplicates(dom, constraint_el): """ Commandline options: no options """ def normalize(constraint_el): if constraint_el.hasAttribute("rsc-pattern"): rsc = ( RESOURCE_TYPE_REGEXP, constraint_el.getAttribute("rsc-pattern") ) else: rsc = ( RESOURCE_TYPE_RESOURCE, constraint_el.getAttribute("rsc") ) 
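# Two rule constraints count as duplicates when they target the same
# resource (or resource pattern) and their rules serialize to identical
# expression strings; ExportAsExpression serves purely as a canonical text
# form for this comparison.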
return ( rsc, [ rule_utils.ExportAsExpression().get_string(rule_el, True) for rule_el in constraint_el.getElementsByTagName("rule") ] ) normalized_el = normalize(constraint_el) return [ other_el for other_el in dom.getElementsByTagName("rsc_location") if other_el.getElementsByTagName("rule") and constraint_el is not other_el and normalized_el == normalize(other_el) ] # Grabs the current constraints and returns the dom and constraint element def getCurrentConstraints(passed_dom=None): """ Commandline options: * -f - CIB file, only if passed_dom is None """ if passed_dom: dom = passed_dom else: current_constraints_xml = utils.get_cib_xpath('//constraints') if current_constraints_xml == "": utils.err("unable to process cib") dom = parseString(current_constraints_xml) constraintsElement = dom.getElementsByTagName('constraints')[0] return (dom, constraintsElement) # If returnStatus is set, then we don't error out, we just print the error # and return false def constraint_rm( lib, argv, modifiers, returnStatus=False, constraintsElement=None, passed_dom=None, ): """ Options: * -f - CIB file, effective only if passed_dom is None """ if passed_dom is None: modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() bad_constraint = False if len(argv) != 1: for arg in argv: if not constraint_rm( lib, [arg], modifiers, returnStatus=True, passed_dom=passed_dom ): bad_constraint = True if bad_constraint: sys.exit(1) return None c_id = argv.pop(0) elementFound = False if not constraintsElement: (dom, constraintsElement) = getCurrentConstraints(passed_dom) use_cibadmin = True else: use_cibadmin = False for co in constraintsElement.childNodes[:]: if co.nodeType != xml.dom.Node.ELEMENT_NODE: continue if co.getAttribute("id") == c_id: constraintsElement.removeChild(co) elementFound = True if not elementFound: for rule in constraintsElement.getElementsByTagName("rule")[:]: if rule.getAttribute("id") == c_id: elementFound = True parent = rule.parentNode parent.removeChild(rule) if not parent.getElementsByTagName("rule"): parent.parentNode.removeChild(parent) if elementFound: if passed_dom: return dom if use_cibadmin: utils.replace_cib_configuration(dom) if returnStatus: return True else: utils.err("Unable to find constraint - '%s'" % c_id, False) if returnStatus: return False sys.exit(1) return None def constraint_ref(lib, argv, modifiers): """ Options: * -f - CIB file """ del lib modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() for arg in argv: print("Resource: %s" % arg) constraints, set_constraints = find_constraints_containing(arg) if not constraints and not set_constraints: print(" No Matches.") else: for constraint in constraints: print(" " + constraint) for constraint in sorted(set_constraints): print(" " + constraint) def remove_constraints_containing( resource_id, output=False, constraints_element=None, passed_dom=None ): """ Commandline options: * -f - CIB file, effective only if passed_dom is None """ lib = utils.get_library_wrapper() modifiers = utils.get_input_modifiers() constraints, set_constraints = find_constraints_containing( resource_id, passed_dom ) for c in constraints: if output: print("Removing Constraint - " + c) if constraints_element is not None: constraint_rm( lib, [c], modifiers, True, constraints_element, passed_dom=passed_dom ) else: constraint_rm(lib, [c], modifiers, passed_dom=passed_dom) if set_constraints: (dom, constraintsElement) 
def remove_constraints_containing(
    resource_id, output=False, constraints_element=None, passed_dom=None
):
    """
    Commandline options:
      * -f - CIB file, effective only if passed_dom is None
    """
    lib = utils.get_library_wrapper()
    modifiers = utils.get_input_modifiers()
    constraints, set_constraints = find_constraints_containing(
        resource_id, passed_dom
    )
    for c in constraints:
        if output:
            print("Removing Constraint - " + c)
        if constraints_element is not None:
            constraint_rm(
                lib, [c], modifiers, True, constraints_element,
                passed_dom=passed_dom
            )
        else:
            constraint_rm(lib, [c], modifiers, passed_dom=passed_dom)

    if set_constraints:
        (dom, constraintsElement) = getCurrentConstraints(passed_dom)
        for c in constraintsElement.getElementsByTagName("resource_ref")[:]:
            # If the resource id is in a set, remove it from the set; if the
            # set is left empty, remove the set as well; if that leaves the
            # parent constraint empty, remove the constraint too
            if c.getAttribute("id") == resource_id:
                pn = c.parentNode
                pn.removeChild(c)
                if output:
                    print(
                        "Removing %s from set %s"
                        % (resource_id, pn.getAttribute("id"))
                    )
                if pn.getElementsByTagName("resource_ref").length == 0:
                    print("Removing set %s" % pn.getAttribute("id"))
                    pn2 = pn.parentNode
                    pn2.removeChild(pn)
                    if pn2.getElementsByTagName("resource_set").length == 0:
                        pn2.parentNode.removeChild(pn2)
                        print(
                            "Removing constraint %s" % pn2.getAttribute("id")
                        )
        if passed_dom:
            return dom
        utils.replace_cib_configuration(dom)
    return None

def find_constraints_containing(resource_id, passed_dom=None):
    """
    Commandline options:
      * -f - CIB file, effective only if passed_dom is None
    """
    if passed_dom:
        dom = passed_dom
    else:
        dom = utils.get_cib_dom()
    constraints_found = []
    set_constraints = []

    resources = dom.getElementsByTagName("primitive")
    resource_match = None
    for res in resources:
        if res.getAttribute("id") == resource_id:
            resource_match = res
            break

    if resource_match:
        if resource_match.parentNode.tagName in ("master", "clone"):
            constraints_found, set_constraints = find_constraints_containing(
                resource_match.parentNode.getAttribute("id"), dom
            )

    constraints = dom.getElementsByTagName("constraints")
    if not constraints:
        return [], []
    constraints = constraints[0]

    myConstraints = constraints.getElementsByTagName("rsc_colocation")
    myConstraints += constraints.getElementsByTagName("rsc_location")
    myConstraints += constraints.getElementsByTagName("rsc_order")
    myConstraints += constraints.getElementsByTagName("rsc_ticket")
    attr_to_match = ["rsc", "first", "then", "with-rsc"]
    for c in myConstraints:
        for attr in attr_to_match:
            if c.getAttribute(attr) == resource_id:
                constraints_found.append(c.getAttribute("id"))
                break

    setConstraints = constraints.getElementsByTagName("resource_ref")
    for c in setConstraints:
        if c.getAttribute("id") == resource_id:
            set_constraints.append(c.parentNode.parentNode.getAttribute("id"))

    # Remove duplicates
    set_constraints = list(set(set_constraints))

    return constraints_found, set_constraints

def remove_constraints_containing_node(dom, node, output=False):
    """
    Commandline options: no options
    """
    for constraint in find_constraints_containing_node(dom, node):
        if output:
            print("Removing Constraint - %s" % constraint.getAttribute("id"))
        constraint.parentNode.removeChild(constraint)
    return dom

def find_constraints_containing_node(dom, node):
    """
    Commandline options: no options
    """
    return [
        constraint
        for constraint in dom.getElementsByTagName("rsc_location")
        if constraint.getAttribute("node") == node
    ]

# Re-assign any constraints referencing a resource to its parent (a clone
# or master)
def constraint_resource_update(old_id, dom):
    """
    Commandline options: no options
    """
    new_id = None
    clone_ms_parent = utils.dom_get_resource_clone_ms_parent(dom, old_id)
    if clone_ms_parent:
        new_id = clone_ms_parent.getAttribute("id")

    if new_id:
        constraints = dom.getElementsByTagName("rsc_location")
        constraints += dom.getElementsByTagName("rsc_order")
        constraints += dom.getElementsByTagName("rsc_colocation")
        attrs_to_update = ["rsc", "first", "then", "with-rsc"]
        for constraint in constraints:
            for attr in attrs_to_update:
                if constraint.getAttribute(attr) == old_id:
                    constraint.setAttribute(attr, new_id)
    return dom
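# --- Illustrative sketch (not part of pcs) ---------------------------------
# What constraint_resource_update above is for: after a primitive has been
# wrapped in a clone, constraints still referencing the primitive are
# re-pointed to the clone. The snippet and the ids "dummy", "dummy-clone",
# "loc-1" and "node1" are hypothetical, and this assumes
# utils.dom_get_resource_clone_ms_parent resolves the clone parent from a
# plain minidom document the same way it does for a full CIB.
def _example_constraint_resource_update():
    example_dom = parseString(
        "<cib><configuration>"
        '<resources><clone id="dummy-clone"><primitive id="dummy"/></clone>'
        "</resources><constraints>"
        '<rsc_location id="loc-1" rsc="dummy" node="node1" score="INFINITY"/>'
        "</constraints></configuration></cib>"
    )
    # After this call the rsc_location "loc-1" references "dummy-clone".
    return constraint_resource_update("dummy", example_dom)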
def constraint_rule(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
      * --force - allow duplicate constraints, only for add command

    NOTE: modifiers check is in subcommand
    """
    del lib
    if len(argv) < 2:
        raise CmdLineInputError()

    found = False
    command = argv.pop(0)
    constraint_id = None

    if command == "add":
        modifiers.ensure_only_supported("-f", "--force")
        constraint_id = argv.pop(0)
        cib = utils.get_cib_dom()
        constraint = utils.dom_get_element_with_id(
            cib.getElementsByTagName("constraints")[0],
            "rsc_location",
            constraint_id
        )
        if not constraint:
            utils.err("Unable to find constraint: " + constraint_id)
        options, rule_argv = rule_utils.parse_argv(argv)
        rule_utils.dom_rule_add(constraint, options, rule_argv)
        location_rule_check_duplicates(
            cib, constraint, modifiers.get("--force")
        )
        utils.replace_cib_configuration(cib)

    elif command in ["remove", "delete"]:
        modifiers.ensure_only_supported("-f")
        cib = utils.get_cib_etree()
        temp_id = argv.pop(0)
        constraints = cib.find('.//constraints')
        loc_cons = cib.findall('.//rsc_location')

        for loc_con in loc_cons:
            for rule in loc_con:
                if rule.get("id") == temp_id:
                    if len(loc_con) > 1:
                        print("Removing Rule: {0}".format(rule.get("id")))
                        loc_con.remove(rule)
                        found = True
                    else:
                        print(
                            "Removing Constraint: {0}".format(
                                loc_con.get("id")
                            )
                        )
                        constraints.remove(loc_con)
                        found = True
                    break
            if found:
                break

        if found:
            utils.replace_cib_configuration(cib)
        else:
            utils.err("unable to find rule with id: %s" % temp_id)
    else:
        raise CmdLineInputError()
pcs-0.10.4/pcs/daemon/000077500000000000000000000000001356771603100144225ustar00rootroot00000000000000
pcs-0.10.4/pcs/daemon/__init__.py000066400000000000000000000000001356771603100165210ustar00rootroot00000000000000
pcs-0.10.4/pcs/daemon/app/000077500000000000000000000000001356771603100152025ustar00rootroot00000000000000
pcs-0.10.4/pcs/daemon/app/__init__.py000066400000000000000000000000001356771603100173010ustar00rootroot00000000000000
pcs-0.10.4/pcs/daemon/app/common.py000066400000000000000000000047751356771603100170570ustar00rootroot00000000000000
from tornado.web import RequestHandler

class EnhanceHeadersMixin:
    """
    EnhanceHeadersMixin allows adding security headers to GUI URLs.
    """
    def set_strict_transport_security(self):
        # rhbz 1558063
        # The HTTP Strict-Transport-Security response header (often
        # abbreviated as HSTS) lets a web site tell browsers that it should
        # only be accessed using HTTPS, instead of using HTTP.
        self.set_header("Strict-Transport-Security", "max-age=604800")

    def set_header_nosniff_content_type(self):
        # The X-Content-Type-Options response HTTP header is a marker used by
        # the server to indicate that the MIME types advertised in the
        # Content-Type headers should not be changed and be followed. This
        # allows opting out of MIME type sniffing; in other words, it is a
        # way to say that the webmasters knew what they were doing.
        self.set_header("X-Content-Type-Options", "nosniff")

    def enhance_headers(self):
        self.set_header_nosniff_content_type()
        # The X-Frame-Options HTTP response header can be used to indicate
        # whether or not a browser should be allowed to render a page in a
        # <frame>,