pcs-0.10.11/.eslintrc
{
  "parser": "babel-eslint",
  "rules": {
    "semi": [2, "always", { "omitLastInOneLineBlock": true, }],
  }
}

pcs-0.10.11/.gitarchivever
ref names: (tag: v0.10.11, refs/pull/385/head)

pcs-0.10.11/.gitattributes
configure.ac export-subst
.gitarchivever export-subst

pcs-0.10.11/.gitignore
*.pyc
*.swp
pcs.spec
/MANIFEST
/dist/
/pcsd/logrotate/pcsd
pcs/snmp/pcs_snmp_agent
pcs/snmp/settings.py
pcs/snmp/pcs_snmp_agent.service
/pcsd/pcsd.service
/pcsd/pcsd-ruby.service
/pcs_bundled/
/.bundle
/pcsd/vendor
/pcsd/public/ui
/Gemfile*
/scripts/pcsd.sh
pcs-*
.mypy_cache/
requirements.txt
setup.py
setup.cfg
pcs/pcs
pcs/pcs_internal
pcs/settings.py
pcs_test/pcs_for_tests
pcs_test/settings.py
pcs_test/suite
pcs_test/smoke.sh
pcs_test/tools/bin_mock/pcmk/crm_resource
pcs_test/resources/*.tmp
pcs_test/resources/temp*.xml
pcs_test/resources/temp*
*.8
pcsd/pcsd
pcsd/pcsd-cli.rb
pcsd/settings.rb
build/
*.egg-info
rpm/*.gem
rpm/*.tar*
rpm/*.rpm
stamps*
aclocal.m4
autom4te.cache/
compile
config.guess
config.sub
configure
install-sh
ltmain.sh
missing
Makefile
Makefile.in
config.log
config.status
libtool
m4/lt*
m4/lib*
.version

pcs-0.10.11/.gitlab-ci.yml
image: fedora${CI_PCS_FC_IMG_TAG}

variables:
  PYTHON_DEPENDENCIES: "
    python3-cryptography
    python3-devel
    python3-lxml
    python3-pycurl
    python3-pyparsing
    python3-setuptools
    python3-setuptools_scm
    python3-wheel
  "
  RUBY_GEMS: "
    rubygem-backports
    rubygem-ethon
    rubygem-ffi
    rubygem-io-console
    rubygem-json
    rubygem-open4
    rubygem-rack
    rubygem-rack-protection
    rubygem-rack-test
    rubygem-rexml
    rubygem-sinatra
    rubygem-test-unit
    rubygem-thin
    rubygem-tilt
    rubygem-webrick
  "
  COMMON_DEPENDENCIES: "
    autoconf
    automake
    coreutils
    curl
    findutils
    git
    make
    sed
    systemd
    tar
    time
    wget
    python3-pip
    pkgconf-pkg-config
    ruby
    ruby-devel
    rubygem-bundler
  "
  COMMON_BUILD_DEPENDENCIES: "
    fontconfig
    gcc
    gcc-c++
    psmisc
    redhat-rpm-config
    ${COMMON_DEPENDENCIES}
    ${PYTHON_DEPENDENCIES}
  "
  APP_VERSION: "0.0.1.ci"
  DIST_VERSION: "${APP_VERSION}.${CI_PROJECT_NAMESPACE}-${CI_COMMIT_SHORT_SHA}"
  RPM_VERSION: "${APP_VERSION}-1.${CI_PROJECT_NAMESPACE}.$CI_COMMIT_SHORT_SHA"

default:
  before_script:
    - "echo ${DIST_VERSION} > .tarball-version"
    - cat .tarball-version

stages:
  - stage1
  - stage2

rpm_build:
  stage: stage1
  script:
    - "dnf install -y
        ${COMMON_BUILD_DEPENDENCIES}
        dnf-plugins-core
        rpm-build
        rpmdevtools
      "
    - ./autogen.sh
    - ./configure --enable-local-build
    - make rpm/pcs.spec
    - dnf builddep -y rpm/pcs.spec
    - make rpm
    - mkdir -p rpms && cp -v $(find rpm -type f -name '*.rpm' -not -name '*.src.rpm') rpms
  artifacts:
    expire_in: 1 week
    paths:
      - rpms

distcheck:
  stage: stage1
  script:
    - "dnf install -y
        ${COMMON_BUILD_DEPENDENCIES}
        ${RUBY_GEMS}
        bzip2
        xz
      "
    - "pip3 install
        dacite
        tornado
        pyagentx
      "
    - ./autogen.sh
    - ./configure --enable-local-build
    - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-local-build'
    - mkdir -p dist && cp -v pcs*.tar.* dist/
  artifacts:
    expire_in: 1 week
    paths:
      - dist
black:
  stage: stage1
  script:
    - dnf install -y ${COMMON_DEPENDENCIES}
    - python3 -m pip install --upgrade -r dev_requirements.txt
    - ./autogen.sh
    - ./configure --enable-local-build --enable-dev-tests --enable-tests-only
    - make black_check

pylint:
  stage: stage1
  script:
    - dnf install -y ${COMMON_BUILD_DEPENDENCIES}
    - python3 -m pip install --upgrade -r dev_requirements.txt
    - ./autogen.sh
    - ./configure --enable-local-build --enable-dev-tests
    - make
    - make pylint

mypy:
  stage: stage1
  script:
    - dnf install -y ${COMMON_BUILD_DEPENDENCIES}
    - python3 -m pip install --upgrade -r dev_requirements.txt
    - ./autogen.sh
    - ./configure --enable-local-build --enable-dev-tests
    - make
    - make mypy

ruby_tests:
  stage: stage1
  script:
    - dnf install -y ${COMMON_BUILD_DEPENDENCIES}
    - ./autogen.sh
    - ./configure --enable-local-build
    - make
    - make pcsd-tests

python_tier0_tests:
  stage: stage1
  script:
    - dnf install -y ${COMMON_BUILD_DEPENDENCIES}
    - python3 -m pip install concurrencytest
    - ./autogen.sh
    - ./configure --enable-local-build
    - make
    - make tests_tier0

python_tier1_tests:
  stage: stage2
  needs:
    - rpm_build
  script:
    - "dnf install -y
        ${COMMON_DEPENDENCIES}
        python3-mock
        fence-agents-scsi
        fence-agents-apc
        fence-agents-ipmilan
        fence-virt
        booth-site
        rpms/pcs-*.rpm
      "
    - python3 -m pip install concurrencytest
    - ./autogen.sh
    - ./configure --enable-local-build --enable-destructive-tests --enable-tests-only
    - rm -rf pcs pcsd pcs_bundled # make sure we are testing installed package
    - pcs_test/suite -v --installed --tier1

python_smoke_tests:
  stage: stage2
  needs:
    - rpm_build
  script:
    - "dnf install -y
        ${COMMON_DEPENDENCIES}
        cracklib-dicts
        passwd
        procps-ng
        rpms/pcs-*.rpm
      "
    - export GEM_HOME=/usr/lib64/pcsd/vendor/bundle/
    - /usr/lib64/pcsd/pcsd & # start pcsd (ruby - thin)
    - sleep 10 # wait for pcsd (ruby - thin) to start up properly
    - /usr/sbin/pcsd & # start pcsd (python - tornado)
    - sleep 10 # wait for pcsd (python - tornado) to start up properly
    - ./autogen.sh
    - ./configure --enable-local-build --enable-tests-only
    - pcs_test/smoke.sh
  artifacts:
    paths:
      - /var/log/pcsd/
    when: on_failure

pcs-0.10.11/CHANGELOG.md
# Change Log

## [0.10.11] - 2021-10-05

### Added
- Add add/remove cli syntax for command `pcs stonith update-scsi-devices`
  ([rhbz#1992668])

### Fixed
- Fixed an error when creating a resource which defines 'depth' attribute for
  its operations ([rhbz#1998454])
- Do not unfence newly added devices on fenced cluster nodes ([rhbz#1991654])
- Fix displaying fencing levels with regular expression targets
  ([rhbz#1533090])

[rhbz#1533090]: https://bugzilla.redhat.com/show_bug.cgi?id=1533090
[rhbz#1991654]: https://bugzilla.redhat.com/show_bug.cgi?id=1991654
[rhbz#1992668]: https://bugzilla.redhat.com/show_bug.cgi?id=1992668
[rhbz#1998454]: https://bugzilla.redhat.com/show_bug.cgi?id=1998454
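To make the add/remove syntax above concrete, here is a sketch; the stonith
resource id `scsi-fence` and the device paths are hypothetical, and the exact
keyword layout should be verified against `pcs stonith update-scsi-devices
--help`:

```sh
# replace the device list of an existing fence_scsi stonith resource in place
pcs stonith update-scsi-devices scsi-fence set /dev/sdb /dev/sdc
# the syntax added in 0.10.11: add/remove individual devices without
# restating the whole list or restarting other resources
pcs stonith update-scsi-devices scsi-fence add /dev/sdd remove /dev/sdb
```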
## [0.10.10] - 2021-08-19

### Added
- Support for new role names introduced in pacemaker 2.1 ([rhbz#1885293])

### Fixed
- Traceback in some cases when --wait without timeout is used

[rhbz#1885293]: https://bugzilla.redhat.com/show_bug.cgi?id=1885293


## [0.10.9] - 2021-08-10

### Added
- Elliptic curve TLS certificates are now supported in pcsd ([ghissue#123])
- Support for corosync option `totem.block_unlisted_ips` ([rhbz#1720221])
- Support for displaying status of a single resource or tag ([rhbz#1290830])
- Support for displaying status of resources on a specified node
  ([rhbz#1285269])
- New option `--brief` for `pcs resource disable --safe` or its alias
  `pcs resource safe-disable` that only prints errors ([rhbz#1909901])
- Support for updating scsi fencing devices without affecting other resources
  added in the new command `pcs stonith update-scsi-devices` ([rhbz#1759995],
  [rhbz#1872378])
- Option `--autodelete` for `pcs resource move` command which removes
  a location constraint used for moving a resource, once the resource has
  been moved. This feature is in tech-preview state and thus may be changed
  in the future ([rhbz#1847102])

### Fixed
- Node attribute expressions are now correctly reported as not allowed in
  resource defaults rules ([rhbz#1896458])
- Upgraded to jquery 3.6.0 ([rhbz#1882291], [rhbz#1886342])
- Man page and help: note that 'pcs resource unclone' accepts clone resources
  as well ([rhbz#1930886])
- Improved error messages when a host is found to be a part of a cluster
  already ([rhbz#1690419])
- `pcs cluster sync` command now warns reloading corosync config is necessary
  for changes to take effect ([rhbz#1750240])
- Show a user friendly error if unable to delete a group (due to the group
  being referenced within configuration) when moving resources out of the
  group ([rhbz#1678273])
- Exit with an error if `on-fail=demote` is specified for a resource
  operation and pacemaker doesn't support it
- The `pcs status nodes` command now correctly shows status of nodes that are
  both in maintenance and standby modes ([rhbz#1432097])

### Changed
- python3-openssl was replaced with python3-cryptography ([rhbz#1927404])

### Deprecated
- `pcs acl show` replaced with `pcs acl config`
- `pcs alert show` replaced with `pcs alert config`
- Undocumented command `pcs cluster certkey` replaced with `pcs pcsd certkey`
- `pcs cluster pcsd-status` replaced with `pcs status pcsd` or
  `pcs pcsd status`
- `pcs constraint [location | colocation | order | ticket] show | list`
  replaced with `pcs constraint [location | colocation | order | ticket]
  config`
- `pcs property show`, `pcs property list` replaced with `pcs property config`
- pcsd urls: `/remote/config_backup`, `/remote/node_available`,
  `/remote/node_restart`, `/remote/resource_status`
- Undocumented syntax for constraint location rules:
  - `date start=<date> gt` replaced with `date gt <date>`
  - `date end=<date> lt` replaced with `date lt <date>`
  - `date start=<date1> end=<date2> in_range` replaced with
    `date in_range <date1> to <date2>`
  - `operation=date_spec` replaced with `date-spec <date-spec options>`
  - converting invalid score to score-attribute=pingd
- Delimiting stonith devices with a comma in `pcs stonith level add | clear |
  delete | remove` commands, use a space instead
- `pcs stonith level delete | remove <level> [<target>] [<stonith id>]...`
  replaced with `pcs stonith level delete | remove <level> [target <target>]
  [stonith <stonith id>]...`
- `pcs stonith level clear [<target> | <stonith ids>]` replaced with
  `pcs stonith level clear [target <target> | stonith <stonith id>...]`
- `pcs tag list` replaced with `pcs tag config`

[ghissue#123]: https://github.com/ClusterLabs/pcs/issues/123
[rhbz#1285269]: https://bugzilla.redhat.com/show_bug.cgi?id=1285269
[rhbz#1290830]: https://bugzilla.redhat.com/show_bug.cgi?id=1290830
[rhbz#1432097]: https://bugzilla.redhat.com/show_bug.cgi?id=1432097
[rhbz#1678273]: https://bugzilla.redhat.com/show_bug.cgi?id=1678273
[rhbz#1690419]: https://bugzilla.redhat.com/show_bug.cgi?id=1690419
[rhbz#1720221]: https://bugzilla.redhat.com/show_bug.cgi?id=1720221
[rhbz#1750240]: https://bugzilla.redhat.com/show_bug.cgi?id=1750240
[rhbz#1759995]: https://bugzilla.redhat.com/show_bug.cgi?id=1759995
[rhbz#1847102]: https://bugzilla.redhat.com/show_bug.cgi?id=1847102
[rhbz#1872378]: https://bugzilla.redhat.com/show_bug.cgi?id=1872378
[rhbz#1882291]: https://bugzilla.redhat.com/show_bug.cgi?id=1882291
[rhbz#1886342]: https://bugzilla.redhat.com/show_bug.cgi?id=1886342
[rhbz#1896458]: https://bugzilla.redhat.com/show_bug.cgi?id=1896458
[rhbz#1909901]: https://bugzilla.redhat.com/show_bug.cgi?id=1909901
[rhbz#1927404]: https://bugzilla.redhat.com/show_bug.cgi?id=1927404
[rhbz#1930886]: https://bugzilla.redhat.com/show_bug.cgi?id=1930886
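To illustrate the `--brief` safe-disable option introduced above — a sketch
only; the resource id `webserver` is hypothetical:

```sh
# simulate the disable, refuse it if any other resources would be affected,
# and print only errors instead of the full simulation output
pcs resource disable --safe --brief webserver
pcs resource safe-disable --brief webserver   # equivalent alias
```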
## [0.10.8] - 2021-02-01

### Added
- Support for changing corosync configuration in an existing cluster
  ([rhbz#1457314], [rhbz#1667061], [rhbz#1856397], [rhbz#1774143])
- Command to show structured corosync configuration (see `pcs cluster config
  show` command) ([rhbz#1667066])

### Fixed
- Improved error message with a hint in `pcs cluster cib-push`
  ([ghissue#241])
- Option --wait was not working with pacemaker 2.0.5+ ([ghissue#260])
- Explicitly close libcurl connections to prevent stalled TCP connections in
  CLOSE-WAIT state ([ghissue#261], [rhbz#1885841])
- Fixed parsing negative float numbers on command line ([rhbz#1869399])
- Removed unwanted logging to system log (/var/log/messages)
  ([rhbz#1917286])
- Fixed rare race condition in `pcs cluster start --wait` ([rhbz#1794062])
- Better error message when unable to connect to pcsd ([rhbz#1619818])

### Deprecated
- Commands `pcs config import-cman` and `pcs config export
  pcs-commands|pcs-commands-verbose` have been deprecated ([rhbz#1851335])
- Entering values starting with '-' (negative numbers) without '--' on
  command line is now deprecated ([rhbz#1869399])

[ghissue#241]: https://github.com/ClusterLabs/pcs/issues/241
[ghissue#260]: https://github.com/ClusterLabs/pcs/issues/260
[ghissue#261]: https://github.com/ClusterLabs/pcs/issues/261
[rhbz#1457314]: https://bugzilla.redhat.com/show_bug.cgi?id=1457314
[rhbz#1619818]: https://bugzilla.redhat.com/show_bug.cgi?id=1619818
[rhbz#1667061]: https://bugzilla.redhat.com/show_bug.cgi?id=1667061
[rhbz#1667066]: https://bugzilla.redhat.com/show_bug.cgi?id=1667066
[rhbz#1774143]: https://bugzilla.redhat.com/show_bug.cgi?id=1774143
[rhbz#1794062]: https://bugzilla.redhat.com/show_bug.cgi?id=1794062
[rhbz#1851335]: https://bugzilla.redhat.com/show_bug.cgi?id=1851335
[rhbz#1856397]: https://bugzilla.redhat.com/show_bug.cgi?id=1856397
[rhbz#1869399]: https://bugzilla.redhat.com/show_bug.cgi?id=1869399
[rhbz#1885841]: https://bugzilla.redhat.com/show_bug.cgi?id=1885841
[rhbz#1917286]: https://bugzilla.redhat.com/show_bug.cgi?id=1917286


## [0.10.7] - 2020-09-30

### Added
- Support for multiple sets of resource and operation defaults, including
  support for rules ([rhbz#1222691], [rhbz#1817547], [rhbz#1862966],
  [rhbz#1867516], [rhbz#1869399])
- Support for "demote" value of resource operation's "on-fail" option
  ([rhbz#1843079])
- Support for 'number' type in rules ([rhbz#1869399])
- It is possible to set custom (promotable) clone id in `pcs resource create`
  and `pcs resource clone/promotable` commands ([rhbz#1741056])

### Fixed
- Prevent removing non-empty tag by removing tagged resource group or clone
  ([rhbz#1857295])
- Clarify documentation for 'resource move' and 'resource ban' commands with
  regards to the 'lifetime' option.
- Allow moving both promoted and demoted promotable clone resources
  ([rhbz#1875301])

### Deprecated
- `pcs resource [op] defaults <name>=<value>...` commands are deprecated now.
  Use `pcs resource [op] defaults update <name>=<value>...` if you only
  manage one set of defaults, or `pcs resource [op] defaults set` if you
  manage several sets of defaults. ([rhbz#1817547])

[rhbz#1222691]: https://bugzilla.redhat.com/show_bug.cgi?id=1222691
[rhbz#1741056]: https://bugzilla.redhat.com/show_bug.cgi?id=1741056
[rhbz#1817547]: https://bugzilla.redhat.com/show_bug.cgi?id=1817547
[rhbz#1843079]: https://bugzilla.redhat.com/show_bug.cgi?id=1843079
[rhbz#1857295]: https://bugzilla.redhat.com/show_bug.cgi?id=1857295
[rhbz#1862966]: https://bugzilla.redhat.com/show_bug.cgi?id=1862966
[rhbz#1867516]: https://bugzilla.redhat.com/show_bug.cgi?id=1867516
[rhbz#1869399]: https://bugzilla.redhat.com/show_bug.cgi?id=1869399
[rhbz#1875301]: https://bugzilla.redhat.com/show_bug.cgi?id=1875301
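A sketch of the replacement commands named in the deprecation note above;
the option values are illustrative, and the `set create` line in particular
(its id and rule expression) is an assumption to be checked against
`pcs resource defaults set create --help`:

```sh
# one set of defaults: the update form
pcs resource defaults update resource-stickiness=100
pcs resource op defaults update timeout=60s
# several sets of defaults: the set form, using the rules support added in
# this release (rule expression shown is hypothetical)
pcs resource defaults set create id=dummy-defaults \
  meta resource-stickiness=200 rule resource ::Dummy
```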
## [0.10.6] - 2020-06-11

### Security
- Web UI sends HTTP headers: Content-Security-Policy, X-Frame-Options and
  X-Xss-Protection

### Added
- When creating a cluster, verify the cluster name does not prevent mounting
  GFS2 volumes ([rhbz#1782553])
- An option to run 'pcs cluster setup' in a local mode (do not connect to any
  nodes, save corosync.conf to a specified file) ([rhbz#1839637])
- Support for pacemaker tags. Pcs provides commands for creating and removing
  tags, adding and/or removing IDs to/from tags, and listing current tag
  configuration. ([rhbz#1684676])
- Support for tag ids in commands resource enable/disable/manage/unmanage
  ([rhbz#1684676])
- `pcs resource [safe-]disable --simulate` has a new option `--brief` to
  print only a list of affected resources ([rhbz#1833114])

### Fixed
- Keep autogenerated IDs of set constraints reasonably short
  ([rhbz#1387358], [rhbz#1824206])
- Pcs is now compatible with Ruby 2.7 and Python 3.8. To achieve this, it
  newly depends on the python3-distro package.
- `pcs status` works on remote nodes again (broken since pcs-0.10.4)
  ([rhbz#1830552])
- Fixed inability to create a colocation constraint from web UI
  ([rhbz#1832973])
- Actions going through pcsd no longer time out after 30s (broken since
  pcs-0.10.5) ([rhbz#1833506])

[rhbz#1387358]: https://bugzilla.redhat.com/show_bug.cgi?id=1387358
[rhbz#1684676]: https://bugzilla.redhat.com/show_bug.cgi?id=1684676
[rhbz#1782553]: https://bugzilla.redhat.com/show_bug.cgi?id=1782553
[rhbz#1824206]: https://bugzilla.redhat.com/show_bug.cgi?id=1824206
[rhbz#1830552]: https://bugzilla.redhat.com/show_bug.cgi?id=1830552
[rhbz#1832973]: https://bugzilla.redhat.com/show_bug.cgi?id=1832973
[rhbz#1833114]: https://bugzilla.redhat.com/show_bug.cgi?id=1833114
[rhbz#1833506]: https://bugzilla.redhat.com/show_bug.cgi?id=1833506
[rhbz#1839637]: https://bugzilla.redhat.com/show_bug.cgi?id=1839637


## [0.10.5] - 2020-03-18

### Added
- It is possible to configure a disaster-recovery site and display its status
  ([rhbz#1676431])

### Fixed
- Error messages in cases when cluster is not set up ([rhbz#1743731])
- Improved documentation of configuring links in the 'pcs cluster setup'
  command
- Safe-disabling clones and groups no longer fails due to their inner
  resources getting stopped ([rhbz#1781303])
- Booth documentation clarified ([ghissue#231])
- Detection of fence history support ([rhbz#1793574])
- Fix documentation and flags regarding bundled/cloned/grouped resources for
  `pcs (resource | stonith) (cleanup | refresh)` ([rhbz#1805082])
- Improved ACL documentation ([rhbz#1722970])
- Added missing Strict-Transport-Security headers to redirects
  ([rhbz#1810017])
- Improved pcsd daemon performance ([rhbz#1783106])

[ghissue#231]: https://github.com/ClusterLabs/pcs/issues/231
[rhbz#1676431]: https://bugzilla.redhat.com/show_bug.cgi?id=1676431
[rhbz#1722970]: https://bugzilla.redhat.com/show_bug.cgi?id=1722970
[rhbz#1743731]: https://bugzilla.redhat.com/show_bug.cgi?id=1743731
[rhbz#1781303]: https://bugzilla.redhat.com/show_bug.cgi?id=1781303
[rhbz#1783106]: https://bugzilla.redhat.com/show_bug.cgi?id=1783106
[rhbz#1793574]: https://bugzilla.redhat.com/show_bug.cgi?id=1793574
[rhbz#1805082]: https://bugzilla.redhat.com/show_bug.cgi?id=1805082
[rhbz#1810017]: https://bugzilla.redhat.com/show_bug.cgi?id=1810017


## [0.10.4] - 2019-11-28

### Added
- New section in pcs man page summarizing changes in pcs-0.10. Commands
  removed or changed in pcs-0.10 print errors pointing to that section.
  ([rhbz#1728890])
- `pcs resource disable` can show effects of disabling resources and prevent
  disabling resources if any other resources would be affected
  ([rhbz#1631519])
- `pcs resource relations` command shows relations between resources such as
  ordering constraints, ordering set constraints and relations defined by
  resource hierarchy ([rhbz#1631514])

### Changed
- Expired location constraints are now hidden by default when listing
  constraints in any way. Using `--all` will list and denote them with
  `(expired)`. All expired rules are then marked the same way.
  ([rhbz#1442116])

### Fixed
- All node names and scores are validated when running `pcs constraint
  location avoids/prefers` before writing configuration to cib
  ([rhbz#1673835])
- Fixed crash when an invalid port is given in an address to the
  `pcs host auth` command ([rhbz#1698763])
- Command `pcs cluster verify` suggests `--full` option instead of `-V`
  option which is not recognized by pcs ([rhbz#1712347])
- It is now possible to authenticate remote clusters in web UI even if the
  local cluster is not authenticated ([rhbz#1743735])
- Documentation of `pcs constraint colocation add` ([rhbz#1734361])
- Empty constraint options are not allowed in `pcs constraint order` and
  `pcs constraint colocation add` commands ([rhbz#1734361])
- More fixes for the case when PATH environment variable is not set
- Fixed crashes and other issues when UTF-8 characters are present in the
  corosync.conf file ([rhbz#1741586])

[rhbz#1442116]: https://bugzilla.redhat.com/show_bug.cgi?id=1442116
[rhbz#1631514]: https://bugzilla.redhat.com/show_bug.cgi?id=1631514
[rhbz#1631519]: https://bugzilla.redhat.com/show_bug.cgi?id=1631519
[rhbz#1673835]: https://bugzilla.redhat.com/show_bug.cgi?id=1673835
[rhbz#1698763]: https://bugzilla.redhat.com/show_bug.cgi?id=1698763
[rhbz#1712347]: https://bugzilla.redhat.com/show_bug.cgi?id=1712347
[rhbz#1728890]: https://bugzilla.redhat.com/show_bug.cgi?id=1728890
[rhbz#1734361]: https://bugzilla.redhat.com/show_bug.cgi?id=1734361
[rhbz#1741586]: https://bugzilla.redhat.com/show_bug.cgi?id=1741586
[rhbz#1743735]: https://bugzilla.redhat.com/show_bug.cgi?id=1743735


## [0.10.3] - 2019-08-23

### Fixed
- Fixed crashes in the `pcs host auth` command ([rhbz#1676957])
- Fixed id conflict with current bundle configuration in `pcs resource bundle
  reset` ([rhbz#1657166])
- Options starting with - and -- are no longer ignored for non-root users
  (broken since pcs-0.10.2) ([rhbz#1725183])
- Fixed crashes when pcs is configured that no rubygems are bundled in pcs
  package ([ghissue#208])
- Standby nodes running resources are listed separately in `pcs status nodes`
- Parsing arguments in the `pcs constraint order` and `pcs constraint
  colocation add` commands has been improved, errors which were previously
  silent are now reported ([rhbz#1734361])
- Fixed shebang correction in Makefile ([ghissue#206])
- Generate 256 bytes long corosync authkey, longer keys are not supported
  when FIPS is enabled ([rhbz#1740218])

### Changed
- Command `pcs resource bundle reset` no longer accepts the container type
  ([rhbz#1657166])

[ghissue#206]: https://github.com/ClusterLabs/pcs/issues/206
[ghissue#208]: https://github.com/ClusterLabs/pcs/issues/208
[rhbz#1657166]: https://bugzilla.redhat.com/show_bug.cgi?id=1657166
[rhbz#1676957]: https://bugzilla.redhat.com/show_bug.cgi?id=1676957
[rhbz#1725183]: https://bugzilla.redhat.com/show_bug.cgi?id=1725183
[rhbz#1734361]: https://bugzilla.redhat.com/show_bug.cgi?id=1734361
[rhbz#1740218]: https://bugzilla.redhat.com/show_bug.cgi?id=1740218


## [0.10.2] - 2019-06-12

### Added
- Command `pcs config checkpoint diff` for displaying differences between two
  specified checkpoints ([rhbz#1655055])
- Support for resource instance attributes uniqueness check according to
  resource agent metadata ([rhbz#1665404])
- Command `pcs resource bundle reset` for resetting a bundle configuration
  ([rhbz#1657166])
- `pcs cluster setup` now checks if nodes' addresses match the value of
  `ip_version` ([rhbz#1667053])
- Support for sbd option SBD\_TIMEOUT\_ACTION ([rhbz#1664828])
- Support for clearing expired moves and bans of resources ([rhbz#1625386])
- Commands for adding, changing and removing corosync links ([rhbz#1667058])

### Fixed
- Corosync config file parser updated and made more strict to match changes
  in corosync
- Allow non-root users to read quorum status (commands `pcs status corosync`,
  `pcs status quorum`, `pcs quorum device status`, `pcs quorum status`)
  ([rhbz#1653316])
- The removed command `pcs resource show` was dropped from usage and man page
  ([rhbz#1656953])
- Put proper link options' names to corosync.conf ([rhbz#1659051])
- Fixed issues in configuring links in the 'create cluster' form in web UI
  ([rhbz#1664057])
- Pcs no longer removes empty `meta_attributes`, `instance_attributes` and
  other nvsets and similar elements from CIB. Such behavior was causing
  problems when pacemaker ACLs were in effect, leading to inability of
  pushing modified CIBs to pacemaker. ([rhbz#1659144])
- `ipv4-6` and `ipv6-4` are now valid values of `ip_version` in cluster setup
  ([rhbz#1667040])
- Crash when using unsupported options in commands `pcs status` and
  `pcs config` ([rhbz#1668422])
- `pcs resource group add` now fails gracefully instead of dumping an invalid
  CIB when a group ID is already occupied by a non-resource element
  ([rhbz#1668223])
- pcs no longer spawns unnecessary processes for reading known hosts
  ([rhbz#1676945])
- Lower load caused by periodical config files syncing in pcsd by making it
  sync less frequently ([rhbz#1676957])
- Improve logging of periodical config files syncing in pcsd
- Knet link option `ip_version` has been removed, it was never supported by
  corosync. Transport option `ip_version` is still in place. ([rhbz#1674005])
- Several bugs in linklist validation in `pcs cluster setup`
  ([rhbz#1667090])
- Fixed a typo in documentation (regardles -> regardless) ([rhbz#1660702])
- Fixed pcsd crashes when non-ASCII characters are present in systemd journal
- Pcs works even when PATH environment variable is not set ([rhbz#1673825])
- Fixed several "Unknown report" error messages
- Pcsd SSL certificates are no longer synced across cluster nodes when
  creating new cluster or adding new node to an existing cluster. To enable
  the syncing, set `PCSD_SSL_CERT_SYNC_ENABLED` to `true` in pcsd config.
  ([rhbz#1673822])
- Pcs now reports missing node names in corosync.conf instead of failing
  silently
- Fixed an issue where some pcs commands could not connect to cluster nodes
  over IPv6
- Fixed cluster setup problem in web UI when full domain names are used
  ([rhbz#1687965])
- Fixed inability to setup cluster in web UI when knet links are not
  specified ([rhbz#1687562])
- `--force` works correctly in `pcs quorum unblock` (broken since pcs-0.10.1)
- Removed `3des` from allowed knet crypto ciphers since it is actually not
  supported by corosync
- Improved validation of corosync options and their values ([rhbz#1679196],
  [rhbz#1679197])

### Changed
- Do not check whether watchdog is defined as an absolute path when enabling
  SBD. This check is not needed anymore as we are validating watchdog against
  a list provided by SBD itself.

### Deprecated
- Command `pcs resource show`, removed in pcs-0.10.1, has been readded as
  deprecated to ease transition to its replacements. It will be removed again
  in future. ([rhbz#1661059])

[rhbz#1625386]: https://bugzilla.redhat.com/show_bug.cgi?id=1625386
[rhbz#1653316]: https://bugzilla.redhat.com/show_bug.cgi?id=1653316
[rhbz#1655055]: https://bugzilla.redhat.com/show_bug.cgi?id=1655055
[rhbz#1656953]: https://bugzilla.redhat.com/show_bug.cgi?id=1656953
[rhbz#1657166]: https://bugzilla.redhat.com/show_bug.cgi?id=1657166
[rhbz#1659051]: https://bugzilla.redhat.com/show_bug.cgi?id=1659051
[rhbz#1659144]: https://bugzilla.redhat.com/show_bug.cgi?id=1659144
[rhbz#1660702]: https://bugzilla.redhat.com/show_bug.cgi?id=1660702
[rhbz#1661059]: https://bugzilla.redhat.com/show_bug.cgi?id=1661059
[rhbz#1664057]: https://bugzilla.redhat.com/show_bug.cgi?id=1664057
[rhbz#1664828]: https://bugzilla.redhat.com/show_bug.cgi?id=1664828
[rhbz#1665404]: https://bugzilla.redhat.com/show_bug.cgi?id=1665404
[rhbz#1667040]: https://bugzilla.redhat.com/show_bug.cgi?id=1667040
[rhbz#1667053]: https://bugzilla.redhat.com/show_bug.cgi?id=1667053
[rhbz#1667058]: https://bugzilla.redhat.com/show_bug.cgi?id=1667058
[rhbz#1667090]: https://bugzilla.redhat.com/show_bug.cgi?id=1667090
[rhbz#1668223]: https://bugzilla.redhat.com/show_bug.cgi?id=1668223
[rhbz#1668422]: https://bugzilla.redhat.com/show_bug.cgi?id=1668422
[rhbz#1673822]: https://bugzilla.redhat.com/show_bug.cgi?id=1673822
[rhbz#1673825]: https://bugzilla.redhat.com/show_bug.cgi?id=1673825
[rhbz#1674005]: https://bugzilla.redhat.com/show_bug.cgi?id=1674005
[rhbz#1676945]: https://bugzilla.redhat.com/show_bug.cgi?id=1676945
[rhbz#1676957]: https://bugzilla.redhat.com/show_bug.cgi?id=1676957
[rhbz#1679196]: https://bugzilla.redhat.com/show_bug.cgi?id=1679196
[rhbz#1679197]: https://bugzilla.redhat.com/show_bug.cgi?id=1679197
[rhbz#1687562]: https://bugzilla.redhat.com/show_bug.cgi?id=1687562
[rhbz#1687965]: https://bugzilla.redhat.com/show_bug.cgi?id=1687965


## [0.10.1] - 2018-11-23

### Removed
- Pcs-0.10 removes support for CMAN, Corosync 1.x, Corosync 2.x and Pacemaker
  1.x based clusters. For managing those clusters use pcs-0.9.x.
- Pcs-0.10 requires Python 3.6 and Ruby 2.2, support for older Python and
  Ruby versions has been removed.
- `pcs resource failcount reset` command has been removed as `pcs resource
  cleanup` is doing exactly the same job.
  ([rhbz#1427273])
- Deprecated commands `pcs cluster remote-node add | remove` have been
  removed as they were replaced with `pcs cluster node add-guest |
  remove-guest`
- Ability to create master resources has been removed as they are deprecated
  in Pacemaker 2.x ([rhbz#1542288])
  - Instead of `pcs resource create ... master` use `pcs resource create ...
    promotable` or `pcs resource create ... clone promotable=true`
  - Instead of `pcs resource master` use `pcs resource promotable` or
    `pcs resource clone ... promotable=true`
- Deprecated --clone option from `pcs resource create` command
- Ability to manage node attributes with `pcs property set|unset|show`
  commands (using `--node` option). The same functionality is still available
  using `pcs node attribute` command.
- Undocumented version of the `pcs constraint colocation add` command, its
  syntax was `pcs constraint colocation add <source resource id>
  <target resource id> [score] [options]`
- Deprecated commands `pcs cluster standby | unstandby`, use `pcs node
  standby | unstandby` instead
- Deprecated command `pcs cluster quorum unblock` which was replaced by
  `pcs quorum unblock`
- Subcommand `pcs status groups` as it was not showing a cluster status but
  cluster configuration. The same functionality is still available using
  command `pcs resource group list`
- Undocumented command `pcs acl target`, use `pcs acl user` instead

### Added
- Validation for an inaccessible resource inside a bundle ([rhbz#1462248])
- Options to filter failures by an operation and its interval in
  `pcs resource cleanup` and `pcs resource failcount show` commands
  ([rhbz#1427273])
- Commands for listing and testing watchdog devices ([rhbz#1578891])
- Commands for creating promotable clone resources `pcs resource promotable`
  and `pcs resource create ... promotable` ([rhbz#1542288])
- `pcs resource update` and `pcs resource meta` commands change master
  resources to promotable clone resources because master resources are
  deprecated in Pacemaker 2.x ([rhbz#1542288])
- Support for the `promoted-max` bundle option replacing the `masters` option
  in Pacemaker 2.x ([rhbz#1542288])
- Support for OP\_NO\_RENEGOTIATION option when OpenSSL supports it (even
  with Python 3.6) ([rhbz#1566430])
- Support for container types `rkt` and `podman` into bundle commands
  ([rhbz#1619620])
- Support for promotable clone resources in pcsd and web UI ([rhbz#1542288])
- Obsoleting parameters of resource and fence agents are now supported and
  preferred over deprecated parameters ([rhbz#1436217])
- `pcs status` now shows failed and pending fencing actions and `pcs status
  --full` shows the whole fencing history. Pacemaker supporting fencing
  history is required. ([rhbz#1615891])
- `pcs stonith history` commands for displaying, synchronizing and cleaning
  up fencing history. Pacemaker supporting fencing history is required.
  ([rhbz#1620190])
- Validation of node existence in a cluster when creating location
  constraints ([rhbz#1553718])
- Command `pcs client local-auth` for authentication of pcs client against
  local pcsd. This is required when a non-root user wants to execute a
  command which requires root permissions (e.g. `pcs cluster start`).
  ([rhbz#1554302])
- Command `pcs resource group list` which has the same functionality as
  removed command `pcs resource show --groups`

### Fixed
- Fixed encoding of the CIB\_user\_groups cookie in communication between
  nodes.
- `pcs cluster cib-push diff-against=<file>` does not consider an empty diff
  as an error ([ghpull#166])
- `pcs cluster cib-push diff-against=<file>` exits gracefully with an error
  message if crm\_feature\_set < 3.0.9 ([rhbz#1488044])
- `pcs resource update` does not create an empty meta\_attributes element any
  more ([rhbz#1568353])
- `pcs resource debug-*` commands provide debug messages even with
  pacemaker-1.1.18 and newer ([rhbz#1574898])
- Improve `pcs quorum device add` usage and man page ([rhbz#1476862])
- Removing resources using web UI when the operation takes longer than
  expected ([rhbz#1579911])
- Removing a cluster node no longer leaves the node in the CIB and therefore
  cluster status even if the removal is run on the node which is being
  removed ([rhbz#1595829])
- Possible race condition causing an HTTP 408 error when sending larger files
  via pcs ([rhbz#1600169])
- Configuring QDevice works even if NSS with the new db format (cert9.db,
  key4.db, pkcs11.txt) is used ([rhbz#1596721])
- Options starting with '-' and '--' are no longer accepted by commands for
  which those options have no effect ([rhbz#1533866])
- When a user makes an error in a pcs command, usage for that specific
  command is printed instead of printing the whole usage
- Show more user friendly error message when testing watchdog device and
  multiple devices are present ([rhbz#1578891])
- Do not distinguish between supported and unsupported watchdog devices as
  SBD cannot reliably provide such information ([rhbz#1578891])
- `pcs config` no longer crashes when `crm_mon` prints something to stderr
  ([rhbz#1578955])
- `pcs resource bundle update` cmd for bundles which are using unsupported
  container backend ([rhbz#1619620])
- Do not crash if unable to load SSL certificate or key, log errors and exit
  gracefully instead ([rhbz#1638852])
- Fixed several issues in parsing `pcs constraint colocation add` command.
- All `remove` subcommands now have `delete` aliases and vice versa.
  Previously, only some of them did and it was mostly undocumented.
- The `pcs acl role delete` command no longer deletes ACL users and groups
  with no ACL roles assigned

### Changed
- Authentication has been overhauled ([rhbz#1549535]):
  - The `pcs cluster auth` command only authenticates nodes in a local
    cluster and does not accept a node list.
  - The new command for authentication is `pcs host auth`. It allows to
    specify host names, addresses and pcsd ports.
  - Previously, running `pcs cluster auth A B C` caused A, B and C to be all
    authenticated against each other. Now, `pcs host auth A B C` makes the
    local host authenticated against A, B and C. This allows better control
    of what is authenticated against what.
  - The `pcs pcsd clear-auth` command has been replaced by `pcs pcsd deauth`
    and `pcs host deauth` commands. The new commands allow deauthenticating
    a single host / token as well as all hosts / tokens.
  - These changes are not backward compatible. You should use the
    `pcs host auth` command to re-authenticate your hosts.
- The `pcs cluster setup` command has been overhauled ([rhbz#1158816],
  [rhbz#1183103]):
  - It works with Corosync 3.x only and supports knet as well as udp/udpu.
  - Node names are now supported.
  - The number of Corosync options configurable by the command has been
    significantly increased.
  - The syntax of the command has been completely changed to accommodate the
    changes and new features.
  - Corosync encryption is enabled by default when knet is used
    ([rhbz#1648942])
- The `pcs cluster node add` command has been overhauled ([rhbz#1158816],
  [rhbz#1183103])
  - It works with Corosync 3.x only and supports knet as well as udp/udpu.
  - Node names are now supported.
  - The syntax of the command has been changed to accommodate new features
    and to be consistent with other pcs commands.
- The `pcs cluster node remove` command has been overhauled ([rhbz#1158816],
  [rhbz#1595829]):
  - It works with Corosync 3.x only and supports knet as well as udp/udpu.
  - It is now possible to remove more than one node at once.
  - Removing a cluster node no longer leaves the node in the CIB and
    therefore cluster status even if the removal is run on the node which is
    being removed
- Node names are fully supported now and are no longer coupled with node
  addresses. It is possible to set up a cluster where Corosync communicates
  over different addresses than pcs/pcsd. ([rhbz#1158816], [rhbz#1183103])
- Node names are now required while node addresses are optional in the
  `pcs cluster node add-guest` and `pcs cluster node add-remote` commands.
  Previously, it was the other way around.
- Web UI has been updated following changes in authentication and support for
  Corosync 3.x ([rhbz#1158816], [rhbz#1183103], [rhbz#1549535])
- Commands related to resource failures have been overhauled to support
  changes in pacemaker. Failures are now tracked per resource operations on
  top of resources and nodes. ([rhbz#1427273], [rhbz#1588667])
- `--watchdog` and `--device` options of `pcs stonith sbd enable` and
  `pcs stonith sbd device setup` commands have been replaced with `watchdog`
  and `device` options respectively
- Update pacemaker daemon names to match changes in pacemaker-2.0
  ([rhbz#1573344])
- Watchdog devices are validated against a list provided by sbd
  ([rhbz#1578891])
- Resource operation option `requires` is no longer accepted to match changes
  in pacemaker-2.0 ([rhbz#1605185])
- Update pacemaker exit codes to match changes in pacemaker-2.0
  ([rhbz#1536121])
- `pcs cluster cib-upgrade` no longer exits with an error if the CIB schema
  is already the latest available (this has been changed in pacemaker-2.0)
- Pcs now configures corosync to put timestamps in its log ([rhbz#1615420])
- Option `-V` has been replaced with `--full` and a CIB file can be specified
  only using option `-f` in `pcs cluster verify`
- Master resources are now called promotable clone resources to match changes
  in pacemaker-2.0 ([rhbz#1542288])
- Key size of default pcsd self-generated certificates increased from 2048b
  to 3072b ([rhbz#1638852])
- pcsd.service now depends on network-online.target ([rhbz#1640477])
- Split command `pcs resource [show]` into two new commands:
  - `pcs resource [status]` - same as `pcs resource [show]`
  - `pcs resource config` - same as `pcs resource [show] --full` or resource
    id specified instead of --full
  Respective changes have been made to `pcs stonith [show]` command.
- Previously, `pcs cluster sync` synchronized only corosync configuration
  across all nodes configured in the cluster. This command will be changed in
  the future to sync all cluster configuration. New subcommand `pcs cluster
  sync corosync` has been introduced to sync only corosync configuration. For
  now, both commands have the same functionality.
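A sketch of the new authentication flow described in this Changed section;
the host names, addresses and port are made up for illustration:

```sh
# authenticate the local host against three hosts, one of them on a
# non-default pcsd port
pcs host auth node1 node2 addr=192.0.2.12 node3 addr=192.0.2.13:2225
# remove the stored token for a single host, or for all hosts
pcs host deauth node3
pcs host deauth
```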
### Security
- CVE-2018-1086: Debug parameter removal bypass, allowing information
  disclosure ([rhbz#1557366])
- CVE-2018-1079: Privilege escalation via authorized user malicious REST call
  ([rhbz#1550243])

### Deprecated
- The `masters` bundle option is obsoleted by the `promoted-max` option in
  Pacemaker 2.x and therefore in pcs ([rhbz#1542288])
- `pcs cluster uidgid rm`, use `pcs cluster uidgid delete` or `pcs cluster
  uidgid remove` instead

[ghpull#166]: https://github.com/ClusterLabs/pcs/pull/166
[rhbz#1158816]: https://bugzilla.redhat.com/show_bug.cgi?id=1158816
[rhbz#1183103]: https://bugzilla.redhat.com/show_bug.cgi?id=1183103
[rhbz#1427273]: https://bugzilla.redhat.com/show_bug.cgi?id=1427273
[rhbz#1436217]: https://bugzilla.redhat.com/show_bug.cgi?id=1436217
[rhbz#1462248]: https://bugzilla.redhat.com/show_bug.cgi?id=1462248
[rhbz#1476862]: https://bugzilla.redhat.com/show_bug.cgi?id=1476862
[rhbz#1488044]: https://bugzilla.redhat.com/show_bug.cgi?id=1488044
[rhbz#1533866]: https://bugzilla.redhat.com/show_bug.cgi?id=1533866
[rhbz#1536121]: https://bugzilla.redhat.com/show_bug.cgi?id=1536121
[rhbz#1542288]: https://bugzilla.redhat.com/show_bug.cgi?id=1542288
[rhbz#1549535]: https://bugzilla.redhat.com/show_bug.cgi?id=1549535
[rhbz#1550243]: https://bugzilla.redhat.com/show_bug.cgi?id=1550243
[rhbz#1553718]: https://bugzilla.redhat.com/show_bug.cgi?id=1553718
[rhbz#1554302]: https://bugzilla.redhat.com/show_bug.cgi?id=1554302
[rhbz#1557366]: https://bugzilla.redhat.com/show_bug.cgi?id=1557366
[rhbz#1566430]: https://bugzilla.redhat.com/show_bug.cgi?id=1566430
[rhbz#1568353]: https://bugzilla.redhat.com/show_bug.cgi?id=1568353
[rhbz#1573344]: https://bugzilla.redhat.com/show_bug.cgi?id=1573344
[rhbz#1574898]: https://bugzilla.redhat.com/show_bug.cgi?id=1574898
[rhbz#1578891]: https://bugzilla.redhat.com/show_bug.cgi?id=1578891
[rhbz#1578955]: https://bugzilla.redhat.com/show_bug.cgi?id=1578955
[rhbz#1579911]: https://bugzilla.redhat.com/show_bug.cgi?id=1579911
[rhbz#1588667]: https://bugzilla.redhat.com/show_bug.cgi?id=1588667
[rhbz#1595829]: https://bugzilla.redhat.com/show_bug.cgi?id=1595829
[rhbz#1596721]: https://bugzilla.redhat.com/show_bug.cgi?id=1596721
[rhbz#1600169]: https://bugzilla.redhat.com/show_bug.cgi?id=1600169
[rhbz#1605185]: https://bugzilla.redhat.com/show_bug.cgi?id=1605185
[rhbz#1615420]: https://bugzilla.redhat.com/show_bug.cgi?id=1615420
[rhbz#1615891]: https://bugzilla.redhat.com/show_bug.cgi?id=1615891
[rhbz#1619620]: https://bugzilla.redhat.com/show_bug.cgi?id=1619620
[rhbz#1620190]: https://bugzilla.redhat.com/show_bug.cgi?id=1620190
[rhbz#1638852]: https://bugzilla.redhat.com/show_bug.cgi?id=1638852
[rhbz#1640477]: https://bugzilla.redhat.com/show_bug.cgi?id=1640477
[rhbz#1648942]: https://bugzilla.redhat.com/show_bug.cgi?id=1648942


## [0.9.163] - 2018-02-20

### Added
- Added `pcs status booth` as an alias to `pcs booth status`
- A warning is displayed in `pcs status` and a stonith device detail in web
  UI when a stonith device has its `method` option set to `cycle`
  ([rhbz#1523378])

### Fixed
- `--skip-offline` is no longer ignored in the `pcs quorum device remove`
  command
- pcs now waits up to 5 minutes (previously 10 seconds) for pcsd restart when
  synchronizing pcsd certificates
- Usage and man page now correctly state it is possible to enable or disable
  several stonith devices at once
- It is now possible to set the `action` option of stonith devices in web UI
  by using force ([rhbz#1421702])
- Do not crash when `--wait` is used in `pcs stonith create`
  ([rhbz#1522813])
- Nodes are now authenticated after running `pcs cluster auth` even if an
  existing corosync.conf defines no nodes ([ghissue#153], [rhbz#1517333])
- Pcs now properly exits with code 1 when an error occurs in `pcs cluster
  node add-remote` and `pcs cluster node add-guest` commands
  ([rhbz#1464781])
- Fixed a crash in the `pcs booth sync` command ([rhbz#1527530])
- Always replace the whole CIB instead of applying a diff when
  crm\_feature\_set <= 3.0.8 ([rhbz#1488044])
- Fixed `pcs cluster auth` in a cluster when not authenticated and using a
  non-default port ([rhbz#1415197])
- Fixed `pcs cluster auth` in a cluster when previously authenticated using a
  non-default port and reauthenticating using an implicit default port
  ([rhbz#1415197])

[ghissue#153]: https://github.com/ClusterLabs/pcs/issues/153
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
[rhbz#1464781]: https://bugzilla.redhat.com/show_bug.cgi?id=1464781
[rhbz#1488044]: https://bugzilla.redhat.com/show_bug.cgi?id=1488044
[rhbz#1517333]: https://bugzilla.redhat.com/show_bug.cgi?id=1517333
[rhbz#1522813]: https://bugzilla.redhat.com/show_bug.cgi?id=1522813
[rhbz#1523378]: https://bugzilla.redhat.com/show_bug.cgi?id=1523378
[rhbz#1527530]: https://bugzilla.redhat.com/show_bug.cgi?id=1527530


## [0.9.162] - 2017-11-15

### Added
- `pcs status --full` now displays information about tickets
  ([rhbz#1389943])
- Support for managing qdevice heuristics ([rhbz#1389209])
- SNMP agent providing information about cluster to the master agent. It
  supports only python 2.7 for now ([rhbz#1367808]).

### Fixed
- Fixed crash when loading a huge xml ([rhbz#1506864])
- Fixed adding an existing cluster into the web UI ([rhbz#1415197])
- False warnings about failed actions when resource is master/unmaster from
  the web UI ([rhbz#1506220])

### Changed
- `pcs resource|stonith cleanup` no longer deletes the whole operation
  history of resources. Instead, it only deletes failed operations from the
  history. The original functionality is available in the
  `pcs resource|stonith refresh` command. ([rhbz#1508351], [rhbz#1508350])
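A sketch contrasting the two commands described in the entry above; the
resource id `webserver` is hypothetical:

```sh
# new behaviour: delete only failed operations from the resource's history
pcs resource cleanup webserver
# previous cleanup behaviour: wipe the whole operation history and re-detect
# the current state of the resource
pcs resource refresh webserver
```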
[rhbz#1367808]: https://bugzilla.redhat.com/show_bug.cgi?id=1367808
[rhbz#1389209]: https://bugzilla.redhat.com/show_bug.cgi?id=1389209
[rhbz#1389943]: https://bugzilla.redhat.com/show_bug.cgi?id=1389943
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1506220]: https://bugzilla.redhat.com/show_bug.cgi?id=1506220
[rhbz#1506864]: https://bugzilla.redhat.com/show_bug.cgi?id=1506864
[rhbz#1508350]: https://bugzilla.redhat.com/show_bug.cgi?id=1508350
[rhbz#1508351]: https://bugzilla.redhat.com/show_bug.cgi?id=1508351


## [0.9.161] - 2017-11-02

### Added
- List of pcs and pcsd capabilities ([rhbz#1230919])

### Fixed
- Fixed `pcs cluster auth` when already authenticated and using a different
  port ([rhbz#1415197])
- It is now possible to restart a bundle resource on one node
  ([rhbz#1501274])
- `resource update` no longer exits with an error when the `remote-node` meta
  attribute is set to the same value that it already has ([rhbz#1502715],
  [ghissue#145])
- Listing and describing resource and stonith agents no longer crashes when
  agents' metadata contain non-ascii characters ([rhbz#1503110],
  [ghissue#151])

[ghissue#145]: https://github.com/ClusterLabs/pcs/issues/145
[ghissue#151]: https://github.com/ClusterLabs/pcs/issues/151
[rhbz#1230919]: https://bugzilla.redhat.com/show_bug.cgi?id=1230919
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1501274]: https://bugzilla.redhat.com/show_bug.cgi?id=1501274
[rhbz#1502715]: https://bugzilla.redhat.com/show_bug.cgi?id=1502715
[rhbz#1503110]: https://bugzilla.redhat.com/show_bug.cgi?id=1503110


## [0.9.160] - 2017-10-09

### Added
- Configurable pcsd port ([rhbz#1415197])
- Description of the `--force` option added to man page and help
  ([rhbz#1491631])

### Fixed
- Fixed some crashes when pcs encounters a non-ascii character in environment
  variables, command line arguments and so on ([rhbz#1435697])
- Fixed detecting if systemd is in use ([ghissue#118])
- Upgrade CIB schema version when `resource-discovery` option is used in
  location constraints ([rhbz#1420437])
- Fixed error messages in `pcs cluster report` ([rhbz#1388783])
- Increase request timeout when starting a cluster with large number of nodes
  to prevent timeouts ([rhbz#1463327])
- Fixed "Unable to update cib" error caused by invalid resource operation IDs
- `pcs resource op defaults` now fails on an invalid option ([rhbz#1341582])
- Fixed behaviour of `pcs cluster verify` command when entered with the
  filename argument ([rhbz#1213946])

### Changed
- CIB changes are now pushed to pacemaker as a diff in commands overhauled to
  the new architecture (previously the whole CIB was pushed). This resolves
  race conditions and ACLs related errors when pushing CIB. ([rhbz#1441673])
- All actions / operations defined in resource agent's metadata (except
  meta-data, status and validate-all) are now copied to the CIB when creating
  a resource. ([rhbz#1418199], [ghissue#132])
- Improve documentation of the `pcs stonith confirm` command
  ([rhbz#1489682])

### Deprecated
- This is the last version fully supporting CMAN clusters and python 2.6.
  Support for these will be gradually dropped.
[ghissue#118]: https://github.com/ClusterLabs/pcs/issues/118
[ghissue#132]: https://github.com/ClusterLabs/pcs/issues/132
[rhbz#1213946]: https://bugzilla.redhat.com/show_bug.cgi?id=1213946
[rhbz#1341582]: https://bugzilla.redhat.com/show_bug.cgi?id=1341582
[rhbz#1388783]: https://bugzilla.redhat.com/show_bug.cgi?id=1388783
[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
[rhbz#1418199]: https://bugzilla.redhat.com/show_bug.cgi?id=1418199
[rhbz#1420437]: https://bugzilla.redhat.com/show_bug.cgi?id=1420437
[rhbz#1435697]: https://bugzilla.redhat.com/show_bug.cgi?id=1435697
[rhbz#1441673]: https://bugzilla.redhat.com/show_bug.cgi?id=1441673
[rhbz#1463327]: https://bugzilla.redhat.com/show_bug.cgi?id=1463327
[rhbz#1489682]: https://bugzilla.redhat.com/show_bug.cgi?id=1489682
[rhbz#1491631]: https://bugzilla.redhat.com/show_bug.cgi?id=1491631


## [0.9.159] - 2017-06-30

### Added
- Option to create a cluster with or without corosync encryption enabled; by
  default the encryption is disabled ([rhbz#1165821])
- It is now possible to disable, enable, unmanage and manage bundle resources
  and set their meta attributes ([rhbz#1447910])
- Pcs now warns against using the `action` option of stonith devices
  ([rhbz#1421702])

### Fixed
- Fixed crash of the `pcs cluster setup` command when the `--force` flag was
  used ([rhbz#1176018])
- Fixed crash of the `pcs cluster destroy --all` command when the cluster was
  not running ([rhbz#1176018])
- Fixed crash of the `pcs config restore` command when restoring pacemaker
  authkey ([rhbz#1176018])
- Fixed "Error: unable to get cib" when adding a node to a stopped cluster
  ([rhbz#1176018])
- Fixed a crash in the `pcs cluster node add-remote` command when an id
  conflict occurs ([rhbz#1386114])
- Fixed creating a new cluster from the web UI ([rhbz#1284404])
- `pcs cluster node add-guest` now works with the flag `--skip-offline`
  ([rhbz#1176018])
- `pcs cluster node remove-guest` can be run again when the guest node was
  unreachable the first time ([rhbz#1176018])
- Fixed "Error: Unable to read /etc/corosync/corosync.conf" when running
  `pcs resource create` ([rhbz#1386114])
- It is now possible to set `debug` and `verbose` parameters of stonith
  devices ([rhbz#1432283])
- Resource operation ids are now properly validated and no longer ignored in
  `pcs resource create`, `pcs resource update` and `pcs resource op add`
  commands ([rhbz#1443418])
- Flag `--force` works correctly when an operation is not successful on some
  nodes during `pcs cluster node add-remote` or `pcs cluster node add-guest`
  ([rhbz#1464781])

### Changed
- Binary data are stored in corosync authkey ([rhbz#1165821])
- It is now mandatory to specify container type in the `resource bundle
  create` command
- When creating a new cluster, corosync communication encryption is disabled
  by default (in 0.9.158 it was enabled by default, in 0.9.157 and older it
  was disabled)

[rhbz#1165821]: https://bugzilla.redhat.com/show_bug.cgi?id=1165821
[rhbz#1176018]: https://bugzilla.redhat.com/show_bug.cgi?id=1176018
[rhbz#1284404]: https://bugzilla.redhat.com/show_bug.cgi?id=1284404
[rhbz#1386114]: https://bugzilla.redhat.com/show_bug.cgi?id=1386114
[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
[rhbz#1432283]: https://bugzilla.redhat.com/show_bug.cgi?id=1432283
[rhbz#1443418]: https://bugzilla.redhat.com/show_bug.cgi?id=1443418
[rhbz#1447910]: https://bugzilla.redhat.com/show_bug.cgi?id=1447910
[rhbz#1464781]: https://bugzilla.redhat.com/show_bug.cgi?id=1464781


## [0.9.158] - 2017-05-23

### Added
- Support for bundle resources (CLI only) ([rhbz#1433016])
- Commands for adding and removing guest and remote nodes including handling
  pacemaker authkey (CLI only) ([rhbz#1176018], [rhbz#1254984],
  [rhbz#1386114], [rhbz#1386512])
- Command `pcs cluster node clear` to remove a node from pacemaker's
  configuration and caches
- Backing up and restoring cluster configuration by `pcs config backup` and
  `pcs config restore` commands now support corosync and pacemaker authkeys
  ([rhbz#1165821], [rhbz#1176018])

### Deprecated
- `pcs cluster remote-node add` and `pcs cluster remote-node remove` commands
  have been deprecated in favor of `pcs cluster node add-guest` and
  `pcs cluster node remove-guest` commands ([rhbz#1386512])

### Fixed
- Fixed a bug which under specific conditions caused pcsd to crash on start
  when running under systemd ([ghissue#134])
- `pcs resource unmanage` now sets the unmanaged flag to primitive resources
  even if a clone or master/slave resource is specified. Thus the primitive
  resources will not become managed just by uncloning. This also prevents
  some discrepancies between disabled monitor operations and the unmanaged
  flag. ([rhbz#1303969])
- `pcs resource unmanage --monitor` now properly disables monitor operations
  even if a clone or master/slave resource is specified. ([rhbz#1303969])
- `--help` option now shows help just for the specified command. Previously
  the usage for a whole group of commands was shown.
- Fixed a crash when `pcs cluster cib-push` is called with an explicit value
  of the `--wait` flag ([rhbz#1422667])
- Handle pcsd crash when an unusable address is set in `PCSD_BIND_ADDR`
  ([rhbz#1373614])
- Removal of a pacemaker remote resource no longer causes the respective
  remote node to be fenced ([rhbz#1390609])

### Changed
- Newly created clusters are set up to encrypt corosync communication
  ([rhbz#1165821], [ghissue#98])

[ghissue#98]: https://github.com/ClusterLabs/pcs/issues/98
[ghissue#134]: https://github.com/ClusterLabs/pcs/issues/134
[rhbz#1176018]: https://bugzilla.redhat.com/show_bug.cgi?id=1176018
[rhbz#1254984]: https://bugzilla.redhat.com/show_bug.cgi?id=1254984
[rhbz#1303969]: https://bugzilla.redhat.com/show_bug.cgi?id=1303969
[rhbz#1373614]: https://bugzilla.redhat.com/show_bug.cgi?id=1373614
[rhbz#1386114]: https://bugzilla.redhat.com/show_bug.cgi?id=1386114
[rhbz#1386512]: https://bugzilla.redhat.com/show_bug.cgi?id=1386512
[rhbz#1390609]: https://bugzilla.redhat.com/show_bug.cgi?id=1390609
[rhbz#1422667]: https://bugzilla.redhat.com/show_bug.cgi?id=1422667
[rhbz#1433016]: https://bugzilla.redhat.com/show_bug.cgi?id=1433016
[rhbz#1165821]: https://bugzilla.redhat.com/show_bug.cgi?id=1165821


## [0.9.157] - 2017-04-10

### Added
- Resources in location constraints now may be specified by resource name
  patterns in addition to resource names ([rhbz#1362493])
- Proxy settings description in pcsd configuration file ([rhbz#1315627])
- Man page for pcsd ([rhbz#1378742])
- Pcs now allows to set `trace_ra` and `trace_file` options of
  `ocf:heartbeat` and `ocf:pacemaker` resources ([rhbz#1421702])
- `pcs resource describe` and `pcs stonith describe` commands now show all
  information about the specified agent if the `--full` flag is used
- `pcs resource manage | unmanage` enables or disables monitor operations,
  respectively, when the `--monitor` flag is specified ([rhbz#1303969])
- Support for shared storage in SBD.
  Currently, there is very limited support in web UI ([rhbz#1413958])

### Changed
- It is now possible to specify more than one resource in the `pcs resource
  enable` and `pcs resource disable` commands.

### Fixed
- Python 3: pcs no longer spams stderr with error messages when communicating
  with another node
- Stopping a cluster does not time out too early and it generally works
  better even if the cluster is running Virtual IP resources
  ([rhbz#1334429])
- `pcs booth remove` now works correctly even if the booth resource group is
  disabled (another fix) ([rhbz#1389941])
- Fixed Cross-site scripting (XSS) vulnerability in web UI
  ([CVE-2017-2661], [rhbz#1434111])
- Pcs no longer allows to create a stonith resource based on an agent whose
  name contains a colon ([rhbz#1415080])
- Pcs command now launches Python interpreter with "sane" options
  (python -Es) ([rhbz#1328882])
- Clufter is now supported on both Python 2 and Python 3 ([rhbz#1428350])
- Do not colorize clufter output if saved to a file

[CVE-2017-2661]: https://access.redhat.com/security/cve/CVE-2017-2661
[rhbz#1303969]: https://bugzilla.redhat.com/show_bug.cgi?id=1303969
[rhbz#1315627]: https://bugzilla.redhat.com/show_bug.cgi?id=1315627
[rhbz#1328882]: https://bugzilla.redhat.com/show_bug.cgi?id=1328882
[rhbz#1334429]: https://bugzilla.redhat.com/show_bug.cgi?id=1334429
[rhbz#1362493]: https://bugzilla.redhat.com/show_bug.cgi?id=1362493
[rhbz#1378742]: https://bugzilla.redhat.com/show_bug.cgi?id=1378742
[rhbz#1389941]: https://bugzilla.redhat.com/show_bug.cgi?id=1389941
[rhbz#1413958]: https://bugzilla.redhat.com/show_bug.cgi?id=1413958
[rhbz#1415080]: https://bugzilla.redhat.com/show_bug.cgi?id=1415080
[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
[rhbz#1428350]: https://bugzilla.redhat.com/show_bug.cgi?id=1428350
[rhbz#1434111]: https://bugzilla.redhat.com/show_bug.cgi?id=1434111


## [0.9.156] - 2017-02-10

### Added
- Fencing levels now may be targeted in CLI by a node name pattern or a node
  attribute in addition to a node name ([rhbz#1261116])
- `pcs cluster cib-push` allows to push a diff obtained internally by
  comparing CIBs in specified files ([rhbz#1404233], [rhbz#1419903])
- Added flags `--wait`, `--disabled`, `--group`, `--after`, `--before` into
  the command `pcs stonith create`
- Added commands `pcs stonith enable` and `pcs stonith disable`
- Command line option --request-timeout ([rhbz#1292858])
- Check whether a proxy is set when unable to connect to a node
  ([rhbz#1315627])

### Changed
- `pcs node [un]standby` and `pcs node [un]maintenance` are now atomic even
  if more than one node is specified ([rhbz#1315992])
- Restarting pcsd initiated from pcs is now a synchronous operation
  ([rhbz#1284404])
- Stopped bundling fonts used in pcsd web UI ([ghissue#125])
- In `pcs resource create` flags `--master` and `--clone` changed to keywords
  `master` and `clone`
- libcurl is now used for node to node communication

### Fixed
- When upgrading CIB to the latest schema version, check for minimal common
  version across the cluster ([rhbz#1389443])
- `pcs booth remove` now works correctly even if the booth resource group is
  disabled ([rhbz#1389941])
- Adding a node in a CMAN cluster does not cause the new node to be fenced
  immediately ([rhbz#1394846])
- Show proper error message when there is an HTTP communication failure
  ([rhbz#1394273])
- Fixed searching for files to remove in the `/var/lib` directory
  ([ghpull#119], [ghpull#120])
- Fixed messages when managing services (start, stop, enable, disable...)
- Fixed disabling services on systemd systems when using instances ([rhbz#1389501]) - Fixed parsing command line options ([rhbz#1404229]) - Pcs no longer exits with a false error message when pcsd-cli.rb outputs to stderr ([ghissue#124]) - Pcs now exits with an error when both `--all` and a list of nodes is specified in the `pcs cluster start | stop | enable | disable` commands ([rhbz#1339355]) - Built-in help and man page fixes and improvements ([rhbz#1347335]) - In `pcs resource create` the flag `--clone` no longer steals arguments from the keywords `meta` and `op` ([rhbz#1395226]) - `pcs resource create` does not produce an invalid CIB when the group id is already occupied by a non-resource element ([rhbz#1382004]) - Fixed misbehavior of the flag `--master` in `pcs resource create` command ([rhbz#1378107]) - Fixed tacit acceptance of invalid resource operations in `pcs resource create` ([rhbz#1398562]) - Fixed misplacing metadata for disabling when running `pcs resource create` with flags `--clone` and `--disabled` ([rhbz#1402475]) - Fixed incorrect acceptance of invalid resource operation attributes in `pcs resource create` ([rhbz#1382597]) - Fixed validation of options of resource operations in `pcs resource create` ([rhbz#1390071]) - Fixed silent omission of duplicate options ([rhbz#1390066]) - Added more validation for resource agent names ([rhbz#1387670]) - Fixed network communication issues in pcsd when a node was specified by an IPv6 address - Fixed JS error in web UI when empty cluster status is received ([rhbz#1396462]) - Fixed sending user group in cookies from Python 3 - Fixed pcsd restart in Python 3 - Fixed parsing XML in Python 3 (caused crashes when reading resource agents metadata) ([rhbz#1419639]) - Fixed the recognition of the structure of a resource agent name that contains a systemd instance ([rhbz#1419661]) ### Removed - Ruby 1.8 and 1.9 are no longer supported due to bad libcurl support [ghissue#124]: https://github.com/ClusterLabs/pcs/issues/124 [ghissue#125]: https://github.com/ClusterLabs/pcs/issues/125 [ghpull#119]: https://github.com/ClusterLabs/pcs/pull/119 [ghpull#120]: https://github.com/ClusterLabs/pcs/pull/120 [rhbz#1261116]: https://bugzilla.redhat.com/show_bug.cgi?id=1261116 [rhbz#1284404]: https://bugzilla.redhat.com/show_bug.cgi?id=1284404 [rhbz#1292858]: https://bugzilla.redhat.com/show_bug.cgi?id=1292858 [rhbz#1315627]: https://bugzilla.redhat.com/show_bug.cgi?id=1315627 [rhbz#1315992]: https://bugzilla.redhat.com/show_bug.cgi?id=1315992 [rhbz#1339355]: https://bugzilla.redhat.com/show_bug.cgi?id=1339355 [rhbz#1347335]: https://bugzilla.redhat.com/show_bug.cgi?id=1347335 [rhbz#1378107]: https://bugzilla.redhat.com/show_bug.cgi?id=1378107 [rhbz#1382004]: https://bugzilla.redhat.com/show_bug.cgi?id=1382004 [rhbz#1382597]: https://bugzilla.redhat.com/show_bug.cgi?id=1382597 [rhbz#1387670]: https://bugzilla.redhat.com/show_bug.cgi?id=1387670 [rhbz#1389443]: https://bugzilla.redhat.com/show_bug.cgi?id=1389443 [rhbz#1389501]: https://bugzilla.redhat.com/show_bug.cgi?id=1389501 [rhbz#1389941]: https://bugzilla.redhat.com/show_bug.cgi?id=1389941 [rhbz#1390066]: https://bugzilla.redhat.com/show_bug.cgi?id=1390066 [rhbz#1390071]: https://bugzilla.redhat.com/show_bug.cgi?id=1390071 [rhbz#1394273]: https://bugzilla.redhat.com/show_bug.cgi?id=1394273 [rhbz#1394846]: https://bugzilla.redhat.com/show_bug.cgi?id=1394846 [rhbz#1395226]: https://bugzilla.redhat.com/show_bug.cgi?id=1395226 [rhbz#1396462]: https://bugzilla.redhat.com/show_bug.cgi?id=1396462
[rhbz#1398562]: https://bugzilla.redhat.com/show_bug.cgi?id=1398562 [rhbz#1402475]: https://bugzilla.redhat.com/show_bug.cgi?id=1402475 [rhbz#1404229]: https://bugzilla.redhat.com/show_bug.cgi?id=1404229 [rhbz#1404233]: https://bugzilla.redhat.com/show_bug.cgi?id=1404233 [rhbz#1419639]: https://bugzilla.redhat.com/show_bug.cgi?id=1419639 [rhbz#1419661]: https://bugzilla.redhat.com/show_bug.cgi?id=1419661 [rhbz#1419903]: https://bugzilla.redhat.com/show_bug.cgi?id=1419903 ## [0.9.155] - 2016-11-03 ### Added - Show daemon status in `pcs status` on non-systemd machines - SBD support for cman clusters ([rhbz#1380352]) - Alerts management in pcsd ([rhbz#1376480]) ### Changed - Get all information about resource and stonith agents from pacemaker. Pcs now supports the same set of agents as pacemaker does. ([rhbz#1262001], [ghissue#81]) - `pcs resource create` now exits with an error if more than one resource agent matches the specified short agent name instead of randomly selecting one of the agents - Allow removing multiple alerts and alert recipients at once ### Fixed - When stopping a cluster with some of the nodes unreachable, stop the cluster completely on all reachable nodes ([rhbz#1380372]) - Fixed pcsd crash when rpam rubygem is installed ([ghissue#109]) - Fixed occasional crashes / failures when using a locale other than en\_US.UTF8 ([rhbz#1387106]) - Fixed starting and stopping cluster services on systemd machines without the `service` executable ([ghissue#115]) [ghissue#81]: https://github.com/ClusterLabs/pcs/issues/81 [ghissue#109]: https://github.com/ClusterLabs/pcs/issues/109 [ghissue#115]: https://github.com/ClusterLabs/pcs/issues/115 [rhbz#1262001]: https://bugzilla.redhat.com/show_bug.cgi?id=1262001 [rhbz#1376480]: https://bugzilla.redhat.com/show_bug.cgi?id=1376480 [rhbz#1380352]: https://bugzilla.redhat.com/show_bug.cgi?id=1380352 [rhbz#1380372]: https://bugzilla.redhat.com/show_bug.cgi?id=1380372 [rhbz#1387106]: https://bugzilla.redhat.com/show_bug.cgi?id=1387106 ## [0.9.154] - 2016-09-21 - There is no change log for this and previous releases. We are sorry. - Take a look at git history if you are interested. pcs-0.10.11/CONTRIBUTING.md000066400000000000000000000062671412706364600147120ustar00rootroot00000000000000# Contributing to the pcs project ## Running pcs and its test suite ### Python virtual environment * Using a Python virtual environment (pyenv) is highly recommended, as it provides a means of isolating development packages from system-wide packages. It allows installing specific versions of the python packages pcs depends on, independently of the rest of the system. * In this tutorial, we choose to create a pyenv in the `~/pyenvs/pcs` directory. * Create a base directory: `mkdir ~/pyenvs` * Create a pyenv: `python3 -m venv --system-site-packages ~/pyenvs/pcs` * To activate the pyenv, run `source ~/pyenvs/pcs/bin/activate` or `. ~/pyenvs/pcs/bin/activate` * To deactivate the pyenv, run `deactivate` ### Configure pcs * Go to the pcs directory. * If you created a pyenv according to the previous section, make sure it is activated. * Run `./autogen.sh`. * This generates the `configure` script based on the `configure.ac` file. * It requires an annotated tag to be present in the git repository. The easiest way to accomplish that is to add the upstream pcs repository as a remote repository. * Run `./configure`. * This checks all the dependencies and creates various files (including `Makefile` files) based on their `*.in` templates.
* To list available options and their description, run `./configure -h`. * The recommended setup for development is to run `./configure --enable-local-build --enable-dev-tests --enable-destructive-tests --enable-concise-tests --enable-parallel-tests` * Run `make`. * This downloads and installs dependencies, such as python modules and rubygems. ### Run pcs and pcsd * To run pcs, type `pcs/pcs`. * To run pcsd, type `scripts/pcsd.sh`. ### Pcs test suite * To run all the tests, type `make check`. * You may run specific tests like this: * `make black_check` * `make mypy` * `make pylint` * `make tests_tier0` * `make tests_tier1` * `make pcsd-tests` * To run specific tests from the python test suite, type `pcs_test/suite <test>` * When `make check` passes, you may want to run `make distcheck`. * This generates a distribution tarball and checks it. * The check is done by extracting files from the tarball, running `./configure` and `make check`. * Note that `./configure` is run with no options, so it requires dependencies to be installed system-wide. This can be overridden by running `make distcheck DISTCHECK_CONFIGURE_FLAGS='...'`. * The point of this test is to make sure all necessary files are present in the tarball. * To run the black code formatter, type `make black`. ### Distribution tarball * To create a tarball for distribution, run `make dist`. * The user of the tarball is supposed to run `./configure` with options they see fit. Then, they can run `make` with any target they need. ### Important notes * All system-dependent paths must be located in `pcs/settings.py.in` and `pcsd/settings.rb.in` files. * Do not forget to run `./configure` after changing any `*.in` file. * All files meant to be distributed must be listed in the `EXTRA_DIST` variable in the `Makefile.am` file in the specific directory (`pcs`, `pcs/pcs`, `pcs/pcs_tests`, `pcs/pcsd`), with the exception of files created by autoconf / automake. pcs-0.10.11/COPYING000066400000000000000000000432541412706364600135110ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
<one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.
pcs-0.10.11/MANIFEST.in000066400000000000000000000000741412706364600142070ustar00rootroot00000000000000include Makefile include COPYING graft pcsd prune pcsd/test pcs-0.10.11/Makefile.am000066400000000000000000000341661412706364600145160ustar00rootroot00000000000000EXTRA_DIST = \ autogen.sh \ CHANGELOG.md \ CONTRIBUTING.md \ dev_requirements.txt \ .eslintrc \ .gitlab-ci.yml \ make/gitlog-to-changelog \ make/git-version-gen \ make/release.mk \ MANIFEST.in \ mypy.ini \ parallel_tests_requirements.txt \ pylintrc \ pyproject.toml \ rpm/pcs.spec.in \ scripts/pcsd.sh.in \ .version AUTOMAKE_OPTIONS = foreign MAINTAINERCLEANFILES = \ aclocal.m4 \ autoconf \ autoheader \ automake \ autoscan.log \ compile \ config.guess \ config.sub \ configure \ configure.scan \ depcomp \ Gemfile \ Gemfile.lock \ install-sh \ libtool \ libtoolize \ ltmain.sh \ Makefile.in \ missing \ rpm/requirements.txt SPEC = rpm/$(PACKAGE_NAME).spec TARFILES = $(PACKAGE_NAME)-$(VERSION).tar.bz2 \ $(PACKAGE_NAME)-$(VERSION).tar.gz \ $(PACKAGE_NAME)-$(VERSION).tar.xz ACLOCAL_AMFLAGS = -I m4 SUBDIRS = pcs pcsd pcs_test PCS_PYTHON_PACKAGES = pcs/ pcs_test/ # depedency management # 1 - sources directory - with python package sources # 2 - destination directory - python package will be installed into the # `packages` subdirectory of this destination directory define build_python_bundle cd $(1) && \ PYTHONPATH=$(2)/packages/ \ LC_ALL=C.utf8 \ $(PYTHON) setup.py install --install-lib /packages/ --root $(2) endef PYAGENTX_URI="https://github.com/ondrejmular/pyagentx/archive/v${PYAGENTX_VERSION}.tar.gz" stamps/download_pyagentx: if ENABLE_DOWNLOAD if [ ! -f ${abs_top_builddir}/rpm/pyagentx-${PYAGENTX_VERSION}.tar.gz ]; then \ $(WGET) -q -O ${abs_top_builddir}/rpm/pyagentx-${PYAGENTX_VERSION}.tar.gz ${PYAGENTX_URI}; \ fi endif touch $@ if PIP_HAS_ISOLATION pipopts = --no-build-isolation endif stamps/download_python_deps: rpm/requirements.txt stamps/download_pyagentx dev_requirements.txt if ENABLE_DOWNLOAD PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring $(PIP) download --disable-pip-version-check --no-deps $(pipopts) --dest rpm/ --no-binary :all: -r rpm/requirements.txt endif touch $@ stamps/install_python_devel_deps: dev_requirements.txt if DEV_TESTS PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring $(PIP) install --upgrade -r $< endif touch $@ stamps/install_python_parallel_tests_deps: parallel_tests_requirements.txt if PARALLEL_TESTS PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring $(PIP) install --upgrade -r $< endif touch $@ if ENABLE_DOWNLOAD stamps/untar_python_src: stamps/download_python_deps else stamps/untar_python_src: endif $(MKDIR_P) ${abs_top_builddir}/$(PCS_BUNDLED_DIR_LOCAL)/src/ src=`ls -1 ${abs_top_builddir}/rpm/*.tar.gz ${abs_top_srcdir}/rpm/*.tar.gz 2>/dev/null || true | sort -u | grep -v pcs- || true` && \ for i in $$src; do \ $(TAR) xvz -C ${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/src -f $$i; \ done; touch $@ stamps/install_python_embedded_mods_local: stamps/untar_python_src if LOCAL_BUILD for i in ${abs_top_builddir}/$(PCS_BUNDLED_DIR_LOCAL)/src/*; do \ $(call build_python_bundle,$$i,/${abs_top_builddir}/$(PCS_BUNDLED_DIR_LOCAL)); \ done; endif touch $@ install_python_embedded_mods: if LOCAL_BUILD for i in ${abs_top_builddir}/$(PCS_BUNDLED_DIR_LOCAL)/src/*; do \ $(call build_python_bundle,$$i,$(or ${DESTDIR}, /)/$(PCS_BUNDLED_DIR)); \ done; endif stamps/install_ruby_deps_local: if LOCAL_BUILD if INSTALL_EMBEDDED_GEMS if ENABLE_DOWNLOAD rm -rf .bundle Gemfile.lock $(MKDIR_P) .bundle echo '---' > 
.bundle/config echo 'BUNDLE_DISABLE_SHARED_GEMS: "true"' >> .bundle/config echo 'BUNDLE_PATH: "$(PCSD_BUNDLED_DIR_ROOT_LOCAL)"' >> .bundle/config echo 'BUNDLE_CACHE_PATH: "$(PCSD_BUNDLED_CACHE_DIR)"' >> .bundle/config echo 'BUNDLE_BUILD: \"$(ruby_LIBS)\"' >> .bundle/config $(BUNDLE) cp -rp $(PCSD_BUNDLED_DIR_LOCAL)/* $(PCSD_BUNDLED_DIR_ROOT_LOCAL)/ rm -rf $$(realpath $(PCSD_BUNDLED_DIR_LOCAL)/../) rm -rf .bundle Gemfile.lock else gem_files=`$(FIND) "$(PCSD_BUNDLED_CACHE_DIR)" -type f -name '*.gem'` && \ if test "x$${gem_files}" != "x"; then \ $(GEM) install \ --force --verbose --no-document --local --no-user-install \ -i "$(PCSD_BUNDLED_DIR_ROOT_LOCAL)" \ $${gem_files} \ -- \ '--with-ldflags=$(ruby_LIBS)'; \ fi endif endif touch $@ endif stamps/download_rpm_ruby_deps: stamps/install_ruby_deps_local if ENABLE_DOWNLOAD cp ${PCSD_BUNDLED_CACHE_DIR}/*.gem rpm/ || true endif touch $@ install-exec-local: install_python_embedded_mods stamps/install_ruby_deps_local $(MKDIR_P) $(or ${DESTDIR}, /)/$(PYTHON_SITELIB)/pcs/ $(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS} --record $(or ${DESTDIR}, /)/$(PYTHON_SITELIB)/pcs/deinstall.txt \ --install-scripts $(SBINDIR) --install-purelib $(PYTHON_SITELIB) --install-platlib $(PYTHON_SITELIB) $(MKDIR_P) ${DESTDIR}/$(LIB_DIR)/pcs mv ${DESTDIR}/$(SBINDIR)/pcs_internal ${DESTDIR}/$(LIB_DIR)/pcs/pcs_internal mv ${DESTDIR}/$(SBINDIR)/pcs_snmp_agent ${DESTDIR}/$(LIB_DIR)/pcs/pcs_snmp_agent uninstall-local: rm -rf $(or ${DESTDIR}, /)/$(PCS_BUNDLED_DIR) mv ${DESTDIR}/$(LIB_DIR)/pcs/pcs_internal ${DESTDIR}/$(SBINDIR)/pcs_internal || : mv ${DESTDIR}/$(LIB_DIR)/pcs/pcs_snmp_agent ${DESTDIR}/$(SBINDIR)/pcs_snmp_agent || : for i in $(shell cat $(or ${DESTDIR}, /)/$(PYTHON_SITELIB)/pcs/deinstall.txt); do \ rm -rf $(or ${DESTDIR}, /)/$${i}; \ done rm -rf $(or ${DESTDIR}, /)/$(PYTHON_SITELIB)/pcs/deinstall.txt rmdir ${DESTDIR}/$(LIB_DIR)/pcs dist_doc_DATA = README.md CHANGELOG.md # testing if CONCISE_TESTS python_test_options = else python_test_options = -v --vanilla endif pylint: if DEV_TESTS if PARALLEL_PYLINT pylint_options = --jobs=0 else pylint_options = endif export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ $(TIME) $(PYTHON) -m pylint --rcfile pylintrc --persistent=n --reports=n --score=n --disable similarities ${pylint_options} ${PCS_PYTHON_PACKAGES} endif black_check: pyproject.toml if DEV_TESTS export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ $(TIME) $(PYTHON) -m black --config pyproject.toml --check ${PCS_PYTHON_PACKAGES} endif black: pyproject.toml if DEV_TESTS export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ $(PYTHON) -m black --config pyproject.toml ${PCS_PYTHON_PACKAGES} endif mypy: if DEV_TESTS export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ $(TIME) $(PYTHON) -m mypy --config-file mypy.ini --package pcs endif tests_tier0: export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ $(PYTHON) ${abs_builddir}/pcs_test/suite.py ${python_test_options} --tier0 tests_tier1: if EXECUTE_TIER1_TESTS export PYTHONPATH=${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL}/packages && \ $(PYTHON) ${abs_builddir}/pcs_test/suite.py $(python_test_options) --tier1 endif pcsd-tests: GEM_HOME=${abs_top_builddir}/${PCSD_BUNDLED_DIR_ROOT_LOCAL} \ $(RUBY) \ -I${abs_top_builddir}/pcsd \ -I${abs_top_builddir}/pcsd/test \ ${abs_top_builddir}/pcsd/test/test_all_suite.rb if LOCAL_BUILD check-local-deps: 
stamps/install_python_embedded_mods_local stamps/install_ruby_deps_local stamps/install_python_devel_deps stamps/install_python_parallel_tests_deps else check-local-deps: endif all: check-local-deps test-tree-prep: if [ "${abs_top_builddir}" != "${abs_top_srcdir}" ]; then \ echo "Generating builddir symlinks for testing"; \ src_realpath=$(shell realpath ${abs_top_srcdir}); \ for i in `find "$$src_realpath/" -type d | \ grep -v "${abs_top_builddir}" | \ sed -e 's#^'$$src_realpath'/##g'`; do \ $(MKDIR_P) ${abs_top_builddir}/$${i}; \ done; \ find "$$src_realpath/" -type f | { while read src; do \ process=no; \ copy=no; \ case $$src in \ ${abs_top_builddir}*) \ ;; \ *Makefile.*|*.in) \ ;; \ *pcs_test/resources/*.conf) \ copy=yes; \ ;; \ *pcs_test/resources/qdevice-certs*) \ copy=yes; \ ;; \ *pcsd/test/*.conf*) \ copy=yes; \ ;; \ *) \ process=yes; \ ;; \ esac ; \ dst=`echo $$src | sed -e 's#^'$$src_realpath'/##g'`; \ if [ $${process} == yes ]; then \ rm -f ${abs_top_builddir}/$$dst; \ $(LN_S) $$src ${abs_top_builddir}/$$dst; \ fi; \ if [ $${copy} == yes ]; then \ rm -f ${abs_top_builddir}/$$dst; \ cp $$src ${abs_top_builddir}/$$dst; \ chmod u+w ${abs_top_builddir}/$$dst; \ fi; \ done; }; \ fi test-tree-clean: if [ "${abs_top_builddir}" != "${abs_top_srcdir}" ]; then \ echo "Cleaning symlinks for testing" ; \ find "${abs_top_builddir}/" -type l -delete; \ find ${abs_top_builddir} -type d -name qdevice-certs -exec rm -rf {} \; 2>/dev/null || : ;\ find ${abs_top_builddir} -type f -name "*.conf*" -exec rm -rf {} \; 2>/dev/null || : ;\ find "${abs_top_builddir}/" -type d -empty -delete; \ fi find ${abs_top_builddir} -type d -name __pycache__ -exec rm -rf {} \; 2>/dev/null || : check-local: check-local-deps test-tree-prep pylint black_check mypy tests_tier0 tests_tier1 pcsd-tests test-tree-clean clean-local: test-tree-clean $(PYTHON) setup.py clean rm -rf Gemfile.lock .bundle pcs_test/resources/temp rm -rf $(PACKAGE_NAME)-$(VERSION).tar.* rpm/*tar* rpm/*.gem rpm/*.rpm rm -rf stamps/* # this will get rid of "libtoolized" m4 files distclean-local: rm -rf Gemfile rm -rf .mypy_cache rm -rf rpm/requirements.txt rpm/Gemfile rpm/pcs-* rm -rf stamps rm -rf ${abs_top_builddir}/${PCS_BUNDLED_DIR_LOCAL} ${abs_top_builddir}/${PCSD_BUNDLED_DIR_ROOT_LOCAL} rm -rf $(filter-out \ $(top_builddir)/m4/ac_compare_versions.m4 \ $(top_builddir)/m4/ac_pip_module.m4 \ $(top_builddir)/m4/ac_ruby_gem.m4 \ $(top_builddir)/m4/ax_prog_date.m4, \ $(wildcard $(top_builddir)/m4/*.m4)) # release/versioning BUILT_SOURCES = .version .version: echo $(VERSION) > $@-t && mv $@-t $@ dist-hook: gen-ChangeLog echo $(VERSION) > $(distdir)/.tarball-version echo $(SOURCE_EPOCH) > $(distdir)/source_epoch gen_start_date = 2000-01-01 .PHONY: gen-ChangeLog gen-ChangeLog: if test -d $(abs_srcdir)/.git; then \ LC_ALL=C $(top_srcdir)/make/gitlog-to-changelog \ --since=$(gen_start_date) > $(distdir)/cl-t; \ rm -f $(distdir)/ChangeLog; \ mv $(distdir)/cl-t $(distdir)/ChangeLog; \ fi ## make rpm/srpm section. 
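# Typical usage of the rules below (a sketch; assumes rpmbuild and the build
# dependencies listed in the generated spec are installed):
#   make srpm   # generate the spec, build the release tarballs, produce a source rpm in rpm/
#   make rpm    # same, plus build the binary rpms in rpm/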
$(SPEC): $(SPEC).in .version config.status stamps/download_python_deps stamps/download_rpm_ruby_deps rm -f $@-t $@ date="`LC_ALL=C $(UTC_DATE_AT)$(SOURCE_EPOCH) "+%a %b %d %Y"`" && \ gvgver="`cd $(abs_srcdir); make/git-version-gen --fallback $(VERSION) .tarball-version .gitarchivever`" && \ if [ "$$gvgver" = "`echo $$gvgver | sed 's/-/./'`" ];then \ rpmver="$$gvgver" && \ alphatag="" && \ dirty="" && \ numcomm="0"; \ else \ gitver="`echo $$gvgver | sed 's/\(.*\)\./\1-/'`" && \ rpmver=`echo $$gitver | sed 's/-.*//g'` && \ alphatag=`echo $$gvgver | sed 's/[^-]*-\([^-]*\).*/\1/'` && \ numcomm=`echo $$gitver | sed 's/[^-]*-\([^-]*\).*/\1/'` && \ dirty="" && \ if [ "`echo $$gitver | sed 's/^.*-dirty$$//g'`" = "" ];then \ dirty="dirty"; \ fi \ fi && \ if [ -n "$$dirty" ]; then dirty="dirty"; else dirty=""; fi && \ if [ "$$numcomm" = "0" ]; then \ sed \ -e "s#@version@#$$rpmver#g" \ -e "s#%glo.*alpha.*##g" \ -e "s#%glo.*numcomm.*##g" \ -e "s#@dirty@#$$dirty#g" \ -e "s#@date@#$$date#g" \ -e "s#@pcs_bundled_dir@#${PCS_BUNDLED_DIR_LOCAL}#g" \ $(abs_srcdir)/$@.in > $@-t; \ else \ sed \ -e "s#@version@#$$rpmver#g" \ -e "s#@alphatag@#$$alphatag#g" \ -e "s#@numcomm@#$$numcomm#g" \ -e "s#@dirty@#$$dirty#g" \ -e "s#@date@#$$date#g" \ -e "s#@pcs_bundled_dir@#${PCS_BUNDLED_DIR_LOCAL}#g" \ $(abs_srcdir)/$@.in > $@-t; \ fi; \ if [ -z "$$dirty" ]; then sed -i -e "s#%glo.*dirty.*##g" $@-t; fi && \ sed -i -e "s#@pyagentx_version@#${PYAGENTX_VERSION}#g" $@-t && \ pylist="`ls rpm/*.tar.gz | grep -v ^rpm/pyagentx- | grep -v ^rpm/pcs- | sed -e 's#rpm/##g' -e 's#.tar.gz##'`" && \ pysrc="`base=42; for i in $$pylist; do echo 'Source'$$base': '$$i'.tar.gz' && let "base=base+1"; done`" && \ $(AWK) -i inplace -v r="$$pysrc" '{gsub(/@pysrc@/,r)}1' $@-t; \ pybundle="`for i in $$pylist; do echo $$i | grep -v ^distro- | grep -v ^dataclasses- | sed 's/\(.*\)-\(.*\)/Provides: bundled(\1) = \2/'; done`" && \ $(AWK) -i inplace -v r="$$pybundle" '{gsub(/@pybundle@/,r)}1' $@-t; \ pydataclassesbundle="`for i in $$pylist; do echo $$i | grep ^dataclasses- | sed 's/\(.*\)-\(.*\)/Provides: bundled(\1) = \2/'; done`" && \ $(AWK) -i inplace -v r="$$pydataclassesbundle" '{gsub(/@pydataclassesbundle@/,r)}1' $@-t; \ pydistrobundle="`for i in $$pylist; do echo $$i | grep ^distro- | sed 's/\(.*\)-\(.*\)/Provides: bundled(\1) = \2/'; done`" && \ $(AWK) -i inplace -v r="$$pydistrobundle" '{gsub(/@pydistrobundle@/,r)}1' $@-t; \ pycache="`echo $(MKDIR_P) $(PCS_BUNDLED_DIR_LOCAL)/src; base=41; for i in $$pylist pyagentx; do echo 'cp -f %SOURCE'$$base' rpm/' && let "base=base+1"; done`" && \ $(AWK) -i inplace -v r="$$pycache" '{gsub(/@pycache@/,r)}1' $@-t; \ gemlist="`for i in $$($(FIND) rpm/ -type f -name '*.gem'); do echo $$i | sed -e 's#rpm/##g' -e 's#.gem##g'; done`" && \ gemsrc="`base=80; for i in $$gemlist; do echo 'Source'$$base': '$$i'.gem' && let "base=base+1"; done`" && \ $(AWK) -i inplace -v r="$$gemsrc" '{gsub(/@gemsrc@/,r)}1' $@-t; \ gembundle="`for i in $$gemlist; do echo $$i | sed 's/\(.*\)-\(.*\)/Provides: bundled(\1) = \2/'; done`" && \ $(AWK) -i inplace -v r="$$gembundle" '{gsub(/@gembundle@/,r)}1' $@-t; \ gemcache="`echo $(MKDIR_P) $(PCSD_BUNDLED_CACHE_DIR); base=80; for i in $$gemlist; do echo 'cp -f %SOURCE'$$base' $(PCSD_BUNDLED_CACHE_DIR)' && let "base=base+1"; done`" && \ $(AWK) -i inplace -v r="$$gemcache" '{gsub(/@gemcache@/,r)}1' $@-t; chmod a-w $@-t mv $@-t $@ rm -f $@-t* $(TARFILES): $(MAKE) dist cp $(TARFILES) $(abs_top_builddir)/rpm RPMBUILDOPTS = --define "_sourcedir $(abs_top_builddir)/rpm" \ --define "_specdir 
$(abs_top_builddir)/rpm" \ --define "_builddir $(abs_top_builddir)/rpm" \ --define "_srcrpmdir $(abs_top_builddir)/rpm" \ --define "_rpmdir $(abs_top_builddir)/rpm" srpm: clean $(MAKE) $(SPEC) $(TARFILES) rpmbuild $(RPMBUILDOPTS) --nodeps -bs $(SPEC) rpm: clean $(MAKE) $(SPEC) $(TARFILES) rpmbuild $(RPMBUILDOPTS) -ba $(SPEC) clean-generic: rm -rf $(SPEC) $(TARFILES) $(PACKAGE_NAME)-$(VERSION) *.rpm pcs-0.10.11/README.md000066400000000000000000000121571412706364600137350ustar00rootroot00000000000000## PCS - Pacemaker/Corosync Configuration System Pcs is a Corosync and Pacemaker configuration tool. It permits users to easily view, modify and create Pacemaker-based clusters. Pcs contains pcsd, a pcs daemon, which operates as a remote server for pcs and provides a web UI. --- ### Pcs Versions There are three pcs branches: * master * This is where pcs-0.11 lives. * Clusters running Pacemaker 2.1+ on top of Corosync 3.x are supported. * The main development happens here. * pcs-0.10 * Clusters running Pacemaker 2.0+ on top of Corosync 3.x are supported. * This branch is in maintenance mode - bugs are being fixed but only a subset of new features lands here. * pcs-0.9 * Clusters running Pacemaker 1.x on top of Corosync 2.x or Corosync 1.x with CMAN are supported. * This branch is no longer maintained. --- ### Dependencies These are the runtime dependencies of pcs and pcsd: * python 3.6+ * python3-cryptography * python3-dateutil 2.7.0+ * python3-distro (for python 3.8+) * python3-lxml * python3-pycurl * python3-setuptools * python3-setuptools\_scm * python3-pyparsing * python3-tornado 6.1.0+ * python dataclasses (`pip install dataclasses`; required only for python 3.6, already included in 3.7+) * [dacite](https://github.com/konradhalas/dacite) * ruby 2.2.0+ * killall (package psmisc) * corosync 3.x * pacemaker 2.x It is also recommended to have these: * python3-clufter * liberation fonts (package liberation-sans-fonts or fonts-liberation or fonts-liberation2) * overpass fonts (package overpass-fonts) --- ### Installation from Source Apart from the dependencies listed above, these are also required for installation: * python development files (packages python3-devel, python3-setuptools, python3-setuptools\_scm, python3-wheel) * ruby development files (package ruby-devel) * rubygems * rubygem bundler (package rubygem-bundler or ruby-bundler or bundler) * autoconf, automake * gcc * gcc-c++ * FFI development files (package libffi-devel or libffi-dev) * fontconfig * printf (package coreutils) * redhat-rpm-config (if you are using Fedora) * wget (to download bundled libraries) During the installation, all required rubygems are automatically downloaded and compiled. To install pcs and pcsd run the following in a terminal: ```shell ./autogen.sh ./configure # alternatively './configure --enable-local-build' can be used to also download # missing dependencies make make install ``` If you are using GNU/Linux with systemd, it is now time to: ```shell systemctl daemon-reload ``` Start pcsd and make it start on boot: ```shell systemctl start pcsd systemctl enable pcsd ``` --- ### Packages Currently this is built into Fedora, RHEL and its clones and Debian and its derivatives. * [Fedora package git repositories](https://src.fedoraproject.org/rpms/pcs) * [Current Fedora .spec](https://src.fedoraproject.org/rpms/pcs/blob/master/f/pcs.spec) * [Debian-HA project home page](https://wiki.debian.org/Debian-HA) --- ### Quick Start * **Authenticate cluster nodes** Set the same password for the `hacluster` user on all nodes.
```shell passwd hacluster ``` To authenticate the nodes, run the following command on one of the nodes (replacing node1, node2, node3 with a list of nodes in your future cluster). Specify all your cluster nodes in the command. Make sure pcsd is running on all nodes. ```shell pcs host auth node1 node2 node3 -u hacluster ``` * **Create a cluster** To create a cluster, run the following command on one node (replacing cluster\_name with a name of your cluster and node1, node2, node3 with a list of nodes in the cluster). `--start` and `--enable` will start your cluster and configure the nodes to start the cluster on boot, respectively. ```shell pcs cluster setup cluster_name node1 node2 node3 --start --enable ``` * **Check the cluster status** After a few moments the cluster should start up and you can get the status of the cluster. ```shell pcs status ``` * **Add cluster resources** After this you can add stonith agents and resources: ```shell pcs stonith create --help ``` and ```shell pcs resource create --help ``` --- ### Accessing the Web UI Apart from the command line interface, you can use the web user interface to view and configure your cluster. To access the web UI, open a browser to the following URL (replace nodename with an address of your node): ``` https://nodename:2224 ``` Log in as the `hacluster` user. --- ### Further Documentation [ClusterLabs website](https://clusterlabs.org) is an excellent place to learn more about Pacemaker clusters. * [ClusterLabs quick start](https://clusterlabs.org/quickstart.html) * [Clusters from Scratch](https://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/2.0/html/Clusters_from_Scratch/index.html) * [ClusterLabs documentation page](https://clusterlabs.org/pacemaker/doc/) --- ### Inquiries If you have any bug reports or feature requests please feel free to open a github issue on the pcs project. Alternatively you can use the ClusterLabs [users mailing list](https://oss.clusterlabs.org/mailman/listinfo/users), which is also a great place to ask questions related to Pacemaker clusters. pcs-0.10.11/RELEASE.md000066400000000000000000000013551412706364600140560ustar00rootroot00000000000000## How to release a new pcs version ### Bump changelog version * Run `make -f make/release.mk bump-changelog version=<version>`. * This will create a commit with the updated CHANGELOG.md * Merge the commit to upstream (via PR or push it directly) ### Create tarballs with new release version * Run `make -f make/release.mk tarballs version=<version> "configure_options=--enable-local-build"` * The <version> should be the next pcs version (e.g. version=0.10.9) * Test the generated tarballs ### Create annotated tag * Run `make -f make/release.mk tag version=<version> "configure_options=--enable-local-build" release=yes` * If your upstream remote branch is origin, run `make -f make/release.mk publish release=yes` or `git push <remote> <tag>` pcs-0.10.11/autogen.sh000077500000000000000000000002651412706364600144530ustar00rootroot00000000000000#!/bin/sh # # Copyright (C) 2020 Red Hat, Inc. All rights reserved. # # Run this to generate all the initial makefiles, etc. autoreconf -i -v && echo Now run ./configure and make pcs-0.10.11/configure.ac000066400000000000000000000437031412706364600147430ustar00rootroot00000000000000# Process this file with autoconf to produce a configure script.
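# Rough structure, for orientation: sanitize install paths, detect the
# toolchain (GNU make, Python >= 3.6, pip, Ruby >= 2.5, gem), pick distro
# defaults from /etc/os-release, then record any python modules / ruby gems
# to be bundled when --enable-local-build is given.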
AC_PREREQ([2.63]) AC_INIT([pcs], m4_esyscmd([make/git-version-gen .tarball-version .gitarchivever]), [developers@clusterlabs.org]) AC_CONFIG_AUX_DIR([.]) AM_INIT_AUTOMAKE([dist-bzip2 dist-xz -Wno-portability tar-pax]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_SRCDIR([setup.py.in]) AC_CANONICAL_HOST AC_LANG([C]) # Sanitize path if test "$prefix" = "NONE"; then prefix="/usr" if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi if test "$libdir" = "\${exec_prefix}/lib"; then if test -e /usr/lib64; then libdir="/usr/lib64" else libdir="/usr/lib" fi fi fi case $exec_prefix in NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac # need to expand a bunch of paths to make sure # the embedded values in files are absolute paths eval SBINDIR="`eval echo ${sbindir}`" AC_SUBST([SBINDIR]) eval LOCALSTATEDIR="`eval echo ${localstatedir}`" AC_SUBST([LOCALSTATEDIR]) eval LIBDIR="`eval echo ${libdir}`" # Checks for programs. # check stolen from gnulib/m4/gnu-make.m4 if ! ${MAKE-make} --version /cannot/make/this >/dev/null 2>&1; then AC_MSG_ERROR([you don't seem to have GNU make; it is required]) fi AC_PROG_LN_S AC_PROG_INSTALL AC_PROG_MAKE_SET AC_PROG_AWK AC_PROG_MKDIR_P PKG_PROG_PKG_CONFIG # check for python AM_PATH_PYTHON([3.6]) eval PYTHON_SITELIB="`eval echo ${pythondir}`" AC_SUBST([PYTHON_SITELIB]) # required to detect / install python modules if ! $PYTHON -m pip > /dev/null 2>&1; then AC_MSG_ERROR([Python module pip not found]) fi PIP="$PYTHON -m pip" AC_SUBST([PIP]) # use a wrapper to call into PKG_CHECK_VAR to allow to set a default AC_DEFUN([PCS_PKG_CHECK_VAR], [ varname=$1 default=$4 AC_MSG_CHECKING([for pkg-conf $2 var $3]) PKG_CHECK_VAR([$1], [$2], [$3]) AS_VAR_IF([$1], [""], [AS_VAR_IF([default], [""], AC_MSG_ERROR([not found]), [AS_VAR_COPY([$varname], [default]) && AC_MSG_RESULT([not found, using default ${!varname}])])], [AC_MSG_RESULT([yes (detected: ${!varname})])]) ]) # check for systemd PKG_CHECK_MODULES([systemd], [systemd]) PCS_PKG_CHECK_VAR([SYSTEMD_UNIT_DIR_TMP], [systemd], [systemdsystemunitdir], [/usr/lib/systemd/system]) if test "${prefix}" != "/usr"; then SYSTEMD_UNIT_DIR="${prefix}/$SYSTEMD_UNIT_DIR_TMP" else SYSTEMD_UNIT_DIR="$SYSTEMD_UNIT_DIR_TMP" fi AC_SUBST([SYSTEMD_UNIT_DIR]) PCS_PKG_CHECK_VAR([SYSTEMD_UNIT_PATH], [systemd], [systemdsystemunitpath], [/etc/systemd/system:/etc/systemd/system:/run/systemd/system:/usr/local/lib/systemd/system:/usr/lib/systemd/system:/usr/lib/systemd/system:/lib/systemd/system]) AC_SUBST([SYSTEMD_UNIT_PATH]) # check for ruby AC_PATH_PROG([RUBY], [ruby]) if test x$RUBY = x; then AC_MSG_ERROR([Unable to find ruby binary]) fi # opensuse has a versioned ruby-$version.pc file # that does not match fedora or rhel ruby.pc # so we need to detect it rubymod=`pkg-config --list-all | awk '{print $1}' | grep ^ruby | sort -n | tail -n 1` PKG_CHECK_MODULES([ruby], [$rubymod >= 2.5]) PCS_PKG_CHECK_VAR([RUBY_VER], [$rubymod], [ruby_version]) AC_CHECK_PROGS([GEM], [gem]) if test "x$GEM" = "x"; then AC_MSG_ERROR([Unable to find gem binary]) fi # used to measure time for some tests, not critical if not available AC_CHECK_PROGS([TIME], [time]) # required to build rpm and pyagentx AC_CHECK_PROGS([TAR], [tar]) if test "x$TAR" = "x"; then AC_MSG_ERROR([Unable to find tar binary.]) fi # configure options section AC_ARG_ENABLE([dev-tests], [AS_HELP_STRING([--enable-dev-tests], [Enable extra developers tests (black, mypy, pylint) (default: no)])], [dev_tests="yes"]) 
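# Each --enable-* test switch below is mirrored into an AM_CONDITIONAL so
# that Makefile.am can gate the corresponding targets (e.g. DEV_TESTS
# guards the black / mypy / pylint rules).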
AM_CONDITIONAL([DEV_TESTS], [test "x$dev_tests" = "xyes"]) AC_ARG_ENABLE([destructive-tests], [AS_HELP_STRING([--enable-destructive-tests], [Automatically execute potentially dangerous tests when running make check (default: no)])], [destructive_tests="yes"]) AM_CONDITIONAL([EXECUTE_TIER1_TESTS], [test "x$destructive_tests" = "xyes"]) AC_ARG_ENABLE([concise-tests], [AS_HELP_STRING([--enable-concise-tests], [Make tests output brief by not printing a name of each test (default: no)])], [concise_tests="yes"]) AM_CONDITIONAL([CONCISE_TESTS], [test "x$concise_tests" = "xyes"]) AC_ARG_ENABLE([parallel-tests], [AS_HELP_STRING([--enable-parallel-tests], [Python parallel testing requires pip modules that are not packaged anywhere (default: no)])], [parallel_tests="yes"]) AM_CONDITIONAL([PARALLEL_TESTS], [test "x$parallel_tests" = "xyes"]) AC_ARG_ENABLE([parallel-pylint], [AS_HELP_STRING([--enable-parallel-pylint], [Enable running pylint in multiple threads (default: no)])], [parallel_pylint="yes"]) AM_CONDITIONAL([PARALLEL_PYLINT], [test "x$parallel_pylint" = "xyes"]) AC_ARG_ENABLE([local-build], [AS_HELP_STRING([--enable-local-build], [Download and install all dependencies as user / bundles])], [local_build="yes"]) AM_CONDITIONAL([LOCAL_BUILD], [test "x$local_build" = "xyes"]) AC_ARG_ENABLE([tests-only], [AS_HELP_STRING([--enable-tests-only], [Check only for tests dependencies])], [tests_only="yes"]) AC_ARG_ENABLE([individual-bundling], [AS_HELP_STRING([--enable-individual-bundling], [Bundle only missing python packages /ruby gems instead of all of them])], [individual_bundling="yes"]) AC_ARG_ENABLE([use-local-cache-only], [AS_HELP_STRING([--enable-use-local-cache-only], [Use only local cache to build bundles and disable downloads])], [cache_only="yes"]) # this will catch both ID and ID_LIKE from os-release AC_ARG_WITH([distro], [AS_HELP_STRING([--with-distro=DIR], [Set defaults to specified distro. Default: autodetected])], [DISTRO="$withval"]) if test "x$cache_only" != "xyes"; then AC_CHECK_PROGS([WGET], [wget]) if test "x$WGET" = "x"; then AC_MSG_ERROR([Unable to find wget binary.]) fi fi # required by pcsd build/install to symlink BaseOS fonts if test "x$tests_only" != "xyes"; then AC_CHECK_PROGS([FCMATCH], [fc-match]) if test "x$FCMATCH" = "x"; then AC_MSG_ERROR([Unable to find fc-match binary]) fi fi if test "x$DISTRO" = "x"; then AC_MSG_CHECKING([linux distribution]) if test -f /etc/os-release; then DISTRO=$(cat /etc/os-release | grep ^ID= | cut -d "=" -f 2 | sed -s 's#"##g') if test "x$DISTRO" = "x"; then AC_MSG_ERROR([Unable to detect linux distribution. Please specify --with-distro=]) fi DISTROS=$(cat /etc/os-release | grep ^ID_LIKE= | cut -d "=" -f 2 | sed -s 's#"##g') fi AC_MSG_RESULT([$DISTRO]) else AC_MSG_RESULT([Distro detection disabled. Setting forced to: $DISTRO]) fi AC_MSG_CHECKING([default settings for $DISTRO $DISTROS]) for i in $DISTRO $DISTROS; do case $i in debian|ubuntu) FOUND_DISTRO=1 CONFIGDIR="$sysconfdir/default" PCSLIBDIR="$prefix/share" PCMKDAEMONDIR="$prefix/lib/pacemaker" COROSYNCLOGDIR="$localstatedir/log/corosync" DISTROEXT=debian break ;; fedora|rhel|centos|opensuse*) FOUND_DISTRO=1 CONFIGDIR="$sysconfdir/sysconfig" PCSLIBDIR="$LIBDIR" PCMKDAEMONDIR="$prefix/libexec/pacemaker" COROSYNCLOGDIR="$localstatedir/log/cluster" DISTROEXT=fedora break ;; esac done if test "x$FOUND_DISTRO" = "x"; then AC_MSG_RESULT([not found]) AC_MSG_ERROR([Unknown distribution $DISTRO. 
Please contact pcs upstream project to add support, or check --with-distro= value]) else AC_MSG_RESULT([$i (or alike) default settings will be used]) fi AC_SUBST([DISTROEXT]) AC_ARG_WITH([default-config-dir], [AS_HELP_STRING([--with-default-config-dir=DIR], [pcs config directory. Default: autodetected])], [CONF_DIR="$withval"], [CONF_DIR="$CONFIGDIR"]) AC_SUBST([CONF_DIR]) AC_ARG_WITH([pcs-lib-dir], [AS_HELP_STRING([--with-pcs-lib-dir=DIR], [pcs lib directory. Default: autodetected])], [LIB_DIR="$withval"], [LIB_DIR="$PCSLIBDIR"]) AC_SUBST([LIB_DIR]) AC_ARG_WITH([snmp-mibs-dir], [AS_HELP_STRING([--with-snmp-mibs-dir=DIR], [snmp MIB directory. Default: $prefix/share/snmp/mibs])], [SNMP_MIB_DIR="$withval"], [SNMP_MIB_DIR="$prefix/share/snmp/mibs"]) AC_SUBST([SNMP_MIB_DIR]) # python detection section PCS_BUNDLED_DIR_LOCAL="pcs_bundled" AC_SUBST([PCS_BUNDLED_DIR_LOCAL]) PCS_BUNDLED_DIR="$LIB_DIR/pcs/$PCS_BUNDLED_DIR_LOCAL" AC_SUBST([PCS_BUNDLED_DIR]) mkdir -p $ac_pwd/stamps mkdir -p $ac_pwd/rpm/ rm -rf $ac_pwd/rpm/requirements.txt touch $ac_pwd/rpm/requirements.txt # PCS_BUNDLE_PYMOD([module], [version]) AC_DEFUN([PCS_BUNDLE_PYMOD], [ echo "$1 $2" | sed -e 's# ##g' >> $ac_pwd/rpm/requirements.txt if test "x$cache_only" = "xyes"; then src=`ls rpm/$1-*` if test "x$src" = "x"; then AC_MSG_ERROR([cache only build required but no source detected in rpm/]) fi fi ]) # PCS_CHECK_PYMOD([module], [version], [embedded=yes]) AC_DEFUN([PCS_CHECK_PYMOD], [ if test "x$local_build" = "xyes" && test "x$3" = "xyes"; then AC_PIP_MODULE([$1], [$2], [bundle_module=no], [bundle_module=yes], [bundle_module=yes]) if test "x$bundle_module" = "xyes" || test "x$individual_bundling" != "xyes"; then PCS_BUNDLE_PYMOD([$1], [$2]) fi else AC_PIP_MODULE([$1], [$2], [], [AC_MSG_ERROR([Python module $1 not found])]) fi ]) # required by rpm build PYAGENTX_VERSION=0.4.pcs.2 AC_SUBST([PYAGENTX_VERSION]) # those MUST be available in BaseOS # required for --no-build-isolation AC_PIP_MODULE([pip], [>= 10.0.0], [have_isolation=yes], [have_isolation=no], [have_isolation=no]) AM_CONDITIONAL([PIP_HAS_ISOLATION], [test "x$have_isolation" = xyes]) if test "x$tests_only" != "xyes"; then PCS_CHECK_PYMOD([setuptools]) PCS_CHECK_PYMOD([setuptools-scm]) if test "x$local_build" = "xyes"; then PCS_CHECK_PYMOD([wheel]) fi PCS_CHECK_PYMOD([cryptography]) PCS_CHECK_PYMOD([lxml]) PCS_CHECK_PYMOD([pycurl]) PCS_CHECK_PYMOD([pyparsing]) # those are kind of problematic. 
# use them all from the BaseOS or embedded them all as necessary (--enable-local-build) PCS_CHECK_PYMOD([dacite], [], [yes]) PCS_CHECK_PYMOD([tornado], [>= 6.0.0], [yes]) PCS_CHECK_PYMOD([python-dateutil], [>= 2.7.0], [yes]) # python 3.6 needs dataclasses as well (added in 3.7) if test "$PYTHON_VERSION" = "3.6"; then PCS_CHECK_PYMOD([dataclasses], [], [yes]) fi # python 3.8+ needs distro as well (removed from upstream lib) if printf '%s\n%s\n' "3.8" "$PYTHON_VERSION" | sort -V -C; then PCS_CHECK_PYMOD([distro], [], [yes]) fi # special case, because we need to download from github AC_PIP_MODULE([pyagentx]) if test "x$HAVE_PIPMOD_PYAGENTX" = "xno" && test "x$local_build" != "xyes"; then AC_MSG_ERROR([Python module pyagentx not found]) fi fi # ruby gem section PCSD_BUNDLED_DIR_ROOT_LOCAL="pcsd/vendor/bundle/" PCSD_BUNDLED_DIR_LOCAL="$PCSD_BUNDLED_DIR_ROOT_LOCAL/ruby/$RUBY_VER/" PCSD_BUNDLED_CACHE_DIR="$PCSD_BUNDLED_DIR_ROOT_LOCAL/cache" AC_SUBST([PCSD_BUNDLED_DIR_ROOT_LOCAL]) AC_SUBST([PCSD_BUNDLED_DIR_LOCAL]) AC_SUBST([PCSD_BUNDLED_CACHE_DIR]) rm -rf Gemfile Gemfile.lock echo "source 'https://rubygems.org'" > Gemfile echo "" >> Gemfile # PCS_BUNDLE_GEM([module]) AC_DEFUN([PCS_BUNDLE_GEM], [ echo "gem '$1'" >> Gemfile if test "x$cache_only" = "xyes"; then src=`ls $PCSD_BUNDLED_CACHE_DIR/$1-*` if test "x$src" = "x"; then AC_MSG_ERROR([cache only build required but no source detected in $PCSD_BUNDLED_CACHE_DIR]) fi fi ]) # PCS_CHECK_GEM([module], [version]) AC_DEFUN([PCS_CHECK_GEM], [ if test "x$local_build" = "xyes"; then AC_RUBY_GEM([$1], [$2], [], [PCS_BUNDLE_GEM([$1])]) else AC_RUBY_GEM([$1], [$2], [], [AC_MSG_ERROR([ruby gem $1 not found])]) fi ]) PCS_CHECK_GEM([power_assert]) PCS_CHECK_GEM([test-unit]) if test "x$tests_only" != "xyes"; then PCS_CHECK_GEM([backports]) PCS_CHECK_GEM([daemons]) PCS_CHECK_GEM([ethon]) PCS_CHECK_GEM([ffi]) PCS_CHECK_GEM([eventmachine]) PCS_CHECK_GEM([json]) PCS_CHECK_GEM([mustermann]) PCS_CHECK_GEM([open4]) PCS_CHECK_GEM([rack]) PCS_CHECK_GEM([rack-protection]) PCS_CHECK_GEM([rack-test]) PCS_CHECK_GEM([sinatra]) PCS_CHECK_GEM([tilt]) PCS_CHECK_GEM([thin]) PCS_CHECK_GEM([rexml]) PCS_CHECK_GEM([webrick]) fi if test "x$local_build" = "xyes" && test $(wc -l < Gemfile) -gt 2; then GEM_HOME="$LIB_DIR/$PCSD_BUNDLED_DIR_ROOT_LOCAL" SYSTEMD_GEM_HOME="Environment=GEM_HOME=$GEM_HOME" if test "x$cache_only" != "xyes"; then AC_CHECK_PROGS([BUNDLE], [bundle]) if test "x$BUNDLE" = "x"; then AC_MSG_ERROR([Unable to find bundle binary required to install missing ruby gems]) fi fi fi AC_SUBST([BUNDLE]) AC_SUBST([GEM]) AC_SUBST([GEM_HOME]) AC_SUBST([SYSTEMD_GEM_HOME]) AM_CONDITIONAL([INSTALL_EMBEDDED_GEMS], [test -n "$GEM_HOME"]) AM_CONDITIONAL([ENABLE_DOWNLOAD], [test "x$cache_only" != "xyes"]) # detect different paths required to generate default settings AC_PATH_PROG([BASH], [bash]) if test "x$BASH" = "x"; then AC_MSG_ERROR([Unable to find bash in $PATH]) fi AC_PATH_PROG([SYSTEMCTL], [systemctl]) if test "x$SYSTEMCTL" = "x"; then AC_PATH_PROG([SERVICE], [service]) if test "x$SERVICE" = "x"; then AC_MSG_ERROR([Unable to find systemctl or service in $PATH]) fi fi if test "x$tests_only" != "xyes"; then AC_PATH_PROG([KILLALL], [killall]) if test "x$KILLALL" = "x"; then AC_MSG_ERROR([Unable to find killall in $PATH]) fi fi # yes this is absurd but we need full path for some # python calls AC_PATH_PROG([RM], [rm]) if test "x$RM" = "x"; then AC_MSG_ERROR([Unable to find rm in $PATH]) fi AC_PATH_PROG([FIND], [find]) if test "x$FIND" = "x"; then AC_MSG_ERROR([Unable to find find in 
$PATH]) fi # NOTE: some of those pacemaker var are only available # in pacemaker.pc with pacemaker >= 2.0.5 PCS_PKG_CHECK_VAR([PCMK_USER], [pacemaker], [daemon_user], [hacluster]) PCS_PKG_CHECK_VAR([PCMK_GROUP], [pacemaker], [daemon_group], [haclient]) PCS_PKG_CHECK_VAR([PCMK_DAEMON_DIR], [pacemaker], [daemondir], [$PCMKDAEMONDIR]) PCS_PKG_CHECK_VAR([PCMKEXECPREFIX], [pacemaker], [exec_prefix], [/usr]) PCS_PKG_CHECK_VAR([PCMKPREFIX], [pacemaker], [prefix], [/usr]) if test "$PCMKPREFIX" = "/usr"; then PCMKCONFDIR="/etc" PCMKLOCALSTATEDIR="/var" else PCMKCONFDIR="$PCMKPREFIX/etc" PCMKLOCALSTATEDIR="$PCMKPREFIX/var" fi AC_SUBST([PCMKCONFDIR]) AC_SUBST([PCMKLOCALSTATEDIR]) PCS_PKG_CHECK_VAR([PCMK_CIB_DIR], [pacemaker], [configdir], [/var/lib/pacemaker/cib]) PCS_PKG_CHECK_VAR([PCMK_SCHEMA_DIR], [pacemaker], [schemadir], [/usr/share/pacemaker]) PCS_PKG_CHECK_VAR([COROEXECPREFIX], [corosync], [exec_prefix], [/usr]) PCS_PKG_CHECK_VAR([COROPREFIX], [corosync], [prefix], [/usr]) if test "$COROPREFIX" = "/usr"; then COROCONFDIR="/etc" else COROCONFDIR="$COROPREFIX/etc" fi AC_SUBST([COROCONFDIR]) eval COROSYNCLOGDIR="`eval echo ${COROSYNCLOGDIR}`" PCS_PKG_CHECK_VAR([COROLOGDIR], [corosync], [logdir], [$COROSYNCLOGDIR]) PCS_PKG_CHECK_VAR([COROQDEVEXECPREFIX], [corosync-qdevice], [exec_prefix], [/usr]) PCS_PKG_CHECK_VAR([COROQDEVCONFDIR], [corosync-qdevice], [confdir], [/etc/corosync]) PCS_PKG_CHECK_VAR([SBDCONFDIR], [sbd], [confdir], [$CONFIGDIR]) PCS_PKG_CHECK_VAR([SBDEXECPREFIX], [sbd], [exec_prefix], [/usr]) PCS_PKG_CHECK_VAR([FASEXECPREFIX], [fence-agents], [exec_prefix], [/usr]) PCS_PKG_CHECK_VAR([RA_API_DTD], [resource-agents], [ra_api_dtd], [/usr/share/resource-agents/ra-api-1.dtd]) PCS_PKG_CHECK_VAR([RA_TMP_DIR], [resource-agents], [ra_tmp_dir], [/run/resource-agents]) PCS_PKG_CHECK_VAR([BOOTHCONFDIR], [booth], [confdir], [/etc/booth]) PCS_PKG_CHECK_VAR([BOOTHEXECPREFIX], [booth], [exec_prefix], [/usr]) # required for man page and spec file generation AX_PROG_DATE AS_IF([test "$ax_cv_prog_date_gnu_date:$ax_cv_prog_date_gnu_utc" = yes:yes], [UTC_DATE_AT="date -u -d@"], [AS_IF([test "x$ax_cv_prog_date_bsd_date" = xyes], [UTC_DATE_AT="date -u -r"], [AC_MSG_ERROR([date utility unable to convert epoch to UTC])])]) AC_SUBST([UTC_DATE_AT]) AC_ARG_VAR([SOURCE_EPOCH],[last modification date of the source]) AC_MSG_NOTICE([trying to determine source epoch]) AC_MSG_CHECKING([for source epoch in \$SOURCE_EPOCH]) AS_IF([test -n "$SOURCE_EPOCH"], [AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no]) AC_MSG_CHECKING([for source epoch in source_epoch file]) AS_IF([test -e "$srcdir/source_epoch"], [read SOURCE_EPOCH <"$srcdir/source_epoch" AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no]) AC_MSG_CHECKING([for source epoch baked in by gitattributes export-subst]) SOURCE_EPOCH='1633445798' # template for rewriting by git-archive AS_CASE([$SOURCE_EPOCH], [?Format:*], # was not rewritten [AC_MSG_RESULT([no]) AC_MSG_CHECKING([for source epoch in \$SOURCE_DATE_EPOCH]) AS_IF([test "x$SOURCE_DATE_EPOCH" != x], [SOURCE_EPOCH="$SOURCE_DATE_EPOCH" AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no]) AC_MSG_CHECKING([whether git log can provide a source epoch]) SOURCE_EPOCH=f${SOURCE_EPOCH#\$F} # convert into git log --pretty format SOURCE_EPOCH=$(cd "$srcdir" && git log -1 --pretty=${SOURCE_EPOCH%$} 2>/dev/null) AS_IF([test -n "$SOURCE_EPOCH"], [AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no, using current time and breaking reproducibility]) SOURCE_EPOCH=$(date +%s)])])], [AC_MSG_RESULT([yes])] )]) ]) AC_MSG_NOTICE([using source epoch 
$($UTC_DATE_AT$SOURCE_EPOCH +'%F')]) UTC_DATE=$($UTC_DATE_AT$SOURCE_EPOCH +'%F') AC_SUBST([UTC_DATE]) AC_CONFIG_FILES([Makefile setup.py setup.cfg pcs/Makefile pcs/settings.py pcs/snmp/pcs_snmp_agent.service pcs/snmp/settings.py pcs/snmp/pcs_snmp_agent.8 pcs/pcs.8 pcs_test/Makefile pcs_test/settings.py pcsd/Makefile pcsd/pcsd.8 pcsd/pcsd-cli.rb pcsd/pcsd-ruby.service pcsd/pcsd.service pcsd/settings.rb pcsd/logrotate/pcsd]) AC_CONFIG_FILES([pcs/pcs], [chmod +x pcs/pcs]) AC_CONFIG_FILES([pcs/pcs_internal], [chmod +x pcs/pcs_internal]) AC_CONFIG_FILES([pcs/snmp/pcs_snmp_agent], [chmod +x pcs/snmp/pcs_snmp_agent]) AC_CONFIG_FILES([pcs_test/smoke.sh], [chmod +x pcs_test/smoke.sh]) AC_CONFIG_FILES([pcs_test/pcs_for_tests], [chmod +x pcs_test/pcs_for_tests]) AC_CONFIG_FILES([pcs_test/suite], [chmod +x pcs_test/suite]) AC_CONFIG_FILES([pcs_test/tools/bin_mock/pcmk/crm_resource], [chmod +x pcs_test/tools/bin_mock/pcmk/crm_resource]) AC_CONFIG_FILES([pcsd/pcsd], [chmod +x pcsd/pcsd]) AC_CONFIG_FILES([scripts/pcsd.sh], [chmod +x scripts/pcsd.sh]) AC_OUTPUT pcs-0.10.11/dev_requirements.txt000066400000000000000000000002121412706364600165650ustar00rootroot00000000000000lxml-stubs pylint==2.9.6 astroid==2.6.6 mypy==0.910 black==21.7b0 types-cryptography types-dataclasses types-pycurl types-python-dateutil pcs-0.10.11/m4/000077500000000000000000000000001412706364600127705ustar00rootroot00000000000000pcs-0.10.11/m4/ac_compare_versions.m4000066400000000000000000000025741412706364600172630ustar00rootroot00000000000000dnl @synopsis AC_COMPARE_VERSIONS([verA], [op], [verB] [, action-if-true] [, action-if-false]) dnl dnl Compare two versions based on "op" dnl dnl op can be: dnl dnl lt or < dnl le or <= dnl eq or == dnl ge or >= dnl gt or > dnl dnl @category InstalledPackages dnl @author Fabio M. Di Nitto . dnl @version 2020-11-19 dnl @license AllPermissive AC_DEFUN([AC_COMPARE_VERSIONS],[ result=false verA="$1" op="$2" verB="$3" if test "x$verA" == "x" || test "x$verB" == "x" || test "x$op" == x; then AC_MSG_ERROR([ac_compare_versions: Missing parameters]) fi case "$op" in "lt"|"<") printf '%s\n%s\n' "$verA" "$verB" | sort -V -C if test $? -eq 0 && test "$verA" != "$verB"; then result=true fi ;; "le"|"<=") printf '%s\n%s\n' "$verA" "$verB" | sort -V -C if test $? -eq 0; then result=true fi ;; "eq"|"==") if test "$verB" = "$verA"; then result=true fi ;; "ge"|">=") printf '%s\n%s\n' "$verB" "$verA" | sort -V -C if test $? -eq 0; then result=true fi ;; "gt"|">") printf '%s\n%s\n' "$verB" "$verA" | sort -V -C if test $? -eq 0 && test "$verA" != "$verB"; then result=true fi ;; *) AC_MSG_ERROR([Unknown operand: $op]) ;; esac if test "x$result" = "xtrue"; then true # need to make shell happy if 4 is empty $4 else true # need to make shell happy if 5 is empty $5 fi ]) pcs-0.10.11/m4/ac_pip_module.m4000066400000000000000000000032221412706364600160310ustar00rootroot00000000000000dnl @synopsis AC_PIP_MODULE(modname[, version][, action-if-found][, action-if-not-found][, action-if-version-mismatch][, pythonpath]) dnl dnl Checks for pip module. dnl dnl If fatal is non-empty then absence of a module will trigger an dnl error. dnl dnl @category InstalledPackages dnl @author Fabio M. Di Nitto . 
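dnl
dnl A minimal usage sketch (illustrative only: the module name and the
dnl version constraint below are hypothetical; the argument order is the
dnl one given in the synopsis above):
dnl
dnl   AC_PIP_MODULE([lxml], [>= 4.0.0],
dnl                 [AC_MSG_NOTICE([python module lxml found])],
dnl                 [AC_MSG_ERROR([python module lxml not found])])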
dnl @version 2020-11-19 dnl @license AllPermissive AC_DEFUN([AC_PIP_MODULE],[ module="$1" reqversion="$2" AC_MSG_CHECKING([pip module: $module $reqversion]) pipcommonopts="list --format freeze --disable-pip-version-check" if test -n "$6"; then pipoutput=$(PYTHONPATH=$6 $PIP $pipcommonopts | grep ^${module}==) else pipoutput=$($PIP $pipcommonopts | grep ^${module}==) fi if test "x$pipoutput" != "x"; then curversion=$(echo $pipoutput | sed -e 's#.*==##g') checkver=ok if test "x$reqversion" != x; then comp=$(echo $reqversion | cut -d " " -f 1) tmpversion=$(echo $reqversion | cut -d " " -f 2) AC_COMPARE_VERSIONS([$curversion], [$comp], [$tmpversion], [checkver=ok], [checkver=nok]) fi if test "x$checkver" = "xok"; then AC_MSG_RESULT([yes (detected: $curversion)]) eval AS_TR_CPP(HAVE_PIPMOD_$module)=yes eval AS_TR_CPP(HAVE_PIPMOD_${module}_version)=$curversion $3 else if test -n "$5"; then AC_MSG_RESULT([no (detected: $curversion)]) eval AS_TR_CPP(HAVE_PIPMOD_$module)=no eval AS_TR_CPP(HAVE_PIPMOD_${module}_version)=$curversion $5 else AC_MSG_ERROR([python $module version $curversion detected. Requested "$comp $tmpversion"]) fi fi else AC_MSG_RESULT([no]) eval AS_TR_CPP(HAVE_PIPMOD_$module)=no eval AS_TR_CPP(HAVE_PIPMOD_${module}_version)="" $4 fi ]) pcs-0.10.11/m4/ac_ruby_gem.m4000066400000000000000000000023231412706364600155060ustar00rootroot00000000000000dnl @synopsis AC_RUBY_GEM(gem[, version][, action-if-found][, action-if-not-found][, gemhome]) dnl dnl Checks for Ruby gem. dnl dnl @category InstalledPackages dnl @author Fabio M. Di Nitto . dnl @version 2020-11-23 dnl @license AllPermissive AC_DEFUN([AC_RUBY_GEM],[ module="$1" reqversion="$2" AC_MSG_CHECKING([ruby gem: $module]) if test -n "$5"; then gemoutput=$(GEM_HOME=$5 $GEM list --local | grep "^$module " 2>/dev/null) else gemoutput=$($GEM list --local | grep "^$module " 2>/dev/null) fi if test "x$gemoutput" != "x"; then curversion=$(echo $gemoutput | sed -e 's#.*(##g' -e 's#)##g' -e 's#default: ##g') if test "x$reqversion" != x; then comp=$(echo $reqversion | cut -d " " -f 1) tmpversion=$(echo $reqversion | cut -d " " -f 2) AC_COMPARE_VERSIONS([$curversion], [$comp], [$tmpversion],, [AC_MSG_ERROR([ruby gem $module version $curversion detected. Requested "$comp $tmpversion"])]) fi AC_MSG_RESULT([yes (detected: $curversion)]) eval AS_TR_CPP(HAVE_RUBYGEM_$module)=yes eval AS_TR_CPP(HAVE_RUBYGEM_${module}_version)=$curversion $3 else AC_MSG_RESULT([no]) eval AS_TR_CPP(HAVE_RUBYGEM_$module)=no eval AS_TR_CPP(HAVE_RUBYGEM_${module}_version)="" $4 fi ]) pcs-0.10.11/m4/ax_prog_date.m4000066400000000000000000000114241412706364600156700ustar00rootroot00000000000000# =========================================================================== # https://www.gnu.org/software/autoconf-archive/ax_prog_date.html # =========================================================================== # # SYNOPSIS # # AX_PROG_DATE() # # DESCRIPTION # # This macro tries to determine the type of the date (1) command and some # of its non-standard capabilities. # # The type is determined as follows: # # * If the version string contains "GNU", then: # - The variable ax_cv_prog_date_gnu is set to "yes". # - The variable ax_cv_prog_date_type is set to "gnu". # # * If date supports the "-v 1d" option, then: # - The variable ax_cv_prog_date_bsd is set to "yes". # - The variable ax_cv_prog_date_type is set to "bsd". # # * If both previous checks fail, then: # - The variable ax_cv_prog_date_type is set to "unknown". 
# # The following capabilities of GNU date are checked: # # * If date supports the --date arg option, then: # - The variable ax_cv_prog_date_gnu_date is set to "yes". # # * If date supports the --utc arg option, then: # - The variable ax_cv_prog_date_gnu_utc is set to "yes". # # The following capabilities of BSD date are checked: # # * If date supports the -v 1d option, then: # - The variable ax_cv_prog_date_bsd_adjust is set to "yes". # # * If date supports the -r arg option, then: # - The variable ax_cv_prog_date_bsd_date is set to "yes". # # All the aforementioned variables are set to "no" before a check is # performed. # # LICENSE # # Copyright (c) 2017 Enrico M. Crisostomo # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. 
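#
# A minimal consumption sketch (this mirrors how configure.ac in this
# tree selects a UTC-capable date invocation from the detected cache
# variables; the UTC_DATE_AT variable is a project convention, not an
# output of this macro):
#
#   AX_PROG_DATE
#   AS_IF([test "$ax_cv_prog_date_gnu_date:$ax_cv_prog_date_gnu_utc" = yes:yes],
#     [UTC_DATE_AT="date -u -d@"],
#     [AS_IF([test "x$ax_cv_prog_date_bsd_date" = xyes],
#       [UTC_DATE_AT="date -u -r"],
#       [AC_MSG_ERROR([date utility unable to convert epoch to UTC])])])
#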
#serial 3 AC_DEFUN([AX_PROG_DATE], [dnl AC_CACHE_CHECK([for GNU date], [ax_cv_prog_date_gnu], [ ax_cv_prog_date_gnu=no if date --version 2>/dev/null | head -1 | grep -q GNU then ax_cv_prog_date_gnu=yes fi ]) AC_CACHE_CHECK([for BSD date], [ax_cv_prog_date_bsd], [ ax_cv_prog_date_bsd=no if date -v 1d > /dev/null 2>&1 then ax_cv_prog_date_bsd=yes fi ]) AC_CACHE_CHECK([for date type], [ax_cv_prog_date_type], [ ax_cv_prog_date_type=unknown if test "x${ax_cv_prog_date_gnu}" = "xyes" then ax_cv_prog_date_type=gnu elif test "x${ax_cv_prog_date_bsd}" = "xyes" then ax_cv_prog_date_type=bsd fi ]) AS_VAR_IF([ax_cv_prog_date_gnu], [yes], [ AC_CACHE_CHECK([whether GNU date supports --date], [ax_cv_prog_date_gnu_date], [ ax_cv_prog_date_gnu_date=no if date --date=@1512031231 > /dev/null 2>&1 then ax_cv_prog_date_gnu_date=yes fi ]) AC_CACHE_CHECK([whether GNU date supports --utc], [ax_cv_prog_date_gnu_utc], [ ax_cv_prog_date_gnu_utc=no if date --utc > /dev/null 2>&1 then ax_cv_prog_date_gnu_utc=yes fi ]) ]) AS_VAR_IF([ax_cv_prog_date_bsd], [yes], [ AC_CACHE_CHECK([whether BSD date supports -r], [ax_cv_prog_date_bsd_date], [ ax_cv_prog_date_bsd_date=no if date -r 1512031231 > /dev/null 2>&1 then ax_cv_prog_date_bsd_date=yes fi ]) ]) AS_VAR_IF([ax_cv_prog_date_bsd], [yes], [ AC_CACHE_CHECK([whether BSD date supports -v], [ax_cv_prog_date_bsd_adjust], [ ax_cv_prog_date_bsd_adjust=no if date -v 1d > /dev/null 2>&1 then ax_cv_prog_date_bsd_adjust=yes fi ]) ]) ])dnl AX_PROG_DATE pcs-0.10.11/make/000077500000000000000000000000001412706364600133655ustar00rootroot00000000000000pcs-0.10.11/make/git-version-gen000077500000000000000000000223661412706364600163410ustar00rootroot00000000000000#!/bin/sh # Print a version string. scriptversion=2018-08-31.20; # UTC # Copyright (C) 2012-2020 Red Hat, Inc. # Copyright (C) 2007-2016 Free Software Foundation, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # This script is derived from GIT-VERSION-GEN from GIT: http://git.or.cz/. # It may be run two ways: # - from a git repository in which the "git describe" command below # produces useful output (thus requiring at least one signed tag) # - from a non-git-repo directory containing a .tarball-version file, which # presumes this script is invoked like "./git-version-gen .tarball-version". # In order to use intra-version strings in your project, you will need two # separate generated version string files: # # .tarball-version - present only in a distribution tarball, and not in # a checked-out repository. Created with contents that were learned at # the last time autoconf was run, and used by git-version-gen. Must not # be present in either $(srcdir) or $(builddir) for git-version-gen to # give accurate answers during normal development with a checked out tree, # but must be present in a tarball when there is no version control system. # Therefore, it cannot be used in any dependencies. 
GNUmakefile has # hooks to force a reconfigure at distribution time to get the value # correct, without penalizing normal development with extra reconfigures. # # .version - present in a checked-out repository and in a distribution # tarball. Usable in dependencies, particularly for files that don't # want to depend on config.h but do want to track version changes. # Delete this file prior to any autoconf run where you want to rebuild # files to pick up a version string change; and leave it stale to # minimize rebuild time after unrelated changes to configure sources. # # As with any generated file in a VC'd directory, you should add # /.version to .gitignore, so that you don't accidentally commit it. # .tarball-version is never generated in a VC'd directory, so needn't # be listed there. # # In order to use git archive versions another two files has to be presented: # # .gitarchive-version - present in checked-out repository and git # archive tarball, but not in the distribution tarball. Used as a last # option for version. File must contain special string $Format:%d$, # which is substitued by git on archive operation. # # .gitattributes - present in checked-out repository and git archive # tarball, but not in the distribution tarball. Must set export-subst # attribute for .gitarchive-version file. # # Use the following line in your configure.ac, so that $(VERSION) will # automatically be up-to-date each time configure is run (and note that # since configure.ac no longer includes a version string, Makefile rules # should not depend on configure.ac for version updates). # # AC_INIT([GNU project], # m4_esyscmd([build-aux/git-version-gen .tarball-version]), # [bug-project@example]) # # Then use the following lines in your Makefile.am, so that .version # will be present for dependencies, and so that .version and # .tarball-version will exist in distribution tarballs. # # EXTRA_DIST = $(top_srcdir)/.version # BUILT_SOURCES = $(top_srcdir)/.version # $(top_srcdir)/.version: # echo $(VERSION) > $@-t && mv $@-t $@ # dist-hook: # echo $(VERSION) > $(distdir)/.tarball-version me=$0 version="git-version-gen $scriptversion Copyright 2011 Free Software Foundation, Inc. There is NO warranty. You may redistribute this software under the terms of the GNU General Public License. For more information about these matters, see the files named COPYING." usage="\ Usage: $me [OPTION]... \$srcdir/.tarball-version [\$srcdir/.gitarchive-version] [TAG-NORMALIZATION-SED-SCRIPT] Print a version string. Options: --prefix PREFIX prefix of git tags (default 'v') --fallback VERSION fallback version to use if \"git --version\" fails --help display this help and exit --version output version information and exit Running without arguments will suffice in most cases." prefix=v fallback= while test $# -gt 0; do case $1 in --help) echo "$usage"; exit 0;; --version) echo "$version"; exit 0;; --prefix) shift; prefix="$1";; --fallback) shift; fallback="$1";; -*) echo "$0: Unknown option '$1'." >&2 echo "$0: Try '--help' for more information." >&2 exit 1;; *) if test "x$tarball_version_file" = x; then tarball_version_file="$1" elif test "x$gitarchive_version_file" = x; then gitarchive_version_file="$1" elif test "x$tag_sed_script" = x; then tag_sed_script="$1" else echo "$0: extra non-option argument '$1'." >&2 exit 1 fi;; esac shift done if test "x$tarball_version_file" = x; then echo "$usage" exit 1 fi tag_sed_script="${tag_sed_script:-s/x/x/}" nl=' ' # Avoid meddling by environment variable of the same name. 
v= v_from_git= # First see if there is a tarball-only version file. # then try "git describe", then default. if test -f $tarball_version_file then v=`cat $tarball_version_file` || v= case $v in *$nl*) v= ;; # reject multi-line output [0-9]*) ;; *) v= ;; esac test "x$v" = x \ && echo "$0: WARNING: $tarball_version_file is missing or damaged" 1>&2 fi if test "x$v" != x then : # use $v # Otherwise, if there is at least one git commit involving the working # directory, and "git describe" output looks sensible, use that to # derive a version string. elif test "`git log -1 --pretty=format:x . 2>&1`" = x \ && v=`git describe --abbrev=4 --match="$prefix*" HEAD 2>/dev/null \ || git describe --abbrev=4 HEAD 2>/dev/null` \ && v=`printf '%s\n' "$v" | sed "$tag_sed_script"` \ && case $v in $prefix[0-9]*) ;; *) (exit 1) ;; esac then # Is this a new git that lists number of commits since the last # tag or the previous older version that did not? # Newer: v6.10-77-g0f8faeb # Older: v6.10-g0f8faeb case $v in *-*-*) : git describe is okay three part flavor ;; *-*) : git describe is older two part flavor # Recreate the number of commits and rewrite such that the # result is the same as if we were using the newer version # of git describe. vtag=`echo "$v" | sed 's/-.*//'` commit_list=`git rev-list "$vtag"..HEAD 2>/dev/null` \ || { commit_list=failed; echo "$0: WARNING: git rev-list failed" 1>&2; } numcommits=`echo "$commit_list" | wc -l` v=`echo "$v" | sed "s/\(.*\)-\(.*\)/\1-$numcommits-\2/"`; test "$commit_list" = failed && v=UNKNOWN ;; esac # Change the first '-' to a '.', so version-comparing tools work properly. # Remove the "g" in git describe's output string, to save a byte. v=`echo "$v" | sed 's/-/./;s/\(.*\)-g/\1-/'`; v_from_git=1 elif test "x$fallback" = x || git --version >/dev/null 2>&1; then if test -f $gitarchive_version_file then v=`sed "s/^.*tag: \($prefix[0-9)][^,)]*\).*\$/\1/" $gitarchive_version_file \ | sed "$tag_sed_script"` || exit 1 case $v in *$nl*) v= ;; # reject multi-line output $prefix[0-9]*) ;; *) v= ;; esac test -z "$v" \ && echo "$0: WARNING: $gitarchive_version_file doesn't contain valid version tag" 1>&2 \ && v=UNKNOWN elif test "x$fallback" = x; then v=UNKNOWN else v=$fallback fi else v=$fallback fi if test "x$fallback" = x -a "$v" = "UNKNOWN" then echo "$0: ERROR: Can't find valid version. Please use valid git repository," \ "released tarball or version tagged archive" 1>&2 exit 1 fi v=`echo "$v" |sed "s/^$prefix//"` # Test whether to append the "-dirty" suffix only if the version # string we're using came from git. I.e., skip the test if it's "UNKNOWN" # or if it came from .tarball-version. if test "x$v_from_git" != x; then # Don't declare a version "dirty" merely because a time stamp has changed. git update-index --refresh > /dev/null 2>&1 dirty=`exec 2>/dev/null;git diff-index --name-only HEAD` || dirty= case "$dirty" in '') ;; *) # Append the suffix only if there isn't one already. case $v in *-dirty) ;; *) v="$v-dirty" ;; esac ;; esac fi # Omit the trailing newline, so that m4_esyscmd can use the result directly. 
printf %s "$v" # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: pcs-0.10.11/make/gitlog-to-changelog000077500000000000000000000126451412706364600171550ustar00rootroot00000000000000eval '(exit $?0)' && eval 'exec perl -wS "$0" ${1+"$@"}' & eval 'exec perl -wS "$0" $argv:q' if 0; # Convert git log output to ChangeLog format. my $VERSION = '2009-10-30 13:46'; # UTC # The definition above must lie within the first 8 lines in order # for the Emacs time-stamp write hook (at end) to update it. # If you change this file with Emacs, please let the write hook # do its job. Otherwise, update this string manually. # Copyright (C) 2008-2010 Free Software Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Written by Jim Meyering use strict; use warnings; use Getopt::Long; use POSIX qw(strftime); (my $ME = $0) =~ s|.*/||; # use File::Coda; # http://meyering.net/code/Coda/ END { defined fileno STDOUT or return; close STDOUT and return; warn "$ME: failed to close standard output: $!\n"; $? ||= 1; } sub usage ($) { my ($exit_code) = @_; my $STREAM = ($exit_code == 0 ? *STDOUT : *STDERR); if ($exit_code != 0) { print $STREAM "Try `$ME --help' for more information.\n"; } else { print $STREAM < ChangeLog $ME -- -n 5 foo > last-5-commits-to-branch-foo EOF } exit $exit_code; } # If the string $S is a well-behaved file name, simply return it. # If it contains white space, quotes, etc., quote it, and return the new string. sub shell_quote($) { my ($s) = @_; if ($s =~ m![^\w+/.,-]!) { # Convert each single quote to '\'' $s =~ s/\'/\'\\\'\'/g; # Then single quote the string. $s = "'$s'"; } return $s; } sub quoted_cmd(@) { return join (' ', map {shell_quote $_} @_); } { my $since_date = '1970-01-01 UTC'; my $format_string = '%s%n%b%n'; GetOptions ( help => sub { usage 0 }, version => sub { print "$ME version $VERSION\n"; exit }, 'since=s' => \$since_date, 'format=s' => \$format_string, ) or usage 1; my @cmd = (qw (git log --log-size), "--since=$since_date", '--pretty=format:%ct %an <%ae>%n%n'.$format_string, @ARGV); open PIPE, '-|', @cmd or die ("$ME: failed to run `". quoted_cmd (@cmd) ."': $!\n" . "(Is your Git too old? Version 1.5.1 or later is required.)\n"); my $prev_date_line = ''; while (1) { defined (my $in = ) or last; $in =~ /^log size (\d+)$/ or die "$ME:$.: Invalid line (expected log size):\n$in"; my $log_nbytes = $1; my $log; my $n_read = read PIPE, $log, $log_nbytes; $n_read == $log_nbytes or die "$ME:$.: unexpected EOF\n"; my @line = split "\n", $log; my $author_line = shift @line; defined $author_line or die "$ME:$.: unexpected EOF\n"; $author_line =~ /^(\d+) (.*>)$/ or die "$ME:$.: Invalid line " . 
"(expected date/author/email):\n$author_line\n"; my $date_line = sprintf "%s $2\n", strftime ("%F", localtime ($1)); # If this line would be the same as the previous date/name/email # line, then arrange not to print it. if ($date_line ne $prev_date_line) { $prev_date_line eq '' or print "\n"; print $date_line; } $prev_date_line = $date_line; # Omit "Signed-off-by..." lines. @line = grep !/^Signed-off-by: .*>$/, @line; # If there were any lines if (@line == 0) { warn "$ME: warning: empty commit message:\n $date_line\n"; } else { # Remove leading and trailing blank lines. while ($line[0] =~ /^\s*$/) { shift @line; } while ($line[$#line] =~ /^\s*$/) { pop @line; } # Prefix each non-empty line with a TAB. @line = map { length $_ ? "\t$_" : '' } @line; print "\n", join ("\n", @line), "\n"; } defined ($in = ) or last; $in ne "\n" and die "$ME:$.: unexpected line:\n$in"; } close PIPE or die "$ME: error closing pipe from " . quoted_cmd (@cmd) . "\n"; # FIXME-someday: include $PROCESS_STATUS in the diagnostic } # Local Variables: # mode: perl # indent-tabs-mode: nil # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "my $VERSION = '" # time-stamp-format: "%:y-%02m-%02d %02H:%02M" # time-stamp-time-zone: "UTC" # time-stamp-end: "'; # UTC" # End: pcs-0.10.11/make/release.mk000066400000000000000000000037401412706364600153420ustar00rootroot00000000000000# to build official release tarballs, handle tagging and publish. project = pcs deliverables = $(project)-$(version).sha256 \ $(project)-$(version).tar.bz2 \ $(project)-$(version).tar.gz \ $(project)-$(version).tar.xz changelogfile = CHANGELOG.md .PHONY: all all: tag tarballs .PHONY: checks checks: ifeq (,$(version)) @echo ERROR: need to define version= @exit 1 endif @if [ ! -d .git ]; then \ echo This script needs to be executed from top level cluster git tree; \ exit 1; \ fi @if [ -n "$$(git status --untracked-files=no --porcelain 2>/dev/null)" ]; then \ echo Stash or rollback the uncommitted changes in git first; \ exit 1; \ fi .PHONY: setup setup: checks ./autogen.sh ./configure $(configure_options) $(MAKE) maintainer-clean .PHONY: tag tag: setup ./tag-$(version) tag-$(version): ifeq (,$(release)) @echo Building test release $(version), no tagging echo '$(version)' > .tarball-version else # following will be captured by git-version-gen automatically git tag -a -m "v$(version) release" v$(version) HEAD @touch $@ endif .PHONY: tarballs tarballs: tag ./autogen.sh ./configure $(configure_options) $(MAKE) distcheck "DISTCHECK_CONFIGURE_FLAGS=$(configure_options)" .PHONY: sha256 sha256: $(project)-$(version).sha256 $(deliverables): tarballs $(project)-$(version).sha256: # checksum anything from deliverables except for in-prep checksums file sha256sum $(deliverables:$@=) | sort -k2 > $@ .PHONY: publish publish: ifeq (,$(release)) @echo Building test release $(version), no publishing! else git push --follow-tags origin @echo Hey you! Yeah you, looking somewhere else! @echo Remember to notify cluster-devel/RH and users/ClusterLabs MLs. 
endif .PHONY: bump-changelog bump-changelog: checks sed -i 's/\#\# \[Unreleased\]/\#\# \[$(version)\] - $(shell date +%Y-%m-%d)/' \ $(changelogfile) git commit -a -m "Bumped to $(version)" .PHONY: clean clean: rm -rf $(project)* tag-* .tarball-version pcs-0.10.11/mypy.ini000066400000000000000000000054151412706364600141540ustar00rootroot00000000000000[mypy] mypy_path = ./pcs/bundled/packages # Modules and packages with full support have more strict checks enabled [mypy-pcs.cli.common.printable_tree] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.common.routing] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.nvset] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.reports.*] disallow_untyped_defs = True [mypy-pcs.cli.resource.relations] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.routing.*] disallow_untyped_defs = True [mypy-pcs.cli.rule] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.cli.tag.command] disallow_untyped_defs = True [mypy-pcs.common.interface.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.common.pacemaker.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.common.reports.*] disallow_untyped_defs = True [mypy-pcs.common.reports.constraints.*] # this is a temporary solution for legacy code disallow_untyped_defs = False [mypy-pcs.common.ssl] disallow_untyped_defs = True [mypy-pcs.common.types] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.common.validate] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.common.services.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.entry_points.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.lib.cib.nvpair_multi] disallow_untyped_defs = True [mypy-pcs.lib.cib.resource.relations] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.lib.cib.rule.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.lib.cib.tag] disallow_untyped_defs = True [mypy-pcs.lib.commands.cib_options] disallow_untyped_defs = True [mypy-pcs.lib.commands.dr] disallow_untyped_defs = True [mypy-pcs.lib.commands.status] disallow_untyped_defs = True [mypy-pcs.lib.commands.tag] disallow_untyped_defs = True [mypy-pcs.lib.dr.*] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.lib.resource_agent] disallow_untyped_defs = True disallow_untyped_calls = True [mypy-pcs.lib.services] disallow_untyped_defs = True [mypy-pcs.lib.validate] disallow_untyped_defs = True disallow_untyped_calls = True # Modules with issues in typehinting: # TODO: fix [mypy-pcs.daemon.*] ignore_errors = True # We don't want to type check tests [mypy-pcs_test.*] ignore_errors = True # External libraries [mypy-clufter.*] ignore_missing_imports = True [mypy-dacite] ignore_missing_imports = True [mypy-distro] ignore_missing_imports = True [mypy-pyagentx] ignore_errors = True ignore_missing_imports = True [mypy-pyparsing] ignore_missing_imports = True [mypy-xml.dom.*] ignore_missing_imports = True pcs-0.10.11/parallel_tests_requirements.txt000066400000000000000000000000511412706364600210260ustar00rootroot00000000000000concurrencytest testtools python-subunit pcs-0.10.11/pcs/000077500000000000000000000000001412706364600132355ustar00rootroot00000000000000pcs-0.10.11/pcs/COPYING000066400000000000000000000432541412706364600143000ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright 
(C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. pcs-0.10.11/pcs/Makefile.am000066400000000000000000000224441412706364600152770ustar00rootroot00000000000000MAINTAINERCLEANFILES = Makefile.in # install bashcompletiondir = $(prefix)/share/bash-completion/completions/ dist_bashcompletion_DATA= bash_completion/pcs snmpmibsdir = $(SNMP_MIB_DIR) dist_snmpmibs_DATA = snmp/mibs/PCMK-PCS-MIB.txt \ snmp/mibs/PCMK-PCS-V1-MIB.txt man8_MANS = pcs.8 snmp/pcs_snmp_agent.8 sysconfigdir = $(CONF_DIR) dist_sysconfig_DATA = snmp/conf/pcs_snmp_agent servicedir = $(SYSTEMD_UNIT_DIR) service_DATA = snmp/pcs_snmp_agent.service EXTRA_DIST = \ acl.py \ alert.py \ app.py \ cli/booth/command.py \ cli/booth/env.py \ cli/booth/__init__.py \ cli/cluster/command.py \ cli/cluster/__init__.py \ cli/common/capabilities.py \ cli/common/completion.py \ cli/common/env_cli.py \ cli/common/errors.py \ cli/common/__init__.py \ cli/common/lib_wrapper.py \ cli/common/middleware.py \ cli/common/parse_args.py \ cli/common/printable_tree.py \ cli/common/routing.py \ cli/common/tools.py \ cli/constraint_colocation/command.py \ cli/constraint_colocation/__init__.py \ cli/constraint/command.py \ cli/constraint/__init__.py \ cli/constraint_order/command.py \ cli/constraint_order/__init__.py \ cli/constraint/parse_args.py \ cli/constraint_ticket/command.py \ cli/constraint_ticket/__init__.py \ cli/constraint_ticket/parse_args.py \ cli/dr.py \ client.py \ cli/fencing_topology.py \ cli/file/__init__.py \ cli/file/metadata.py \ cli/__init__.py \ cli/nvset.py \ cli/reports/__init__.py \ cli/reports/messages.py \ cli/reports/output.py \ cli/reports/processor.py \ cli/resource/__init__.py \ cli/resource/parse_args.py \ cli/resource/relations.py \ cli/routing/acl.py \ cli/routing/alert.py \ cli/routing/booth.py \ cli/routing/client.py \ cli/routing/cluster.py \ cli/routing/config.py \ cli/routing/constraint.py \ cli/routing/dr.py \ cli/routing/host.py \ cli/routing/__init__.py \ cli/routing/node.py \ cli/routing/pcsd.py \ cli/routing/prop.py \ cli/routing/qdevice.py \ cli/routing/quorum.py \ cli/routing/resource.py \ cli/routing/status.py \ cli/routing/stonith.py \ cli/routing/tag.py \ cli/rule.py \ cli/tag/command.py \ cli/tag/__init__.py \ cluster.py \ common/corosync_conf.py \ common/const.py \ common/dr.py \ common/fencing_topology.py \ common/file.py \ common/file_type_codes.py \ common/host.py \ common/__init__.py \ common/communication/__init__.py \ common/communication/const.py \ common/communication/dto.py \ common/communication/types.py \ common/interface/dto.py \ common/interface/__init__.py \ common/node_communicator.py \ common/pacemaker/__init__.py \ common/pacemaker/nvset.py \ common/pacemaker/resource/__init__.py \ common/pacemaker/resource/relations.py \ common/pacemaker/role.py \ common/pacemaker/rule.py \ common/pcs_pycurl.py \ common/reports/codes.py \ common/reports/conversions.py \ common/reports/const.py \ common/reports/constraints/colocation.py \ common/reports/constraints/common.py \ common/reports/constraints/__init__.py \ common/reports/constraints/order.py \ common/reports/constraints/ticket.py \ common/reports/dto.py \ common/reports/__init__.py \ common/reports/item.py \ common/reports/messages.py \ common/reports/processor.py \ common/reports/types.py \ common/services/common.py \ common/services/drivers/__init__.py \ 
common/services/drivers/systemd.py \ common/services/drivers/sysvinit_rhel.py \ common/services_dto.py \ common/services/errors.py \ common/services/__init__.py \ common/services/interfaces/executor.py \ common/services/interfaces/__init__.py \ common/services/interfaces/manager.py \ common/services/types.py \ common/ssl.py \ common/str_tools.py \ common/tools.py \ common/types.py \ common/validate.py \ config.py \ constraint.py \ daemon/app/common.py \ daemon/app/__init__.py \ daemon/app/session.py \ daemon/app/sinatra_common.py \ daemon/app/sinatra_remote.py \ daemon/app/sinatra_ui.py \ daemon/app/test/__init__.py \ daemon/app/ui_common.py \ daemon/app/ui.py \ daemon/auth.py \ daemon/env.py \ daemon/http_server.py \ daemon/__init__.py \ daemon/log.py \ daemon/ruby_pcsd.py \ daemon/run.py \ daemon/session.py \ daemon/ssl.py \ daemon/systemd.py \ entry_points/cli.py \ entry_points/common.py \ entry_points/daemon.py \ entry_points/__init__.py \ entry_points/internal.py \ entry_points/snmp_agent.py \ host.py \ __init__.py \ lib/booth/config_facade.py \ lib/booth/config_files.py \ lib/booth/config_parser.py \ lib/booth/config_validators.py \ lib/booth/constants.py \ lib/booth/env.py \ lib/booth/__init__.py \ lib/booth/resource.py \ lib/booth/status.py \ lib/booth/sync.py \ lib/cib/acl.py \ lib/cib/alert.py \ lib/cib/constraint/colocation.py \ lib/cib/constraint/constraint.py \ lib/cib/constraint/__init__.py \ lib/cib/constraint/order.py \ lib/cib/constraint/resource_set.py \ lib/cib/constraint/ticket.py \ lib/cib/fencing_topology.py \ lib/cib/__init__.py \ lib/cib/node.py \ lib/cib/nvpair_multi.py \ lib/cib/nvpair.py \ lib/cib/resource/bundle.py \ lib/cib/resource/clone.py \ lib/cib/resource/common.py \ lib/cib/resource/group.py \ lib/cib/resource/guest_node.py \ lib/cib/resource/hierarchy.py \ lib/cib/resource/__init__.py \ lib/cib/resource/operations.py \ lib/cib/resource/primitive.py \ lib/cib/resource/relations.py \ lib/cib/resource/remote_node.py \ lib/cib/rule/cib_to_dto.py \ lib/cib/rule/cib_to_str.py \ lib/cib/rule/expression_part.py \ lib/cib/rule/in_effect.py \ lib/cib/rule/__init__.py \ lib/cib/rule/parsed_to_cib.py \ lib/cib/rule/parser.py \ lib/cib/rule/tools.py \ lib/cib/rule/validator.py \ lib/cib/sections.py \ lib/cib/status.py \ lib/cib/stonith.py \ lib/cib/tag.py \ lib/cib/tools.py \ lib/commands/acl.py \ lib/commands/alert.py \ lib/commands/booth.py \ lib/commands/cib_options.py \ lib/commands/cluster.py \ lib/commands/constraint/colocation.py \ lib/commands/constraint/common.py \ lib/commands/constraint/__init__.py \ lib/commands/constraint/order.py \ lib/commands/constraint/ticket.py \ lib/commands/dr.py \ lib/commands/fencing_topology.py \ lib/commands/__init__.py \ lib/commands/node.py \ lib/commands/pcsd.py \ lib/commands/qdevice.py \ lib/commands/quorum.py \ lib/commands/remote_node.py \ lib/commands/resource_agent.py \ lib/commands/resource.py \ lib/commands/sbd.py \ lib/commands/scsi.py \ lib/commands/services.py \ lib/commands/status.py \ lib/commands/stonith_agent.py \ lib/commands/stonith.py \ lib/commands/tag.py \ lib/communication/booth.py \ lib/communication/cluster.py \ lib/communication/corosync.py \ lib/communication/__init__.py \ lib/communication/nodes.py \ lib/communication/qdevice_net.py \ lib/communication/qdevice.py \ lib/communication/sbd.py \ lib/communication/scsi.py \ lib/communication/status.py \ lib/communication/tools.py \ lib/corosync/config_facade.py \ lib/corosync/config_parser.py \ lib/corosync/config_validators.py \ 
lib/corosync/constants.py \ lib/corosync/__init__.py \ lib/corosync/live.py \ lib/corosync/node.py \ lib/corosync/qdevice_client.py \ lib/corosync/qdevice_net.py \ lib/dr/config/facade.py \ lib/dr/config/__init__.py \ lib/dr/env.py \ lib/dr/__init__.py \ lib/env.py \ lib/errors.py \ lib/exchange_formats.md \ lib/external.py \ lib/file/__init__.py \ lib/file/instance.py \ lib/file/metadata.py \ lib/file/raw_file.py \ lib/file/toolbox.py \ lib/__init__.py \ lib/interface/config.py \ lib/interface/__init__.py \ lib/node_communication_format.py \ lib/node_communication.py \ lib/node.py \ lib/pacemaker/api_result.py \ lib/pacemaker/__init__.py \ lib/pacemaker/live.py \ lib/pacemaker/simulate.py \ lib/pacemaker/state.py \ lib/pacemaker/values.py \ lib/resource_agent.py \ lib/sbd.py \ lib/services.py \ lib/tools.py \ lib/validate.py \ lib/xml_tools.py \ node.py \ pcsd.py \ pcs_internal.py \ prop.py \ qdevice.py \ quorum.py \ resource.py \ rule.py \ snmp/agentx/__init__.py \ snmp/agentx/types.py \ snmp/agentx/updater.py \ snmp/conf/pcs_snmp_agent \ snmp/__init__.py \ snmp/mibs/PCMK-PCS-MIB.txt \ snmp/mibs/PCMK-PCS-V1-MIB.txt \ snmp/pcs_snmp_agent.8 \ snmp/pcs_snmp_agent.py \ snmp/updaters/__init__.py \ snmp/updaters/v1.py \ status.py \ stonith.py \ usage.py \ utils.py pcs-0.10.11/pcs/__init__.py000066400000000000000000000000001412706364600153340ustar00rootroot00000000000000pcs-0.10.11/pcs/acl.py000066400000000000000000000216311412706364600143510ustar00rootroot00000000000000from pcs import ( prop, utils, ) from pcs.cli.common.errors import CmdLineInputError from pcs.cli.reports.output import warn from pcs.common.str_tools import indent from pcs.lib.pacemaker.values import is_true def _print_list_of_objects(obj_list, transformation_fn): out = [] for obj in obj_list: out += transformation_fn(obj) if out: print("\n".join(out)) def show_acl_config(lib, argv, modifiers): warn( "This command is deprecated and will be removed. 
" "Please use 'pcs acl config' instead.", stderr=True, ) return acl_config(lib, argv, modifiers) def acl_config(lib, argv, modifiers): """ Options: * -f - CIB file """ # TODO move to lib once lib supports cluster properties # enabled/disabled should be part of the structure returned # by lib.acl.get_config modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() properties = utils.get_set_properties( defaults=prop.get_default_properties() ) acl_enabled = properties.get("enable-acl", "").lower() if is_true(acl_enabled): print("ACLs are enabled") else: print("ACLs are disabled, run 'pcs acl enable' to enable") print() data = lib.acl.get_config() _print_list_of_objects(data.get("target_list", []), target_to_str) _print_list_of_objects(data.get("group_list", []), group_to_str) _print_list_of_objects(data.get("role_list", []), role_to_str) def acl_enable(lib, argv, modifiers): """ Options: * -f - CIB file """ # TODO move to lib once lib supports cluster properties modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() prop.set_property(lib, ["enable-acl=true"], modifiers.get_subset("-f")) def acl_disable(lib, argv, modifiers): """ Options: * -f - CIB file """ # TODO move to lib once lib supports cluster properties modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() prop.set_property(lib, ["enable-acl=false"], modifiers.get_subset("-f")) def user_create(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() user_name, role_list = argv[0], argv[1:] lib.acl.create_target(user_name, role_list) def user_delete(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_target(argv[0]) def group_create(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() group_name, role_list = argv[0], argv[1:] lib.acl.create_group(group_name, role_list) def group_delete(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_group(argv[0]) def argv_to_permission_info_list(argv): """ Commandline options: no options """ if len(argv) % 3 != 0: raise CmdLineInputError() # wrapping by list, # because in python3 zip() returns an iterator instead of a list # and the loop below makes iteration over it permission_info_list = list( zip( [permission.lower() for permission in argv[::3]], [scope_type.lower() for scope_type in argv[1::3]], argv[2::3], ) ) for permission, scope_type, dummy_scope in permission_info_list: if permission not in ["read", "write", "deny"] or scope_type not in [ "xpath", "id", ]: raise CmdLineInputError() return permission_info_list def role_create(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() role_id = argv.pop(0) description = "" desc_key = "description=" if argv and argv[0].startswith(desc_key) and len(argv[0]) > len(desc_key): description = argv.pop(0)[len(desc_key) :] permission_info_list = argv_to_permission_info_list(argv) lib.acl.create_role(role_id, permission_info_list, description) def role_delete(lib, argv, modifiers): """ Options: * -f - CIB file * --autodelete - autodelete empty targets, groups """ modifiers.ensure_only_supported("-f", "--autodelete") if len(argv) != 1: raise CmdLineInputError() 
lib.acl.remove_role( argv[0], autodelete_users_groups=modifiers.get("--autodelete") ) def _role_assign_unassign(argv, keyword, not_specific_fn, user_fn, group_fn): """ Commandline options: no options """ # TODO deprecate ambiguous syntax: # - pcs role assign [to] [user|group] # - pcs role unassign [from] [user|group] # The problem is that 'user|group' is optional, therefore pcs guesses # which one it is. # We haven't deprecated it yet, as groups don't work in pacemaker, # therefore there would be no benefit from deprecating it. argv_len = len(argv) if argv_len < 2: raise CmdLineInputError() if argv_len == 2: not_specific_fn(*argv) elif argv_len == 3: role_id, something, ug_id = argv if something == keyword: not_specific_fn(role_id, ug_id) elif something == "user": user_fn(role_id, ug_id) elif something == "group": group_fn(role_id, ug_id) else: raise CmdLineInputError() elif argv_len == 4 and argv[1] == keyword and argv[2] in ["group", "user"]: role_id, _, user_group, ug_id = argv if user_group == "user": user_fn(role_id, ug_id) else: group_fn(role_id, ug_id) else: raise CmdLineInputError() def role_assign(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") _role_assign_unassign( argv, "to", # TODO deprecate # Use assign_role_to_target or assign_role_to_group instead. # We haven't deprecated it yet, as groups don't work in pacemaker, # therefore there would be no benefit from deprecating it. lib.acl.assign_role_not_specific, lib.acl.assign_role_to_target, lib.acl.assign_role_to_group, ) def role_unassign(lib, argv, modifiers): """ Options: * -f - CIB file * --autodelete - autodelete empty targets, groups """ modifiers.ensure_only_supported("-f", "--autodelete") _role_assign_unassign( argv, "from", # TODO deprecate # Use unassign_role_from_target or unassign_role_from_group instead. # We haven't deprecated it yet, as groups don't work in pacemaker, # therefore there would be no benefit from deprecating it.
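# For illustration (comment-only sketch): each of these CLI forms ends up in this call: 'pcs acl role unassign R user1' (argv_len 2), 'pcs acl role unassign R from user1' (argv_len 3) and 'pcs acl role unassign R from user user1' (argv_len 4, type given explicitly).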
lambda role_id, ug_id: lib.acl.unassign_role_not_specific( role_id, ug_id, modifiers.get("--autodelete") ), lambda role_id, ug_id: lib.acl.unassign_role_from_target( role_id, ug_id, modifiers.get("--autodelete") ), lambda role_id, ug_id: lib.acl.unassign_role_from_group( role_id, ug_id, modifiers.get("--autodelete") ), ) def permission_add(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) < 4: raise CmdLineInputError() role_id, argv_next = argv[0], argv[1:] lib.acl.add_permission(role_id, argv_to_permission_info_list(argv_next)) def run_permission_delete(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 1: raise CmdLineInputError() lib.acl.remove_permission(argv[0]) def _target_group_to_str(type_name, obj): return ["{0}: {1}".format(type_name.title(), obj.get("id"))] + indent( [" ".join(["Roles:"] + obj.get("role_list", []))] ) def target_to_str(target): return _target_group_to_str("user", target) def group_to_str(group): return _target_group_to_str("group", group) def role_to_str(role): out = [] if role.get("description"): out.append("Description: {0}".format(role.get("description"))) out += map(_permission_to_str, role.get("permission_list", [])) return ["Role: {0}".format(role.get("id"))] + indent(out) def _permission_to_str(permission): out = ["Permission:", permission.get("kind")] if permission.get("xpath") is not None: out += ["xpath", permission.get("xpath")] elif permission.get("reference") is not None: out += ["id", permission.get("reference")] out.append("({0})".format(permission.get("id"))) return " ".join(out) pcs-0.10.11/pcs/alert.py000066400000000000000000000151421412706364600147210ustar00rootroot00000000000000import json from functools import partial from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import prepare_options, group_by_keywords from pcs.cli.reports.output import warn from pcs.common.str_tools import indent parse_cmd_sections = partial(group_by_keywords, implicit_first_group_key="main") def ensure_only_allowed_options(parameter_dict, allowed_list): for arg, value in parameter_dict.items(): if arg not in allowed_list: raise CmdLineInputError( "Unexpected parameter '{0}={1}'".format(arg, value) ) def alert_add(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() sections = parse_cmd_sections(argv, set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["id", "description", "path"]) lib.alert.create_alert( main_args.get("id", None), main_args.get("path", None), prepare_options(sections["options"]), prepare_options(sections["meta"]), main_args.get("description", None), ) def alert_update(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() alert_id = argv[0] sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["description", "path"]) lib.alert.update_alert( alert_id, main_args.get("path", None), prepare_options(sections["options"]), prepare_options(sections["meta"]), main_args.get("description", None), ) def alert_remove(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() 
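# For illustration (comment-only sketch, assuming the usual routing in pcs.cli.routing.alert): several alerts can be removed at once, so a command such as 'pcs alert remove alert1 alert2' arrives here as argv == ["alert1", "alert2"] and is passed through as a whole list below.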
lib.alert.remove_alert(argv) def recipient_add(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) * --force - allows non-unique recipient values """ modifiers.ensure_only_supported("-f", "--force") if len(argv) < 2: raise CmdLineInputError() alert_id = argv[0] sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["description", "id", "value"]) lib.alert.add_recipient( alert_id, main_args.get("value", None), prepare_options(sections["options"]), prepare_options(sections["meta"]), recipient_id=main_args.get("id", None), description=main_args.get("description", None), allow_same_value=modifiers.get("--force"), ) def recipient_update(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) * --force - allows non-unique recipient values """ modifiers.ensure_only_supported("-f", "--force") if not argv: raise CmdLineInputError() recipient_id = argv[0] sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) main_args = prepare_options(sections["main"]) ensure_only_allowed_options(main_args, ["description", "value"]) lib.alert.update_recipient( recipient_id, prepare_options(sections["options"]), prepare_options(sections["meta"]), recipient_value=main_args.get("value", None), description=main_args.get("description", None), allow_same_value=modifiers.get("--force"), ) def recipient_remove(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() lib.alert.remove_recipient(argv) def _nvset_to_str(nvset_obj): # TODO duplicate of pcs.resource._nvpairs_strings key_val = { nvpair_obj["name"]: nvpair_obj["value"] for nvpair_obj in nvset_obj } output = [] for name, value in sorted(key_val.items()): if " " in value: value = f'"{value}"' output.append(f"{name}={value}") return " ".join(output) def __description_attributes_to_str(obj): output = [] if obj.get("description"): output.append("Description: {desc}".format(desc=obj["description"])) if obj.get("instance_attributes"): output.append( "Options: {attributes}".format( attributes=_nvset_to_str(obj["instance_attributes"]) ) ) if obj.get("meta_attributes"): output.append( "Meta options: {attributes}".format( attributes=_nvset_to_str(obj["meta_attributes"]) ) ) return output def _alert_to_str(alert): content = [] content.extend(__description_attributes_to_str(alert)) recipients = [] for recipient in alert.get("recipient_list", []): recipients.extend(_recipient_to_str(recipient)) if recipients: content.append("Recipients:") content.extend(indent(recipients, 1)) return [ "Alert: {alert_id} (path={path})".format( alert_id=alert["id"], path=alert["path"] ) ] + indent(content, 1) def _recipient_to_str(recipient): return [ "Recipient: {id} (value={value})".format( value=recipient["value"], id=recipient["id"] ) ] + indent(__description_attributes_to_str(recipient), 1) def print_alert_show(lib, argv, modifiers): warn( "This command is deprecated and will be removed. 
" "Please use 'pcs alert config' instead.", stderr=True, ) return print_alert_config(lib, argv, modifiers) def print_alert_config(lib, argv, modifiers): """ Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() print("\n".join(alert_config_lines(lib))) def alert_config_lines(lib): lines = ["Alerts:"] alert_list = lib.alert.get_all_alerts() if alert_list: for alert in alert_list: lines.extend(indent(_alert_to_str(alert), 1)) else: lines.append(" No alerts defined") return lines def print_alerts_in_json(lib, argv, modifiers): """ This is used only by pcsd, will be removed in new architecture Options: * -f - CIB file (in lib wrapper) """ modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() print(json.dumps(lib.alert.get_all_alerts())) pcs-0.10.11/pcs/app.py000066400000000000000000000220161412706364600143700ustar00rootroot00000000000000import getopt import os import sys import logging from pcs import ( settings, usage, utils, ) from pcs.cli.common import ( capabilities, completion, errors, parse_args, routing, ) from pcs.cli.reports import process_library_reports, output from pcs.cli.routing import ( acl, alert, booth, client, cluster, config, constraint, dr, host, node, pcsd, prop, qdevice, quorum, resource, status, stonith, tag, ) from pcs.lib.errors import LibraryError def _non_root_run(argv_cmd): """ This function will run commands which has to be run as root for users which are not root. If it required to run such command as root it will do that by sending it to the local pcsd and then it will exit. """ # matching the commands both in here and in pcsd expects -o and --options # to be at the end of a command argv_and_options = argv_cmd[:] for option, value in utils.pcs_options.items(): if parse_args.is_option_expecting_value(option): argv_and_options.extend([option, value]) else: argv_and_options.append(option) # specific commands need to be run under root account, pass them to pcsd # don't forget to allow each command in pcsd.rb in "post /run_pcs do" root_command_list = [ ["cluster", "auth", "..."], ["cluster", "corosync", "..."], ["cluster", "destroy", "..."], ["cluster", "disable", "..."], ["cluster", "enable", "..."], ["cluster", "node", "..."], ["cluster", "pcsd-status", "..."], # TODO deprecated, remove command ["cluster", "start", "..."], ["cluster", "stop", "..."], ["cluster", "sync", "..."], # ['config', 'restore', '...'], # handled in config.config_restore ["host", "auth", "..."], ["host", "deauth", "..."], ["pcsd", "deauth", "..."], ["pcsd", "status", "..."], ["pcsd", "sync-certificates"], ["quorum", "device", "status", "..."], ["quorum", "status", "..."], ["status"], ["status", "corosync", "..."], ["status", "pcsd", "..."], ["status", "quorum", "..."], ["status", "status", "..."], ] for root_cmd in root_command_list: if (argv_and_options == root_cmd) or ( root_cmd[-1] == "..." 
and argv_and_options[: len(root_cmd) - 1] == root_cmd[:-1] ): # handle interactivity of 'pcs cluster auth' if argv_and_options[0:2] in [["cluster", "auth"], ["host", "auth"]]: if "-u" not in utils.pcs_options: username = utils.get_terminal_input("Username: ") argv_and_options.extend(["-u", username]) if "-p" not in utils.pcs_options: password = utils.get_terminal_password() argv_and_options.extend(["-p", password]) # call the local pcsd err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd( argv_and_options ) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) if std_out.strip(): print(std_out) if std_err.strip(): sys.stderr.write(std_err) sys.exit(exitcode) usefile = False filename = "" def main(argv=None): # pylint: disable=global-statement # pylint: disable=too-many-branches # pylint: disable=too-many-locals # pylint: disable=too-many-statements if completion.has_applicable_environment(os.environ): print( completion.make_suggestions( os.environ, usage.generate_completion_tree_from_usage() ) ) sys.exit() argv = argv if argv else sys.argv[1:] utils.subprocess_setup() global filename, usefile utils.pcs_options = {} # we want to support optional arguments for --wait, so if an argument # is specified with --wait (i.e. --wait=30) then we use it waitsecs = None new_argv = [] for arg in argv: if arg.startswith("--wait="): tempsecs = arg.replace("--wait=", "") if tempsecs: waitsecs = tempsecs arg = "--wait" new_argv.append(arg) argv = new_argv try: if "--" in argv: pcs_options, argv = getopt.gnu_getopt( argv, parse_args.PCS_SHORT_OPTIONS, parse_args.PCS_LONG_OPTIONS ) else: # DEPRECATED # TODO remove # We want to support only the -- version ( args_without_negative_nums, args_filtered_out, ) = parse_args.filter_out_non_option_negative_numbers(argv) if args_filtered_out: options_str = "', '".join(args_filtered_out) output.warn( f"Using '{options_str}' without '--' is deprecated, those " "parameters will be considered position independent " "options in future pcs versions" ) pcs_options, dummy_argv = getopt.gnu_getopt( args_without_negative_nums, parse_args.PCS_SHORT_OPTIONS, parse_args.PCS_LONG_OPTIONS, ) argv = parse_args.filter_out_options(argv) except getopt.GetoptError as err: usage.main() print(err) if err.opt in {"V", "clone", "device", "watchdog"}: # Print error messages which point users to the changes section in # the pcs manpage. # TODO remove # To be removed in the next significant version. 
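# For illustration (comment-only sketch): 'pcs resource create D ocf:heartbeat:Dummy --clone' makes gnu_getopt raise GetoptError with err.opt == "clone", because "clone" is not in PCS_LONG_OPTIONS, so the syntax change hint below is printed.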
print(f"Hint: {errors.HINT_SYNTAX_CHANGE}") sys.exit(1) full = False for option, dummy_value in pcs_options: if option == "--full": full = True break for opt, val in pcs_options: if not opt in utils.pcs_options: utils.pcs_options[opt] = val else: # If any options are a list then they've been entered twice which # isn't valid utils.err("%s can only be used once" % opt) if opt in ("-h", "--help"): if not argv: usage.main() sys.exit() else: argv = [argv[0], "help"] + argv[1:] elif opt == "-f": usefile = True filename = val utils.usefile = usefile utils.filename = filename elif opt == "--corosync_conf": settings.corosync_conf_file = val elif opt == "--version": print(settings.pcs_version) if full: print( " ".join( sorted( [ feat["id"] for feat in capabilities.get_pcs_capabilities() ] ) ) ) sys.exit() elif opt == "--fullhelp": usage.full_usage() sys.exit() elif opt == "--wait": utils.pcs_options[opt] = waitsecs elif opt == "--request-timeout": request_timeout_valid = False try: timeout = int(val) if timeout > 0: utils.pcs_options[opt] = timeout request_timeout_valid = True except ValueError: pass if not request_timeout_valid: utils.err( ( "'{0}' is not a valid --request-timeout value, use " "a positive integer" ).format(val) ) # initialize logger logging.getLogger("pcs") if (os.getuid() != 0) and (argv and argv[0] != "help") and not usefile: _non_root_run(argv) cmd_map = { "resource": resource.resource_cmd, "cluster": cluster.cluster_cmd, "stonith": stonith.stonith_cmd, "property": prop.property_cmd, "constraint": constraint.constraint_cmd, "acl": acl.acl_cmd, "status": status.status_cmd, "config": config.config_cmd, "pcsd": pcsd.pcsd_cmd, "node": node.node_cmd, "quorum": quorum.quorum_cmd, "qdevice": qdevice.qdevice_cmd, "alert": alert.alert_cmd, "booth": booth.booth_cmd, "host": host.host_cmd, "client": client.client_cmd, "dr": dr.dr_cmd, "tag": tag.tag_cmd, "help": lambda lib, argv, modifiers: usage.main(), } try: routing.create_router(cmd_map, [])( utils.get_library_wrapper(), argv, utils.get_input_modifiers() ) except LibraryError as e: process_library_reports(e.args) except errors.CmdLineInputError: if argv and argv[0] in cmd_map: usage.show(argv[0], []) else: usage.main() sys.exit(1) pcs-0.10.11/pcs/bash_completion/000077500000000000000000000000001412706364600164035ustar00rootroot00000000000000pcs-0.10.11/pcs/bash_completion/pcs000066400000000000000000000020051412706364600171100ustar00rootroot00000000000000# bash completion for pcs _pcs_completion(){ LENGTHS=() for WORD in "${COMP_WORDS[@]}"; do LENGTHS+=(${#WORD}) done COMPREPLY=( $( \ env COMP_WORDS="${COMP_WORDS[*]}" \ COMP_LENGTHS="${LENGTHS[*]}" \ COMP_CWORD=$COMP_CWORD \ PCS_AUTO_COMPLETE=1 pcs \ ) ) #examples what we get: #pcs #COMP_WORDS: pcs COMP_LENGTHS: 3 #pcs co #COMP_WORDS: pcs co COMP_LENGTHS: 3 2 # pcs config #COMP_WORDS: pcs config COMP_LENGTHS: 3 6 # pcs config " #COMP_WORDS: pcs config " COMP_LENGTHS: 3 6 4 # pcs config "'\\n #COMP_WORDS: pcs config "'\\n COMP_LENGTHS: 3 6 5'" } # -o default # Use readline's default filename completion if the compspec generates no # matches. # -F function # The shell function function is executed in the current shell environment. # When it finishes, the possible completions are retrieved from the value of # the COMPREPLY array variable. 
complete -o default -F _pcs_completion pcs pcs-0.10.11/pcs/cli/000077500000000000000000000000001412706364600140045ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/__init__.py000066400000000000000000000000001412706364600161030ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/booth/000077500000000000000000000000001412706364600151175ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/booth/__init__.py000066400000000000000000000000001412706364600172160ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/booth/command.py000066400000000000000000000206101412706364600171060ustar00rootroot00000000000000from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import group_by_keywords, prepare_options def config_setup(lib, arg_list, modifiers): """ create booth config Options: * --force - overwrite existing * --booth-conf - booth config file * --booth-key - booth authkey file * --name - name of a booth instance """ modifiers.ensure_only_supported( "--force", "--booth-conf", "--booth-key", "--name", ) peers = group_by_keywords( arg_list, set(["sites", "arbitrators"]), keyword_repeat_allowed=False ) if "sites" not in peers or not peers["sites"]: raise CmdLineInputError() lib.booth.config_setup( peers["sites"], peers["arbitrators"], instance_name=modifiers.get("--name"), overwrite_existing=modifiers.get("--force"), ) def config_destroy(lib, arg_list, modifiers): """ destroy booth config Options: --force - ignore config load issues --name - name of a booth instance """ modifiers.ensure_only_supported("--force", "--name") if arg_list: raise CmdLineInputError() lib.booth.config_destroy( instance_name=modifiers.get("--name"), ignore_config_load_problems=modifiers.get("--force"), ) def config_show(lib, arg_list, modifiers): """ print booth config Options: * --name - name of a booth instance * --request-timeout - HTTP timeout for getting config from remote host """ modifiers.ensure_only_supported("--name", "--request-timeout") if len(arg_list) > 1: raise CmdLineInputError() node = None if not arg_list else arg_list[0] print( lib.booth.config_text( instance_name=modifiers.get("--name"), node_name=node ) .decode("utf-8") .rstrip() ) def config_ticket_add(lib, arg_list, modifiers): """ add ticket to current configuration Options: * --force * --booth-conf - booth config file * --booth-key - booth auth key file * --name - name of a booth instance """ modifiers.ensure_only_supported( "--force", "--booth-conf", "--name", "--booth-key" ) if not arg_list: raise CmdLineInputError() lib.booth.config_ticket_add( arg_list[0], prepare_options(arg_list[1:]), instance_name=modifiers.get("--name"), allow_unknown_options=modifiers.get("--force"), ) def config_ticket_remove(lib, arg_list, modifiers): """ remove ticket from current configuration Options: * --booth-conf - booth config file * --booth-key - booth auth key file * --name - name of a booth instance """ modifiers.ensure_only_supported("--booth-conf", "--name", "--booth-key") if len(arg_list) != 1: raise CmdLineInputError() lib.booth.config_ticket_remove( arg_list[0], instance_name=modifiers.get("--name"), ) def _ticket_operation(lib_call, arg_list, booth_name): """ Commandline options: * --name - name of a booth instance """ site_ip = None if len(arg_list) == 2: site_ip = arg_list[1] elif len(arg_list) != 1: raise CmdLineInputError() ticket = arg_list[0] lib_call(ticket, site_ip=site_ip, instance_name=booth_name) def ticket_revoke(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name")
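# For illustration (comment-only sketch): 'pcs booth ticket revoke T1 192.168.1.10' arrives as arg_list == ["T1", "192.168.1.10"]; _ticket_operation above treats the optional second argument as site_ip.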
_ticket_operation( lib.booth.ticket_revoke, arg_list, modifiers.get("--name") ) def ticket_grant(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name") _ticket_operation(lib.booth.ticket_grant, arg_list, modifiers.get("--name")) def create_in_cluster(lib, arg_list, modifiers): """ Options: * --force - allows creating a booth resource even if its agent is not installed * -f - CIB file * --name - name of a booth instance """ modifiers.ensure_only_supported("--force", "-f", "--name") if len(arg_list) != 2 or arg_list[0] != "ip": raise CmdLineInputError() lib.booth.create_in_cluster( arg_list[1], instance_name=modifiers.get("--name"), allow_absent_resource_agent=modifiers.get("--force"), ) def get_remove_from_cluster(resource_remove): # TODO resource_remove is a provisional hack until resources are moved to # lib def remove_from_cluster(lib, arg_list, modifiers): """ Options: * --force - allow removal of multiple * -f - CIB file * --name - name of a booth instance """ modifiers.ensure_only_supported("--force", "-f", "--name") if arg_list: raise CmdLineInputError() lib.booth.remove_from_cluster( resource_remove, instance_name=modifiers.get("--name"), allow_remove_multiple=modifiers.get("--force"), ) return remove_from_cluster def get_restart(resource_restart): # TODO resource_restart is a provisional hack until resources are moved to # lib def restart(lib, arg_list, modifiers): """ Options: * --force - allow multiple * --name - name of a booth instance """ modifiers.ensure_only_supported("--force", "--name") if arg_list: raise CmdLineInputError() lib.booth.restart( lambda resource_id_list: resource_restart( lib, resource_id_list, modifiers.get_subset("--force") ), instance_name=modifiers.get("--name"), allow_multiple=modifiers.get("--force"), ) return restart def sync(lib, arg_list, modifiers): """ Options: * --skip-offline - skip offline nodes * --name - name of a booth instance * --booth-conf - booth config file * --booth-key - booth authkey file * --request-timeout - HTTP timeout for file distribution """ modifiers.ensure_only_supported( "--skip-offline", "--name", "--booth-conf", "--booth-key", "--request-timeout", ) if arg_list: raise CmdLineInputError() lib.booth.config_sync( instance_name=modifiers.get("--name"), skip_offline_nodes=modifiers.get("--skip-offline"), ) def enable(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name") if arg_list: raise CmdLineInputError() lib.booth.enable_booth(instance_name=modifiers.get("--name")) def disable(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name") if arg_list: raise CmdLineInputError() lib.booth.disable_booth(instance_name=modifiers.get("--name")) def start(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name") if arg_list: raise CmdLineInputError() lib.booth.start_booth(instance_name=modifiers.get("--name")) def stop(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance """ modifiers.ensure_only_supported("--name") if arg_list: raise CmdLineInputError() lib.booth.stop_booth(instance_name=modifiers.get("--name")) def pull(lib, arg_list, modifiers): """ Options: * --name - name of a booth instance * --request-timeout - HTTP timeout for file distribution """ modifiers.ensure_only_supported("--name", "--request-timeout") if len(arg_list) != 1: raise 
CmdLineInputError() lib.booth.pull_config( arg_list[0], instance_name=modifiers.get("--name"), ) def status(lib, arg_list, modifiers): """ Options: * --name - name of booth instance """ modifiers.ensure_only_supported("--name") if arg_list: raise CmdLineInputError() booth_status = lib.booth.get_status(instance_name=modifiers.get("--name")) if booth_status.get("ticket"): print("TICKETS:") print(booth_status["ticket"]) if booth_status.get("peers"): print("PEERS:") print(booth_status["peers"]) if booth_status.get("status"): print("DAEMON STATUS:") print(booth_status["status"]) pcs-0.10.11/pcs/cli/booth/env.py000066400000000000000000000061521412706364600162650ustar00rootroot00000000000000from pcs.common import ( file as pcs_file, file_type_codes, reports, ) from pcs.common.reports.item import ReportItem from pcs.cli.reports import output from pcs.cli.file import metadata from pcs.lib.errors import LibraryError def middleware_config(config_path, key_path): if config_path and not key_path: raise output.error( "When --booth-conf is specified, " "--booth-key must be specified as well" ) if key_path and not config_path: raise output.error( "When --booth-key is specified, " "--booth-conf must be specified as well" ) is_mocked_environment = config_path and key_path if is_mocked_environment: config_file = pcs_file.RawFile( metadata.for_file_type(file_type_codes.BOOTH_CONFIG, config_path) ) key_file = pcs_file.RawFile( metadata.for_file_type(file_type_codes.BOOTH_KEY, key_path) ) def create_booth_env(): try: config_data = config_file.read() if config_file.exists() else None key_data = key_file.read() if key_file.exists() else None # TODO write custom error handling, do not use pcs.lib specific code # and LibraryError except pcs_file.RawFileError as e: raise LibraryError( ReportItem.error( reports.messages.FileIoError( e.metadata.file_type_code, e.action, e.reason, file_path=e.metadata.path, ) ) ) from e return { "config_data": config_data, "key_data": key_data, "key_path": key_path, } def flush(modified_env): if not is_mocked_environment: return if not modified_env: # TODO now this would not happen # for more information see comment in # pcs.cli.common.lib_wrapper.lib_env_to_cli_env raise output.error("Error during library communication") try: key_file.write( modified_env["key_file"]["content"], can_overwrite=True ) config_file.write( modified_env["config_file"]["content"], can_overwrite=True ) # TODO write custom error handling, do not use pcs.lib specific code # and LibraryError except pcs_file.RawFileError as e: raise LibraryError( ReportItem.error( reports.messages.FileIoError( e.metadata.file_type_code, e.action, e.reason, file_path=e.metadata.path, ) ) ) from e def apply(next_in_line, env, *args, **kwargs): env.booth = create_booth_env() if is_mocked_environment else {} result_of_next = next_in_line(env, *args, **kwargs) if is_mocked_environment: flush(env.booth["modified_env"]) return result_of_next return apply pcs-0.10.11/pcs/cli/cluster/000077500000000000000000000000001412706364600154655ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/cluster/__init__.py000066400000000000000000000000001412706364600175640ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/cluster/command.py000066400000000000000000000123411412706364600174560ustar00rootroot00000000000000from pcs.cli.resource.parse_args import ( parse_create_simple as parse_resource_create_args, ) from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import prepare_options def 
_node_add_remote_separate_name_and_addr(arg_list): """ Commandline options: no options """ node_name = arg_list[0] if len(arg_list) == 1: node_addr = None rest_args = [] elif "=" in arg_list[1] or arg_list[1] in ["op", "meta"]: node_addr = None rest_args = arg_list[1:] else: node_addr = arg_list[1] rest_args = arg_list[2:] return node_name, node_addr, rest_args def node_add_remote(lib, arg_list, modifiers): """ Options: * --wait * --force - allow incomplete distribution of files, allow pcmk remote service to fail * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout * --no-default-ops - do not use default operations For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--wait", "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", "--no-default-ops", ) if not arg_list: raise CmdLineInputError() node_name, node_addr, rest_args = _node_add_remote_separate_name_and_addr( arg_list ) parts = parse_resource_create_args(rest_args) force = modifiers.get("--force") lib.remote_node.node_add_remote( node_name, node_addr, parts["op"], parts["meta"], parts["options"], skip_offline_nodes=modifiers.get("--skip-offline"), allow_incomplete_distribution=force, allow_pacemaker_remote_service_fail=force, allow_invalid_operation=force, allow_invalid_instance_attributes=force, use_default_operations=not modifiers.get("--no-default-ops"), wait=modifiers.get("--wait"), ) def create_node_remove_remote(remove_resource): def node_remove_remote(lib, arg_list, modifiers): """ Options: * --force - allow removal of multiple nodes, allow pcmk remote service to fail, don't stop a resource before its deletion (this is a side effect of the old resource delete command used here) * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", ) if len(arg_list) != 1: raise CmdLineInputError() lib.remote_node.node_remove_remote( arg_list[0], remove_resource, skip_offline_nodes=modifiers.get("--skip-offline"), allow_remove_multiple_nodes=modifiers.get("--force"), allow_pacemaker_remote_service_fail=modifiers.get("--force"), ) return node_remove_remote def node_add_guest(lib, arg_list, modifiers): """ Options: * --wait * --force - allow incomplete distribution of files, allow pcmk remote service to fail * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--wait", "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", ) if len(arg_list) < 2: raise CmdLineInputError() node_name = arg_list[0] resource_id = arg_list[1] meta_options = prepare_options(arg_list[2:]) lib.remote_node.node_add_guest( node_name, resource_id, meta_options, skip_offline_nodes=modifiers.get("--skip-offline"), allow_incomplete_distribution=modifiers.get("--force"), allow_pacemaker_remote_service_fail=modifiers.get("--force"), wait=modifiers.get("--wait"), ) def node_remove_guest(lib, arg_list, modifiers): """ Options: * --wait * --force - allow removal of multiple nodes, allow pcmk remote service to fail * --skip-offline - skip offline nodes * --request-timeout - HTTP request timeout For tests: * --corosync_conf * -f """ modifiers.ensure_only_supported( "--wait", "--force", "--skip-offline", "--request-timeout", "--corosync_conf", "-f", ) if len(arg_list) != 1: raise CmdLineInputError() lib.remote_node.node_remove_guest(
arg_list[0], skip_offline_nodes=modifiers.get("--skip-offline"), allow_remove_multiple_nodes=modifiers.get("--force"), allow_pacemaker_remote_service_fail=modifiers.get("--force"), wait=modifiers.get("--wait"), ) def node_clear(lib, arg_list, modifiers): """ Options: * --force - allow clearing a cluster node """ modifiers.ensure_only_supported("--force") if len(arg_list) != 1: raise CmdLineInputError() lib.cluster.node_clear( arg_list[0], allow_clear_cluster_node=modifiers.get("--force") ) pcs-0.10.11/pcs/cli/common/000077500000000000000000000000001412706364600152745ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/common/__init__.py000066400000000000000000000000001412706364600173730ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/common/capabilities.py000066400000000000000000000026531412706364600203050ustar00rootroot00000000000000import os.path from textwrap import dedent from lxml import etree from pcs import settings from pcs.cli.reports.output import error from pcs.common.tools import xml_fromstring def get_capabilities_definition(): """ Read and parse capabilities file The point is to return all data in Python structures for further processing. """ filename = os.path.join(settings.pcsd_exec_location, "capabilities.xml") try: with open(filename, mode="r") as file_: capabilities_xml = xml_fromstring(file_.read()) except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e: raise error( "Cannot read capabilities definition file '{0}': '{1}'".format( filename, str(e) ) ) from e capabilities = [] for feat_xml in capabilities_xml.findall(".//capability"): feat = dict(feat_xml.attrib) desc = feat_xml.find("./description") # dedent and strip remove indentation in the XML file feat["description"] = "" if desc is None else dedent(desc.text).strip() capabilities.append(feat) return capabilities def get_pcs_capabilities(): """ Get pcs capabilities from the capabilities file """ return [ { "id": feat["id"], "description": feat["description"], } for feat in get_capabilities_definition() if feat["in-pcs"] == "1" ] pcs-0.10.11/pcs/cli/common/completion.py000066400000000000000000000057661412706364600200310ustar00rootroot00000000000000def has_applicable_environment(environment): """ dict environment - very likely os.environ """ return bool( all( key in environment for key in [ "COMP_WORDS", "COMP_LENGTHS", "COMP_CWORD", "PCS_AUTO_COMPLETE", ] ) and environment["PCS_AUTO_COMPLETE"].strip() not in ("0", "") and environment["COMP_CWORD"].isdigit() ) def make_suggestions(environment, suggestion_tree): """ dict environment - very likely os.environ dict suggestion_tree - {'acl': {'role': {'create': ...}}}... 
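An illustrative call (hypothetical values, consistent with has_applicable_environment and _split_words below): make_suggestions({"PCS_AUTO_COMPLETE": "1", "COMP_CWORD": "1", "COMP_WORDS": "pcs clu", "COMP_LENGTHS": "3 3"}, {"cluster": {}, "config": {}}) returns "cluster". 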
""" if not has_applicable_environment(environment): raise EnvironmentError("Environment is not completion read") try: typed_word_list = _split_words( environment["COMP_WORDS"], environment["COMP_LENGTHS"].split(" "), ) except EnvironmentError: return "" return "\n".join( _find_suggestions( suggestion_tree, typed_word_list, int(environment["COMP_CWORD"]) ) ) def _split_words(joined_words, word_lengths): cursor_position = 0 words_string_len = len(joined_words) word_list = [] for length in word_lengths: if not length.isdigit(): raise EnvironmentError( "Length of word '{0}' is not digit".format(length) ) next_position = cursor_position + int(length) if next_position > words_string_len: raise EnvironmentError( "Expected lengths are bigger than word lengths" ) if ( next_position != words_string_len and not joined_words[next_position].isspace() ): raise EnvironmentError("Words separator is not expected space") word_list.append(joined_words[cursor_position:next_position]) cursor_position = next_position + 1 if words_string_len > next_position: raise EnvironmentError("Expected lengths are smaller then word lengths") return word_list def _find_suggestions(suggestion_tree, typed_word_list, word_under_cursor_idx): if not 1 <= word_under_cursor_idx <= len(typed_word_list): return [] if len(typed_word_list) == word_under_cursor_idx: # not started type the last word yet word_under_cursor = "" else: word_under_cursor = typed_word_list[word_under_cursor_idx] words_for_current_cursor_position = _get_subcommands( suggestion_tree, typed_word_list[1:word_under_cursor_idx] ) return [ word for word in words_for_current_cursor_position if word.startswith(word_under_cursor) ] def _get_subcommands(suggestion_tree, previous_subcommand_list): subcommand_tree = suggestion_tree for subcommand in previous_subcommand_list: if subcommand not in subcommand_tree: return [] subcommand_tree = subcommand_tree[subcommand] return sorted(list(subcommand_tree.keys())) pcs-0.10.11/pcs/cli/common/env_cli.py000066400000000000000000000006511412706364600172670ustar00rootroot00000000000000class Env: # pylint: disable=too-many-instance-attributes, too-few-public-methods def __init__(self): self.cib_data = None self.user = None self.groups = None self.corosync_conf_data = None self.booth = None self.pacemaker = None self.known_hosts_getter = None self.debug = False self.request_timeout = None self.report_processor = None pcs-0.10.11/pcs/cli/common/errors.py000066400000000000000000000031251412706364600171630ustar00rootroot00000000000000from typing import Optional ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE = ( "Cannot specify both --all and a list of nodes." ) SEE_MAN_CHANGES = "See 'man pcs' -> Changes in pcs-0.10." HINT_SYNTAX_CHANGE = ( "Syntax has changed from previous version. " + SEE_MAN_CHANGES ) def msg_command_replaced(*new_commands): new = "', '".join(new_commands) return f"This command has been replaced with '{new}'. {SEE_MAN_CHANGES}" def raise_command_replaced(*new_commands): raise CmdLineInputError(message=msg_command_replaced(*new_commands)) class CmdLineInputError(Exception): """ Exception express that user entered incorrect commad in command line. 
""" def __init__( self, message: Optional[str] = None, hint: Optional[str] = None, show_both_usage_and_message: bool = False, ) -> None: """ message -- explains what was wrong with the entered command hint -- provides an additional hint how to proceed show_both_usage_and_message -- show both the message and usage The routine which handles this exception behaves according to whether the message was specified (prints this message to user) or not (prints appropriate part of documentation). If show_both_usage_and_message is True, documentation will be printed first and the message will be printed after that. Hint is printed every time as the last item. """ super().__init__(message) self.message = message self.hint = hint self.show_both_usage_and_message = show_both_usage_and_message pcs-0.10.11/pcs/cli/common/lib_wrapper.py000066400000000000000000000435101412706364600201570ustar00rootroot00000000000000import logging from collections import namedtuple from typing import Dict, Any from pcs.cli.common import middleware from pcs.lib.commands import ( acl, alert, booth, cib_options, cluster, dr, fencing_topology, node, pcsd, qdevice, quorum, remote_node, resource, resource_agent, sbd, services, scsi, status, stonith, stonith_agent, tag, ) from pcs.lib.commands.constraint import ( colocation as constraint_colocation, order as constraint_order, ticket as constraint_ticket, ) from pcs.lib.env import LibraryEnvironment # Note: not properly typed _CACHE: Dict[Any, Any] = {} def wrapper(dictionary): return namedtuple("wrapper", dictionary.keys())(**dictionary) def cli_env_to_lib_env(cli_env): return LibraryEnvironment( logging.getLogger("pcs"), cli_env.report_processor, cli_env.user, cli_env.groups, cli_env.cib_data, cli_env.corosync_conf_data, booth_files_data=cli_env.booth, known_hosts_getter=cli_env.known_hosts_getter, request_timeout=cli_env.request_timeout, ) def lib_env_to_cli_env(lib_env, cli_env): if not lib_env.is_cib_live: cli_env.cib_data = lib_env.final_mocked_cib_content if not lib_env.is_corosync_conf_live: cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() # TODO # We expect that when there is booth set up in cli_env then there is booth # set up in lib_env as well. The code works like that now. Once we start # communicate over the network, we must do extra checks in here to make # sure what the status really is. 
# this applies generally, not only to booth # corosync_conf and cib suffer from this problem as well, but in these cases # it is dangerously hidden: when an inconsistency between the cli and lib # environments occurs, the original content is written to the file (which is wrong) if cli_env.booth: cli_env.booth["modified_env"] = lib_env.get_booth_env(name="").export() return cli_env def bind(cli_env, run_with_middleware, run_library_command): def run(cli_env, *args, **kwargs): lib_env = cli_env_to_lib_env(cli_env) lib_call_result = run_library_command(lib_env, *args, **kwargs) # middlewares need to finish their work and they see only cli_env # so we need to reflect some changes to cli_env lib_env_to_cli_env(lib_env, cli_env) return lib_call_result def decorated_run(*args, **kwargs): return run_with_middleware(run, cli_env, *args, **kwargs) return decorated_run def bind_all(env, run_with_middleware, dictionary): return wrapper( dict( (exposed_fn, bind(env, run_with_middleware, library_fn)) for exposed_fn, library_fn in dictionary.items() ) ) def get_module(env, middleware_factory, name): if name not in _CACHE: _CACHE[name] = load_module(env, middleware_factory, name) return _CACHE[name] def load_module(env, middleware_factory, name): # pylint: disable=too-many-return-statements, too-many-branches if name == "acl": return bind_all( env, middleware.build(middleware_factory.cib), { "create_role": acl.create_role, "remove_role": acl.remove_role, "assign_role_not_specific": acl.assign_role_not_specific, "assign_role_to_target": acl.assign_role_to_target, "assign_role_to_group": acl.assign_role_to_group, "unassign_role_not_specific": acl.unassign_role_not_specific, "unassign_role_from_target": acl.unassign_role_from_target, "unassign_role_from_group": acl.unassign_role_from_group, "create_target": acl.create_target, "create_group": acl.create_group, "remove_target": acl.remove_target, "remove_group": acl.remove_group, "add_permission": acl.add_permission, "remove_permission": acl.remove_permission, "get_config": acl.get_config, }, ) if name == "alert": return bind_all( env, middleware.build(middleware_factory.cib), { "create_alert": alert.create_alert, "update_alert": alert.update_alert, "remove_alert": alert.remove_alert, "add_recipient": alert.add_recipient, "update_recipient": alert.update_recipient, "remove_recipient": alert.remove_recipient, "get_all_alerts": alert.get_all_alerts, }, ) if name == "booth": return bind_all( env, middleware.build( middleware_factory.booth_conf, middleware_factory.cib ), { "config_setup": booth.config_setup, "config_destroy": booth.config_destroy, "config_text": booth.config_text, "config_ticket_add": booth.config_ticket_add, "config_ticket_remove": booth.config_ticket_remove, "create_in_cluster": booth.create_in_cluster, "remove_from_cluster": booth.remove_from_cluster, "restart": booth.restart, "config_sync": booth.config_sync, "enable_booth": booth.enable_booth, "disable_booth": booth.disable_booth, "start_booth": booth.start_booth, "stop_booth": booth.stop_booth, "pull_config": booth.pull_config, "get_status": booth.get_status, "ticket_grant": booth.ticket_grant, "ticket_revoke": booth.ticket_revoke, }, ) if name == "cluster": return bind_all( env, middleware.build(middleware_factory.cib), { "add_link": cluster.add_link, "add_nodes": cluster.add_nodes, "corosync_authkey_change": cluster.corosync_authkey_change, "config_update": cluster.config_update, "config_update_local": cluster.config_update_local, "get_corosync_conf_struct": cluster.get_corosync_conf_struct, "node_clear": 
cluster.node_clear, "remove_links": cluster.remove_links, "remove_nodes": cluster.remove_nodes, "remove_nodes_from_cib": cluster.remove_nodes_from_cib, "setup": cluster.setup, "setup_local": cluster.setup_local, "update_link": cluster.update_link, "verify": cluster.verify, }, ) if name == "dr": return bind_all( env, middleware.build(middleware_factory.corosync_conf_existing), { "get_config": dr.get_config, "destroy": dr.destroy, "set_recovery_site": dr.set_recovery_site, "status_all_sites_plaintext": dr.status_all_sites_plaintext, }, ) if name == "remote_node": return bind_all( env, middleware.build( middleware_factory.cib, middleware_factory.corosync_conf_existing, ), { "node_add_remote": remote_node.node_add_remote, "node_add_guest": remote_node.node_add_guest, "node_remove_remote": remote_node.node_remove_remote, "node_remove_guest": remote_node.node_remove_guest, }, ) if name == "constraint_colocation": return bind_all( env, middleware.build(middleware_factory.cib), { "create_with_set": constraint_colocation.create_with_set, "config": constraint_colocation.config, }, ) if name == "constraint_order": return bind_all( env, middleware.build(middleware_factory.cib), { "create_with_set": constraint_order.create_with_set, "config": constraint_order.config, }, ) if name == "constraint_ticket": return bind_all( env, middleware.build(middleware_factory.cib), { "create_with_set": constraint_ticket.create_with_set, "config": constraint_ticket.config, "create": constraint_ticket.create, "remove": constraint_ticket.remove, }, ) if name == "fencing_topology": return bind_all( env, middleware.build(middleware_factory.cib), { "add_level": fencing_topology.add_level, "get_config": fencing_topology.get_config, "remove_all_levels": fencing_topology.remove_all_levels, "remove_levels_by_params": ( fencing_topology.remove_levels_by_params ), "verify": fencing_topology.verify, }, ) if name == "node": return bind_all( env, middleware.build(middleware_factory.cib), { "maintenance_unmaintenance_all": ( node.maintenance_unmaintenance_all ), "maintenance_unmaintenance_list": ( node.maintenance_unmaintenance_list ), "maintenance_unmaintenance_local": ( node.maintenance_unmaintenance_local ), "standby_unstandby_all": node.standby_unstandby_all, "standby_unstandby_list": node.standby_unstandby_list, "standby_unstandby_local": node.standby_unstandby_local, }, ) if name == "pcsd": return bind_all( env, middleware.build(), {"synchronize_ssl_certificate": pcsd.synchronize_ssl_certificate}, ) if name == "qdevice": return bind_all( env, middleware.build(), { "qdevice_status_text": qdevice.qdevice_status_text, "qdevice_setup": qdevice.qdevice_setup, "qdevice_destroy": qdevice.qdevice_destroy, "qdevice_start": qdevice.qdevice_start, "qdevice_stop": qdevice.qdevice_stop, "qdevice_kill": qdevice.qdevice_kill, "qdevice_enable": qdevice.qdevice_enable, "qdevice_disable": qdevice.qdevice_disable, # following commands are internal use only, called from pcsd "client_net_setup": qdevice.client_net_setup, "client_net_import_certificate": ( qdevice.client_net_import_certificate ), "client_net_destroy": qdevice.client_net_destroy, "qdevice_net_sign_certificate_request": ( qdevice.qdevice_net_sign_certificate_request ), }, ) if name == "quorum": return bind_all( env, middleware.build(middleware_factory.corosync_conf_existing), { "add_device": quorum.add_device, "get_config": quorum.get_config, "remove_device": quorum.remove_device, "remove_device_heuristics": quorum.remove_device_heuristics, "set_expected_votes_live": 
quorum.set_expected_votes_live, "set_options": quorum.set_options, "status_text": quorum.status_text, "status_device_text": quorum.status_device_text, "update_device": quorum.update_device, }, ) if name == "resource_agent": return bind_all( env, middleware.build(), { "describe_agent": resource_agent.describe_agent, "list_agents": resource_agent.list_agents, "list_agents_for_standard_and_provider": ( resource_agent.list_agents_for_standard_and_provider ), "list_ocf_providers": resource_agent.list_ocf_providers, "list_standards": resource_agent.list_standards, }, ) if name == "resource": return bind_all( env, middleware.build( middleware_factory.cib, middleware_factory.corosync_conf_existing, ), { "ban": resource.ban, "bundle_create": resource.bundle_create, "bundle_reset": resource.bundle_reset, "bundle_update": resource.bundle_update, "create": resource.create, "create_as_clone": resource.create_as_clone, "create_in_group": resource.create_in_group, "create_into_bundle": resource.create_into_bundle, "disable": resource.disable, "disable_safe": resource.disable_safe, "disable_simulate": resource.disable_simulate, "enable": resource.enable, "get_failcounts": resource.get_failcounts, "group_add": resource.group_add, "manage": resource.manage, "move": resource.move, "move_autoclean": resource.move_autoclean, "get_resource_relations_tree": ( resource.get_resource_relations_tree ), "unmanage": resource.unmanage, "unmove_unban": resource.unmove_unban, }, ) if name == "cib_options": return bind_all( env, middleware.build( middleware_factory.cib, ), { "operation_defaults_config": cib_options.operation_defaults_config, "operation_defaults_create": cib_options.operation_defaults_create, "operation_defaults_remove": cib_options.operation_defaults_remove, "operation_defaults_update": cib_options.operation_defaults_update, "resource_defaults_config": cib_options.resource_defaults_config, "resource_defaults_create": cib_options.resource_defaults_create, "resource_defaults_remove": cib_options.resource_defaults_remove, "resource_defaults_update": cib_options.resource_defaults_update, }, ) if name == "status": return bind_all( env, middleware.build( middleware_factory.cib, middleware_factory.corosync_conf_existing, ), { "full_cluster_status_plaintext": ( status.full_cluster_status_plaintext ), }, ) if name == "stonith": return bind_all( env, middleware.build( middleware_factory.cib, middleware_factory.corosync_conf_existing, ), { "create": stonith.create, "create_in_group": stonith.create_in_group, "history_get_text": stonith.history_get_text, "history_cleanup": stonith.history_cleanup, "history_update": stonith.history_update, "update_scsi_devices": stonith.update_scsi_devices, "update_scsi_devices_add_remove": stonith.update_scsi_devices_add_remove, }, ) if name == "sbd": return bind_all( env, middleware.build(), { "enable_sbd": sbd.enable_sbd, "disable_sbd": sbd.disable_sbd, "get_cluster_sbd_status": sbd.get_cluster_sbd_status, "get_cluster_sbd_config": sbd.get_cluster_sbd_config, "get_local_sbd_config": sbd.get_local_sbd_config, "initialize_block_devices": sbd.initialize_block_devices, "get_local_devices_info": sbd.get_local_devices_info, "set_message": sbd.set_message, "get_local_available_watchdogs": ( sbd.get_local_available_watchdogs ), "test_local_watchdog": sbd.test_local_watchdog, }, ) if name == "services": return bind_all( env, middleware.build(), { "start_service": services.start_service, "stop_service": services.stop_service, "enable_service": services.enable_service, "disable_service": 
services.disable_service, "get_services_info": services.get_services_info, }, ) if name == "scsi": return bind_all( env, middleware.build(), { "unfence_node": scsi.unfence_node, }, ) if name == "stonith_agent": return bind_all( env, middleware.build(), { "describe_agent": stonith_agent.describe_agent, "list_agents": stonith_agent.list_agents, }, ) if name == "tag": return bind_all( env, middleware.build(middleware_factory.cib), { "config": tag.config, "create": tag.create, "remove": tag.remove, "update": tag.update, }, ) raise Exception("No library part '{0}'".format(name)) class Library: # pylint: disable=too-few-public-methods def __init__(self, env, middleware_factory): self.env = env self.middleware_factory = middleware_factory def __getattr__(self, name): return get_module(self.env, self.middleware_factory, name) pcs-0.10.11/pcs/cli/common/middleware.py000066400000000000000000000071641412706364600177710ustar00rootroot00000000000000from collections import namedtuple import fcntl from functools import partial from pcs.cli.reports.output import error def build(*middleware_list): def run(command, env, *args, **kwargs): next_in_line = command for next_command in reversed(middleware_list): next_in_line = partial(next_command, next_in_line) return next_in_line(env, *args, **kwargs) return run def cib(filename, touch_cib_file): """ return configured middleware that cares about the local cib string filename is the path to the local cib file (empty when the live cib is used) callable touch_cib_file makes sure the file at the given path exists before it is read """ def apply(next_in_line, env, *args, **kwargs): if filename: touch_cib_file(filename) try: with open(filename, mode="r") as cib_file: # the lock is released when the file gets closed on leaving # the with statement fcntl.flock(cib_file.fileno(), fcntl.LOCK_SH) original_content = cib_file.read() except EnvironmentError as e: raise error( "Cannot read cib file '{0}': '{1}'".format(filename, str(e)) ) from e env.cib_data = original_content result_of_next = next_in_line(env, *args, **kwargs) if filename and env.cib_data != original_content: try: with open(filename, mode="w") as cib_file: # the lock is released when the file gets closed on leaving # the with statement fcntl.flock(cib_file.fileno(), fcntl.LOCK_EX) cib_file.write(env.cib_data) except EnvironmentError as e: raise error( "Cannot write cib file '{0}': '{1}'".format( filename, str(e) ) ) from e return result_of_next return apply def corosync_conf_existing(local_file_path): def apply(next_in_line, env, *args, **kwargs): if local_file_path: try: with open(local_file_path, "r") as local_file: # the lock is released when the file gets closed on leaving # the with statement fcntl.flock(local_file.fileno(), fcntl.LOCK_SH) original_content = local_file.read() except EnvironmentError as e: raise error( "Unable to read {0}: {1}".format( local_file_path, e.strerror ) ) from e env.corosync_conf_data = original_content result_of_next = next_in_line(env, *args, **kwargs) if local_file_path and env.corosync_conf_data != original_content: try: with open(local_file_path, "w") as local_file: # the lock is released when the file gets closed on leaving # the with statement fcntl.flock(local_file.fileno(), fcntl.LOCK_EX) local_file.write(env.corosync_conf_data) except EnvironmentError as e: raise error( "Unable to write {0}: {1}".format( local_file_path, e.strerror ) ) from e return result_of_next return apply def create_middleware_factory(**kwargs): """ Commandline options: no options """ return 
pcs-0.10.11/pcs/cli/common/parse_args.py000066400000000000000000000514761412706364600200010ustar00rootroot00000000000000
from typing import (
    Mapping,
    Iterable,
    Union,
)

from pcs.cli.common.errors import (
    CmdLineInputError,
    HINT_SYNTAX_CHANGE,
)
from pcs.common.str_tools import (
    format_list,
    format_plural,
)
from pcs.common.tools import timeout_to_seconds

ModifierValueType = Union[None, bool, str]

ARG_TYPE_DELIMITER = "%"

# h = help, f = file,
# p = password (cluster auth), u = user (cluster auth),
PCS_SHORT_OPTIONS = "hf:p:u:"
PCS_LONG_OPTIONS = [
    "debug",
    "version",
    "help",
    "fullhelp",
    "force",
    "skip-offline",
    # TODO remove, deprecated command 'pcs config import-cman'
    "interactive",
    "autodelete",
    "simulate",
    "all",
    "full",
    "local",
    "wait",
    "config",
    "start",
    "enable",
    "disabled",
    "off",
    "request-timeout=",
    "brief",
    # resource (safe-)disable
    "safe",
    "no-strict",
    # resource cleanup | refresh
    "strict",
    "pacemaker",
    "corosync",
    "no-default-ops",
    "defaults",
    "nodesc",
    "master",
    "name=",
    "group=",
    "node=",
    "from=",
    "to=",
    "after=",
    "before=",
    "corosync_conf=",
    "booth-conf=",
    "booth-key=",
    "no-watchdog-validation",
    "no-keys-sync",
    # in pcs status - do not display resource status on inactive node
    "hide-inactive",
    # pcs resource (un)manage - enable or disable monitor operations
    "monitor",
    # TODO remove
    # used only in deprecated 'pcs resource|stonith show'
    "groups",
    # "pcs resource clear --expired" - only clear expired moves and bans
    "expired",
    # disable evaluating whether rules are expired
    "no-expire-check",
    # allow overwriting existing files, currently meant for / used in CLI only
    "overwrite",
    # output format of commands, e.g.: json, cmd, text, ...
    "output-format=",
    # auth token
    "token=",
]


def split_list(arg_list, separator):
    """Return a list of lists of arg_list items using separator as a delimiter"""
    separator_indexes = [i for i, x in enumerate(arg_list) if x == separator]
    bounds = zip(
        [0] + [i + 1 for i in separator_indexes], separator_indexes + [None]
    )
    return [arg_list[i:j] for i, j in bounds]


def split_list_by_any_keywords(arg_list, keyword_label):
    """
    Return a dict mapping each keyword (any arg not containing =) to the list
    of args following it

    iterable arg_list -- (part of) argv
    string keyword_label -- description of all keywords
    """
    if "=" in arg_list[0]:
        raise CmdLineInputError(
            "Invalid character '=' in {} '{}'".format(
                keyword_label,
                arg_list[0],
            )
        )

    current_keyword = None
    groups = {}
    for arg in arg_list:
        if "=" in arg:
            groups[current_keyword].append(arg)
        else:
            current_keyword = arg
            if current_keyword in groups:
                raise CmdLineInputError(
                    "{} '{}' defined multiple times".format(
                        keyword_label.capitalize(), current_keyword
                    )
                )
            groups[current_keyword] = []
    return groups


def split_option(arg, allow_empty_value=True):
    """
    Get (key, value) from a key=value commandline argument.

    Split the argument by the first = and return resulting parts. Raise
    CmdLineInputError if the argument cannot be split.

    string arg -- commandline argument
    allow_empty_value -- if True, empty value is allowed. Otherwise,
        CmdLineInputError exception is raised

    Commandline options: no options
    """
    if "=" not in arg:
        raise CmdLineInputError("missing value of '{0}' option".format(arg))
    if arg.startswith("="):
        raise CmdLineInputError("missing key in '{0}' option".format(arg))
    key, value = arg.split("=", 1)
    if not (value or allow_empty_value):
        raise CmdLineInputError("value of '{0}' option is empty".format(key))
    return key, value
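# ---------------------------------------------------------------------------
# Illustrative examples (not part of the original module) of the splitting
# helpers above; the argument values are made up:
#
#   split_list(["a=1", "and", "b=2"], "and")
#       -> [["a=1"], ["b=2"]]
#   split_list_by_any_keywords(["node1", "attr=val"], "node name")
#       -> {"node1": ["attr=val"]}
#   split_option("name=value")
#       -> ("name", "value")
# ---------------------------------------------------------------------------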
def prepare_options(cmdline_args, allowed_repeatable_options=()):
    """
    Get a dict of options from cmdline key=value args

    iterable cmdline_args -- command line arguments
    iterable allowed_repeatable_options -- options that can be set several
        times

    Commandline options: no options
    """
    options = dict()
    for arg in cmdline_args:
        name, value = split_option(arg)
        if name not in options:
            if name in allowed_repeatable_options:
                options[name] = [value]
            else:
                options[name] = value
        elif name in allowed_repeatable_options:
            options[name].append(value)
        elif options[name] != value:
            raise CmdLineInputError(
                (
                    "duplicate option '{0}' with different values '{1}' and "
                    "'{2}'"
                ).format(name, options[name], value)
            )
    return options


def prepare_options_allowed(
    cmdline_args, allowed_options, allowed_repeatable_options=()
):
    """
    Get a dict of options from cmdline key=value args, raise on unallowed key

    iterable cmdline_args -- command line arguments
    iterable allowed_options -- list of allowed options
    iterable allowed_repeatable_options -- options that can be set several
        times

    Commandline options: no options
    """
    parsed_options = prepare_options(
        cmdline_args, allowed_repeatable_options=allowed_repeatable_options
    )
    unknown_options = frozenset(parsed_options.keys()) - frozenset(
        allowed_options
    )
    if unknown_options:
        raise CmdLineInputError(
            "Unknown option{s} '{options}'".format(
                s=("s" if len(unknown_options) > 1 else ""),
                options="', '".join(sorted(unknown_options)),
            )
        )
    return parsed_options
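# ---------------------------------------------------------------------------
# Illustrative examples (not part of the original module) of the option
# parsing helpers above; the argument values are made up:
#
#   prepare_options(["a=1", "b=2", "a=1"])
#       -> {"a": "1", "b": "2"}
#   prepare_options(["a=1", "a=2"])
#       -> raises CmdLineInputError (duplicate option, different values)
#   prepare_options(["op=monitor", "op=start"], allowed_repeatable_options=("op",))
#       -> {"op": ["monitor", "start"]}
# ---------------------------------------------------------------------------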
def group_by_keywords(
    arg_list,
    keyword_set,
    implicit_first_group_key=None,
    keyword_repeat_allowed=True,
    group_repeated_keywords=None,
    only_found_keywords=False,
):
    """
    Return a dictionary with keywords as keys and the following arguments as
    values. For example when keywords are "first" and "second" then for
    arg_list ["first", 1, 2, "second", 3] it returns
    {"first": [1, 2], "second": [3]}

    list arg_list is commandline arguments containing keywords
    set keyword_set contains all expected keywords
    string implicit_first_group_key is the key for capturing arguments before
        the occurrence of the first keyword. implicit_first_group_key is not a
        keyword => its occurrence in args is considered an ordinary argument.
    bool keyword_repeat_allowed is the flag to turn on/off checking the
        uniqueness of each keyword in arg_list.
    list group_repeated_keywords contains keywords for which each occurrence
        is packed separately. For example when keywords are "first" and
        "second" and group_repeated_keywords is ["first"] then for arg_list
        ["first", 1, 2, "second", 3, "first", 4] it returns
        {"first": [[1, 2], [4]], "second": [3]}. Repeating is allowed for
        these keywords.
    bool only_found_keywords is a flag deciding whether the result should
        contain only keywords that actually appeared in arg_list.
    """

    def get_keywords_for_grouping():
        if not group_repeated_keywords:
            return []
        # implicit_first_group_key is not a keyword: when it is in
        # group_repeated_keywords but not in keyword_set, it is considered
        # unknown.
        unknown_keywords = set(group_repeated_keywords) - set(keyword_set)
        if unknown_keywords:
            # to avoid a developer mistake
            raise AssertionError(
                "Keywords in grouping not in keyword set: {0}".format(
                    ", ".join(unknown_keywords)
                )
            )
        return group_repeated_keywords

    def get_completed_groups():
        completed_groups = groups.copy()
        if not only_found_keywords:
            for keyword in keyword_set:
                if keyword not in completed_groups:
                    completed_groups[keyword] = []
            if (
                implicit_first_group_key
                and implicit_first_group_key not in completed_groups
            ):
                completed_groups[implicit_first_group_key] = []
        return completed_groups

    def is_acceptable_keyword_occurence(keyword):
        return (
            keyword not in groups.keys()
            or keyword_repeat_allowed
            or keyword in keywords_for_grouping
        )

    def process_keyword(keyword):
        if not is_acceptable_keyword_occurence(keyword):
            raise CmdLineInputError(
                "'{0}' cannot be used more than once".format(keyword)
            )
        groups.setdefault(keyword, [])
        if keyword in keywords_for_grouping:
            groups[keyword].append([])

    def process_non_keyword(keyword, arg):
        place = groups[keyword]
        if keyword in keywords_for_grouping:
            place = place[-1]
        place.append(arg)

    groups = {}
    keywords_for_grouping = get_keywords_for_grouping()
    if arg_list:
        current_keyword = None
        if arg_list[0] not in keyword_set:
            if not implicit_first_group_key:
                raise CmdLineInputError()
            process_keyword(implicit_first_group_key)
            current_keyword = implicit_first_group_key
        for arg in arg_list:
            if arg in keyword_set:
                process_keyword(arg)
                current_keyword = arg
            else:
                process_non_keyword(current_keyword, arg)

    return get_completed_groups()


def parse_typed_arg(arg, allowed_types, default_type):
    """
    Get (type, value) from a typed commandline argument.

    Split the argument by the type separator and return the type and the
    value. Raise CmdLineInputError if the argument format or type is not
    valid.

    string arg -- commandline argument
    Iterable allowed_types -- list of allowed argument types
    string default_type -- type to return if the argument doesn't specify a
        type
    """
    if ARG_TYPE_DELIMITER not in arg:
        return default_type, arg
    arg_type, arg_value = arg.split(ARG_TYPE_DELIMITER, 1)
    if not arg_type:
        return default_type, arg_value
    if arg_type not in allowed_types:
        raise CmdLineInputError(
            (
                "'{arg_type}' is not an allowed type for '{arg_full}', use "
                "{hint}"
            ).format(
                arg_type=arg_type,
                arg_full=arg,
                hint=", ".join(sorted(allowed_types)),
            )
        )
    return arg_type, arg_value


def _is_num(arg):
    return arg.isdigit() or arg.lower() == "infinity"


def _is_float(arg: str) -> bool:
    try:
        float(arg)
        return True
    except ValueError:
        return False


def _is_negative_num(arg: str) -> bool:
    return arg.startswith("-") and (_is_num(arg[1:]) or _is_float(arg))


def is_short_option_expecting_value(arg):
    return (
        len(arg) == 2
        and arg[0] == "-"
        and "{0}:".format(arg[1]) in PCS_SHORT_OPTIONS
    )


def is_long_option_expecting_value(arg):
    return (
        len(arg) > 2
        and arg[0:2] == "--"
        and "{0}=".format(arg[2:]) in PCS_LONG_OPTIONS
    )


def is_option_expecting_value(arg):
    return is_short_option_expecting_value(
        arg
    ) or is_long_option_expecting_value(arg)


# DEPRECATED
# TODO remove
# This function is called only by deprecated code for parsing argv containing
# negative numbers without -- prepending them.
def filter_out_non_option_negative_numbers(arg_list):
    """
    Return arg_list without non-option negative numbers.
    Negative numbers following an option expecting a value are kept.

    There is a problematic legacy:
    Argument "--" has special meaning: it can be used to signal that no more
    options will follow.
This would solve the problem with negative numbers in a standard way: there would be no special approach to negative numbers, everything would be left in the hands of users. We cannot use "--" as it would be a backward incompatible change: * "pcs ... -infinity" would not work any more, users would have to switch to "pcs ... -- ... -infinity" * previously, position of some --options mattered, for example "--clone ", this syntax would not be possible with the "--" in place Currently used --options, which may be problematic when switching to "--": * --group , --before | --after * pcs resource | stonith create, pcs resource group add, pcs tag update * They have a single argument, so they would work even with --. But the command may look weird: pcs resource create --group G --after R2 -- R3 ocf:pacemaker:Dummy vs. current command pcs resource create R3 ocf:pacemaker:Dummy --group G --after R2 list arg_list contains command line arguments """ args_without_negative_nums = [] args_filtered_out = [] for i, arg in enumerate(arg_list): prev_arg = arg_list[i - 1] if i > 0 else "" if not _is_negative_num(arg) or is_option_expecting_value(prev_arg): args_without_negative_nums.append(arg) else: args_filtered_out.append(arg) return args_without_negative_nums, args_filtered_out # DEPRECATED # TODO remove # This function is called only by deprecated code for parsing argv containing # negative numbers without -- prepending them. def filter_out_options(arg_list): """ Return arg_list without options and its negative numbers. See a comment in filter_out_non_option_negative_numbers. list arg_list contains command line arguments """ args_without_options = [] for i, arg in enumerate(arg_list): prev_arg = arg_list[i - 1] if i > 0 else "" if not is_option_expecting_value(prev_arg) and ( not arg.startswith("-") or arg == "-" or _is_negative_num(arg) ): args_without_options.append(arg) return args_without_options def wait_to_timeout(wait: Union[bool, str, None]) -> int: if wait is False: return -1 if wait is None: return 0 timeout = timeout_to_seconds(wait) if timeout is None: raise CmdLineInputError(f"'{wait}' is not a valid interval value") return timeout class InputModifiers: def __init__(self, options: Mapping[str, ModifierValueType]): self._defined_options = set(options.keys()) self._options = dict(options) self._options.update( { # boolean values "--all": "--all" in options, "--autodelete": "--autodelete" in options, "--brief": "--brief" in options, "--config": "--config" in options, "--corosync": "--corosync" in options, "--debug": "--debug" in options, "--defaults": "--defaults" in options, "--disabled": "--disabled" in options, "--enable": "--enable" in options, "--expired": "--expired" in options, "--force": "--force" in options, "--full": "--full" in options, # TODO remove # used only in deprecated 'pcs resource|stonith show' "--groups": "--groups" in options, "--hide-inactive": "--hide-inactive" in options, # TODO remove, deprecated command 'pcs config import-cman' "--interactive": "--interactive" in options, "--local": "--local" in options, "--master": "--master" in options, "--monitor": "--monitor" in options, "--no-default-ops": "--no-default-ops" in options, "--nodesc": "--nodesc" in options, "--no-expire-check": "--no-expire-check" in options, "--no-keys-sync": "--no-keys-sync" in options, "--no-strict": "--no-strict" in options, "--no-watchdog-validation": "--no-watchdog-validation" in options, "--off": "--off" in options, "--overwrite": "--overwrite" in options, "--pacemaker": "--pacemaker" in 
options, "--safe": "--safe" in options, "--simulate": "--simulate" in options, "--skip-offline": "--skip-offline" in options, "--start": "--start" in options, "--strict": "--strict" in options, # string values "--after": options.get("--after", None), "--before": options.get("--before", None), "--booth-conf": options.get("--booth-conf", None), "--booth-key": options.get("--booth-key", None), "--corosync_conf": options.get("--corosync_conf", None), "--from": options.get("--from", None), "--group": options.get("--group", None), "--name": options.get("--name", None), "--node": options.get("--node", None), "--output-format": options.get("--output-format", "text"), "--request-timeout": options.get("--request-timeout", None), "--to": options.get("--to", None), "--token": options.get("--token", None), "--wait": options.get("--wait", False), "-f": options.get("-f", None), "-p": options.get("-p", None), "-u": options.get("-u", None), } ) def get_subset(self, *options, **custom_options): opt_dict = { opt: self.get(opt) for opt in options if self.is_specified(opt) } opt_dict.update(custom_options) return InputModifiers(opt_dict) def ensure_only_supported( self, *supported_options, hint_syntax_changed: bool = False ): unsupported_options = ( # --debug is supported in all commands self._defined_options - set(supported_options) - set(["--debug"]) ) if unsupported_options: pluralize = lambda word: format_plural(unsupported_options, word) raise CmdLineInputError( "Specified {option} {option_list} {_is} not supported in this " "command".format( option=pluralize("option"), option_list=format_list(sorted(unsupported_options)), _is=pluralize("is"), ), # Print error messages which point users to the changes section # in pcs manpage. # To be removed in the next significant version. hint=(HINT_SYNTAX_CHANGE if hint_syntax_changed else None), ) def ensure_not_mutually_exclusive(self, *mutually_exclusive): options_to_report = self._defined_options & set(mutually_exclusive) if len(options_to_report) > 1: raise CmdLineInputError( "Only one of {} can be used".format( format_list(sorted(options_to_report)) ) ) def ensure_not_incompatible(self, checked, incompatible): if not checked in self._defined_options: return disallowed = self._defined_options & set(incompatible) if disallowed: raise CmdLineInputError( "'{}' cannot be used with {}".format( checked, format_list(sorted(disallowed)) ) ) def ensure_dependency_satisfied( self, main_option: str, dependent_options: Iterable[str], ) -> None: """ Raise a `CmdLineInputError` exception if any of `dependent_options` is present without `main_option` being present. 
main_option -- option on which `dependent_options` depend dependent_options -- none of these options can be specified if `main_option` was not """ if main_option in self._defined_options: return disallowed = self._defined_options & set(dependent_options) if disallowed: raise CmdLineInputError( "{} cannot be used without '{}'".format( format_list(sorted(disallowed)), main_option ) ) def is_specified(self, option: str) -> bool: return option in self._defined_options def is_specified_any(self, option_list: Iterable[str]) -> bool: for option in option_list: if self.is_specified(option): return True return False def get( self, option: str, default: ModifierValueType = None ) -> ModifierValueType: if option in self._defined_options: return self._options[option] if default is not None: return default if option in self._options: return self._options[option] raise AssertionError(f"Non existing default value for '{option}'") pcs-0.10.11/pcs/cli/common/printable_tree.py000066400000000000000000000026751412706364600206570ustar00rootroot00000000000000from typing import Sequence class PrintableTreeNode: @property def members(self) -> Sequence["PrintableTreeNode"]: raise NotImplementedError() @property def detail(self) -> Sequence[str]: raise NotImplementedError() @property def is_leaf(self) -> bool: raise NotImplementedError() def get_title(self, verbose: bool) -> str: raise NotImplementedError() def tree_to_lines( node: PrintableTreeNode, verbose: bool = False, title_prefix: str = "", indent: str = "", ) -> Sequence[str]: """ Return sequence of strings representing lines to print out tree structure on command line. """ result = [] note = "" if node.is_leaf: note = " [displayed elsewhere]" title = node.get_title(verbose) result.append(f"{title_prefix}{title}{note}") if node.is_leaf: return result _indent = "| " if not node.members: _indent = " " for line in node.detail: result.append(f"{indent}{_indent}{line}") _indent = "| " _title_prefix = "|- " for member in node.members: if member == node.members[-1]: _indent = " " _title_prefix = "`- " result.extend( tree_to_lines( member, verbose, indent=f"{indent}{_indent}", title_prefix=f"{indent}{_title_prefix}", ) ) return result pcs-0.10.11/pcs/cli/common/routing.py000066400000000000000000000021771412706364600173440ustar00rootroot00000000000000from typing import ( Any, Callable, List, Mapping, Optional, ) from pcs import utils from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import InputModifiers CliCmdInterface = Callable[[Any, List[str], InputModifiers], None] def create_router( cmd_map: Mapping[str, CliCmdInterface], usage_sub_cmd: List[str], default_cmd: Optional[str] = None, ) -> CliCmdInterface: def _router(lib: Any, argv: List[str], modifiers: InputModifiers) -> None: if argv: sub_cmd, *argv_next = argv else: if default_cmd is None: raise CmdLineInputError() sub_cmd, argv_next = default_cmd, [] try: if sub_cmd not in cmd_map: sub_cmd = "" raise CmdLineInputError() return cmd_map[sub_cmd](lib, argv_next, modifiers) except CmdLineInputError as e: if not usage_sub_cmd: raise return utils.exit_on_cmdline_input_errror( e, usage_sub_cmd[0], (usage_sub_cmd[1:] + [sub_cmd]) ) return _router pcs-0.10.11/pcs/cli/common/tools.py000066400000000000000000000007101412706364600170040ustar00rootroot00000000000000from typing import Union from pcs.common.tools import timeout_to_seconds def timeout_to_seconds_legacy( timeout: Union[int, str] ) -> Union[int, str, None]: """ Transform pacemaker style timeout to number of seconds. 
If timeout is not valid then `timeout` is returned.

    timeout -- timeout string
    """
    parsed_timeout = timeout_to_seconds(timeout)
    if parsed_timeout is None:
        return timeout
    return parsed_timeout
pcs-0.10.11/pcs/cli/constraint/000077500000000000000000000000001412706364600161705ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/constraint/__init__.py000066400000000000000000000000001412706364600202700ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/constraint/command.py000066400000000000000000000052441412706364600201650ustar00rootroot00000000000000
from pcs.cli.constraint import parse_args
from pcs.common.reports.constraints import constraint_with_sets
from pcs.common.str_tools import indent


def create_with_set(create_with_set_library_call, argv, modifiers):
    """
    callable create_with_set_library_call create constraint with set
    list argv part of commandline args
        see usage for "constraint (colocation|resource|ticket) set"
    dict like object modifiers can contain
        "force" allows a resource in a clone/master and duplicate constraints

    Commandline options:
      * --force - allow resource inside clone (or master), allow duplicate
        element
      * -f - CIB file
    """
    resource_set_list, constraint_options = parse_args.prepare_set_args(argv)
    create_with_set_library_call(
        resource_set_list,
        constraint_options,
        resource_in_clone_alowed=modifiers.get("--force"),
        duplication_alowed=modifiers.get("--force"),
    )


def _config_constraints_with_set(constraint_list, show_detail, indent_step=2):
    """
    Return a list of console lines with info about constraints

    list of dict constraint_list see constraint in pcs/lib/exchange_formats.md
    bool show_detail whether to show ids with options
    int indent_step is the number of spaces used for indenting

    Commandline options: no options
    """
    return ["Resource Sets:"] + indent(
        [
            constraint_with_sets(constraint, with_id=show_detail)
            for constraint in constraint_list
        ],
        indent_step=indent_step,
    )


def config_cmd(caption, load_constraints, format_options, modifiers):
    """
    Load constraints and return a list of console lines with info about
    constraints

    string caption for example "Ticket Constraints:"
    callable load_constraints which returns desired constraints as a
        dictionary like {"plain": [], "with_resource_sets": []}
    callable format_options takes a dict of options and a show_detail flag
        (bool) and returns a string with the constraint formatted for
        commandline
    modifiers dict like object with command modifiers

    Commandline options:
      * -f - CIB file
      * --full - print more details
    """
    show_detail = modifiers.get("--full")
    constraints = load_constraints()

    line_list = [caption]
    line_list.extend(
        [
            " " + format_options(constraint_options_dict, show_detail)
            for constraint_options_dict in constraints["plain"]
        ]
    )

    if constraints["with_resource_sets"]:
        line_list.extend(
            indent(
                _config_constraints_with_set(
                    constraints["with_resource_sets"], show_detail
                )
            )
        )

    return line_list
pcs-0.10.11/pcs/cli/constraint/parse_args.py000066400000000000000000000023061412706364600206710ustar00rootroot00000000000000
from pcs.cli.common import parse_args
from pcs.cli.common.errors import CmdLineInputError


def prepare_resource_sets(cmdline_args):
    return [
        {
            "ids": [id for id in args if "=" not in id],
            "options": parse_args.prepare_options(
                [opt for opt in args if "=" in opt]
            ),
        }
        for args in parse_args.split_list(cmdline_args, "set")
    ]


def prepare_set_args(argv):
    if argv.count("setoptions") > 1:
        raise CmdLineInputError(
            "Keyword 'setoptions' may be mentioned at most once"
        )

    resource_set_args, constraint_options_args = (
        parse_args.split_list(argv, "setoptions")
        if "setoptions" in argv
        else (argv, [])
    )

    if not resource_set_args:
        raise CmdLineInputError()

    resource_set_list = prepare_resource_sets(resource_set_args)
    if not resource_set_list or not all(
        resource_set["ids"] for resource_set in resource_set_list
    ):
        raise CmdLineInputError()

    constraint_options = {}
    if constraint_options_args:
        constraint_options = parse_args.prepare_options(
            constraint_options_args
        )

    return (resource_set_list, constraint_options)
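# ---------------------------------------------------------------------------
# Illustrative example (not part of the original module): how
# prepare_set_args() above splits "constraint ... set" arguments; the
# resource ids and option values are made up:
#
#   prepare_set_args(
#       ["A", "B", "sequential=true", "set", "C", "setoptions", "score=-1000"]
#   )
#   -> (
#       [
#           {"ids": ["A", "B"], "options": {"sequential": "true"}},
#           {"ids": ["C"], "options": {}},
#       ],
#       {"score": "-1000"},
#   )
# ---------------------------------------------------------------------------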
pcs-0.10.11/pcs/cli/constraint_colocation/000077500000000000000000000000001412706364600204020ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/constraint_colocation/__init__.py000066400000000000000000000000001412706364600225010ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/constraint_colocation/command.py000066400000000000000000000032271412706364600223760ustar00rootroot00000000000000
from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.constraint import command
from pcs.cli.reports.output import warn
from pcs.common.reports import constraints


def create_with_set(lib, argv, modifiers):
    """
    create colocation constraint with resource set

    object lib exposes library
    list argv see usage for "constraint colocation set"
    dict like object modifiers can contain
        "force" allows a resource in a clone/master and duplicate constraints

    Options:
      * --force - allow resource inside clone (or master), allow duplicate
        element
      * -f - CIB file
    """
    modifiers.ensure_only_supported("-f", "--force")
    command.create_with_set(
        lib.constraint_colocation.create_with_set,
        argv,
        modifiers,
    )


def show(lib, argv, modifiers):
    warn(
        "This command is deprecated and will be removed. "
        "Please use 'pcs constraint colocation config' instead.",
        stderr=True,
    )
    return config_cmd(lib, argv, modifiers)


def config_cmd(lib, argv, modifiers):
    """
    show all colocation constraints

    object lib exposes library
    list argv see usage for "constraint colocation show"
    dict like object modifiers can contain "full"

    Options:
      * --full - print more details
      * -f - CIB file
    """
    modifiers.ensure_only_supported("-f", "--full")
    if argv:
        raise CmdLineInputError()
    print(
        "\n".join(
            command.config_cmd(
                "Colocation Constraints:",
                lib.constraint_colocation.config,
                constraints.colocation_plain,
                modifiers,
            )
        )
    )
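# ---------------------------------------------------------------------------
# Illustrative example (not part of the original module): a command line such
# as the following (the resource ids are made up)
#
#   pcs constraint colocation set D1 D2 setoptions score=-1000
#
# reaches create_with_set() above with
# argv == ["D1", "D2", "setoptions", "score=-1000"], which prepare_set_args()
# in pcs/cli/constraint/parse_args.py turns into one resource set and the
# {"score": "-1000"} constraint options.
# ---------------------------------------------------------------------------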
" "Please use 'pcs constraint order config' instead.", stderr=True, ) return config_cmd(lib, argv, modifiers) def config_cmd(lib, argv, modifiers): """ show all order constraints object lib exposes library list argv see usage for "constraint colocation show" dict like object modifiers can contain "full" Options: * --full - print more details * -f - CIB file """ modifiers.ensure_only_supported("-f", "--full") if argv: raise CmdLineInputError() print( "\n".join( command.config_cmd( "Ordering Constraints:", lib.constraint_order.config, constraints.order_plain, modifiers, ) ) ) pcs-0.10.11/pcs/cli/constraint_ticket/000077500000000000000000000000001412706364600175335ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/constraint_ticket/__init__.py000066400000000000000000000000001412706364600216320ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/constraint_ticket/command.py000066400000000000000000000057521412706364600215340ustar00rootroot00000000000000from pcs.cli.common.errors import CmdLineInputError from pcs.cli.constraint import command from pcs.cli.constraint_ticket import parse_args from pcs.cli.reports.output import error, warn from pcs.common.reports import constraints def create_with_set(lib, argv, modifiers): """ create ticket constraint with resource set object lib exposes library list argv see usage for "constraint colocation set" dict like object modifiers can contain "force" allows resource in clone/master and constraint duplicity Options: * --force - allow resource inside clone (or master), allow duplicate element * -f - CIB file """ modifiers.ensure_only_supported("--force", "-f") command.create_with_set( lib.constraint_ticket.create_with_set, argv, modifiers, ) def add(lib, argv, modifiers): """ create ticket constraint object lib exposes library list argv see usage for "constraint colocation add" dict like object modifiers can contain "force" allows resource in clone/master and constraint duplicity Options: * --force - allow resource inside clone (or master), allow duplicate element * -f - CIB file """ modifiers.ensure_only_supported("--force", "-f") ticket, resource_id, resource_role, options = parse_args.parse_add(argv) if "rsc-role" in options: raise CmdLineInputError( "Resource role must not be specified among options" + ", specify it before resource id" ) if resource_role: options["rsc-role"] = resource_role lib.constraint_ticket.create( ticket, resource_id, options, resource_in_clone_alowed=modifiers.get("--force"), duplication_alowed=modifiers.get("--force"), ) def remove(lib, argv, modifiers): """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) != 2: raise CmdLineInputError() ticket, resource_id = argv if not lib.constraint_ticket.remove(ticket, resource_id): raise error("no matching ticket constraint found") def show(lib, argv, modifiers): warn( "This command is deprecated and will be removed. 
" "Please use 'pcs constraint ticket config' instead.", stderr=True, ) return config_cmd(lib, argv, modifiers) def config_cmd(lib, argv, modifiers): """ show all ticket constraints object lib exposes library list argv see usage for "constraint colocation show" dict like object modifiers can contain "full" Options: * --full - print more details * -f - CIB file """ modifiers.ensure_only_supported("-f", "--full") if argv: raise CmdLineInputError() print( "\n".join( command.config_cmd( "Ticket Constraints:", lib.constraint_ticket.config, constraints.ticket_plain, modifiers, ) ) ) pcs-0.10.11/pcs/cli/constraint_ticket/parse_args.py000066400000000000000000000020201412706364600222250ustar00rootroot00000000000000from pcs.cli.common import parse_args from pcs.cli.common.errors import CmdLineInputError def separate_tail_option_candidates(arg_list): for i, arg in enumerate(arg_list): if "=" in arg: return arg_list[:i], arg_list[i:] return arg_list, [] def parse_add(arg_list): info, option_candidates = separate_tail_option_candidates(arg_list) if not info: raise CmdLineInputError("Ticket not specified") ticket, resource_specification = info[0], info[1:] if len(resource_specification) not in (1, 2): raise CmdLineInputError( "invalid resource specification: '{0}'".format( " ".join(resource_specification) ) ) if len(resource_specification) == 2: resource_role, resource_id = resource_specification else: resource_role = "" resource_id = resource_specification[0] return ( ticket, resource_id, resource_role, parse_args.prepare_options(option_candidates), ) pcs-0.10.11/pcs/cli/dr.py000066400000000000000000000101321412706364600147600ustar00rootroot00000000000000from typing import ( Any, List, Sequence, ) from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import InputModifiers from pcs.cli.reports.output import error from pcs.common.reports import codes as report_codes from pcs.common.dr import ( DrConfigDto, DrConfigSiteDto, DrSiteStatusDto, ) from pcs.common.interface import dto from pcs.common.str_tools import indent def config( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: None """ modifiers.ensure_only_supported() if argv: raise CmdLineInputError() config_raw = lib.dr.get_config() try: config_dto = dto.from_dict(DrConfigDto, config_raw) except (KeyError, TypeError, ValueError) as e: raise error( "Unable to communicate with pcsd, received response:\n" f"{config_raw}" ) from e lines = ["Local site:"] lines.extend(indent(_config_site_lines(config_dto.local_site))) for site_dto in config_dto.remote_site_list: lines.append("Remote site:") lines.extend(indent(_config_site_lines(site_dto))) print("\n".join(lines)) def _config_site_lines(site_dto: DrConfigSiteDto) -> List[str]: lines = [f"Role: {site_dto.site_role.capitalize()}"] if site_dto.node_list: lines.append("Nodes:") lines.extend(indent(sorted([node.name for node in site_dto.node_list]))) return lines def set_recovery_site( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * --request-timeout - HTTP timeout for node authorization check """ modifiers.ensure_only_supported("--request-timeout") if len(argv) != 1: raise CmdLineInputError() lib.dr.set_recovery_site(argv[0]) def status( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * --full - show full details, node attributes and failcount * --hide-inactive - hide inactive resources * --request-timeout - HTTP timeout for node authorization check """ 
modifiers.ensure_only_supported( "--full", "--hide-inactive", "--request-timeout", ) if argv: raise CmdLineInputError() status_list_raw = lib.dr.status_all_sites_plaintext( hide_inactive_resources=modifiers.get("--hide-inactive"), verbose=modifiers.get("--full"), ) try: status_list = [ dto.from_dict(DrSiteStatusDto, status_raw) for status_raw in status_list_raw ] except (KeyError, TypeError, ValueError) as e: raise error( "Unable to communicate with pcsd, received response:\n" f"{status_list_raw}" ) from e has_errors = False plaintext_parts = [] for site_status in status_list: plaintext_parts.append( "--- {local_remote} cluster - {role} site ---".format( local_remote=("Local" if site_status.local_site else "Remote"), role=site_status.site_role.capitalize(), ) ) if site_status.status_successfully_obtained: plaintext_parts.append(site_status.status_plaintext.strip()) plaintext_parts.extend(["", ""]) else: has_errors = True plaintext_parts.extend( ["Error: Unable to get status of the cluster from any node", ""] ) print("\n".join(plaintext_parts).strip()) if has_errors: raise error("Unable to get status of all sites") def destroy( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * --skip-offline - skip unreachable nodes (including missing auth token) * --request-timeout - HTTP timeout for node authorization check """ modifiers.ensure_only_supported("--skip-offline", "--request-timeout") if argv: raise CmdLineInputError() force_flags = [] if modifiers.get("--skip-offline"): force_flags.append(report_codes.SKIP_OFFLINE_NODES) lib.dr.destroy(force_flags=force_flags) pcs-0.10.11/pcs/cli/fencing_topology.py000066400000000000000000000006001412706364600177170ustar00rootroot00000000000000from pcs.common.fencing_topology import ( TARGET_TYPE_NODE, TARGET_TYPE_REGEXP, TARGET_TYPE_ATTRIBUTE, ) __target_type_map = { "attrib": TARGET_TYPE_ATTRIBUTE, "node": TARGET_TYPE_NODE, "regexp": TARGET_TYPE_REGEXP, } target_type_map_cli_to_lib = __target_type_map target_type_map_lib_to_cli = { value: key for key, value in __target_type_map.items() } pcs-0.10.11/pcs/cli/file/000077500000000000000000000000001412706364600147235ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/file/__init__.py000066400000000000000000000000001412706364600170220ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/file/metadata.py000066400000000000000000000022621412706364600170570ustar00rootroot00000000000000import os.path from pcs.common import file_type_codes as code from pcs.common.file import FileMetadata _metadata = { code.BOOTH_CONFIG: lambda path: FileMetadata( file_type_code=code.BOOTH_CONFIG, path=path, owner_user_name=None, owner_group_name=None, permissions=None, is_binary=False, ), code.BOOTH_KEY: lambda path: FileMetadata( file_type_code=code.BOOTH_KEY, path=path, owner_user_name=None, owner_group_name=None, permissions=0o600, is_binary=True, ), code.COROSYNC_CONF: lambda path: FileMetadata( file_type_code=code.COROSYNC_CONF, path=path, owner_user_name=None, owner_group_name=None, permissions=0o644, is_binary=False, ), code.PCS_KNOWN_HOSTS: lambda: FileMetadata( file_type_code=code.PCS_KNOWN_HOSTS, path=os.path.join(os.path.expanduser("~/.pcs"), "known-hosts"), owner_user_name=None, owner_group_name=None, permissions=0o600, is_binary=False, ), } def for_file_type(file_type_code, *args, **kwargs): return _metadata[file_type_code](*args, **kwargs) pcs-0.10.11/pcs/cli/nvset.py000066400000000000000000000037651412706364600155300ustar00rootroot00000000000000from typing import ( cast, Iterable, List, 
Optional, ) from pcs.cli.rule import ( get_in_effect_label, rule_expression_dto_to_lines, ) from pcs.common.pacemaker.nvset import CibNvsetDto from pcs.common.str_tools import ( format_name_value_list, format_optional, indent, ) from pcs.common.types import ( CibNvsetType, CibRuleInEffectStatus, ) def nvset_dto_list_to_lines( nvset_dto_list: Iterable[CibNvsetDto], with_ids: bool = False, include_expired: bool = False, text_if_empty: Optional[str] = None, ) -> List[str]: if not nvset_dto_list: return [text_if_empty] if text_if_empty else [] if not include_expired: nvset_dto_list = [ nvset_dto for nvset_dto in nvset_dto_list if not nvset_dto.rule or nvset_dto.rule.in_effect != CibRuleInEffectStatus.EXPIRED ] return [ line for nvset_dto in nvset_dto_list for line in nvset_dto_to_lines(nvset_dto, with_ids=with_ids) ] def nvset_dto_to_lines(nvset: CibNvsetDto, with_ids: bool = False) -> List[str]: nvset_label = _nvset_type_to_label.get(nvset.type, "Options Set") in_effect_label = get_in_effect_label(nvset.rule) if nvset.rule else None heading_parts = [ "{label}{in_effect}: {id}".format( label=nvset_label, in_effect=format_optional(in_effect_label, " ({})"), id=nvset.id, ) ] if nvset.options: heading_parts.append( " ".join(format_name_value_list(sorted(nvset.options.items()))) ) lines = format_name_value_list( sorted([(nvpair.name, nvpair.value) for nvpair in nvset.nvpairs]) ) if nvset.rule: lines.extend( rule_expression_dto_to_lines(nvset.rule, with_ids=with_ids) ) return [" ".join(heading_parts)] + indent(lines) _nvset_type_to_label = { cast(str, CibNvsetType.INSTANCE): "Attributes", cast(str, CibNvsetType.META): "Meta Attrs", } pcs-0.10.11/pcs/cli/reports/000077500000000000000000000000001412706364600155025ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/reports/__init__.py000066400000000000000000000001731412706364600176140ustar00rootroot00000000000000from . import messages, output from .output import process_library_reports from .processor import ReportProcessorToConsole pcs-0.10.11/pcs/cli/reports/messages.py000066400000000000000000000373751412706364600177020ustar00rootroot00000000000000from typing import ( get_type_hints, Any, Dict, Mapping, ) from pcs.common import file_type_codes from pcs.common.str_tools import ( format_list, format_optional, format_plural, transform, ) from pcs.common.reports import ( const, dto, item, messages, types, ) from pcs.common.tools import get_all_subclasses class CliReportMessage: def __init__(self, dto_obj: dto.ReportItemMessageDto) -> None: self._dto_obj = dto_obj @property def code(self) -> str: return self._dto_obj.code @property def message(self) -> str: return self._dto_obj.message @property def payload(self) -> Mapping[str, Any]: return self._dto_obj.payload class CliReportMessageCustom(CliReportMessage): # pylint: disable=no-member _obj: item.ReportItemMessage def __init__(self, dto_obj: dto.ReportItemMessageDto) -> None: super().__init__(dto_obj) self._obj = get_type_hints(self.__class__).get("_obj")( # type: ignore **dto_obj.payload ) @property def message(self) -> str: raise NotImplementedError() class ResourceManagedNoMonitorEnabled(CliReportMessageCustom): _obj: messages.ResourceManagedNoMonitorEnabled @property def message(self) -> str: return ( f"Resource '{self._obj.resource_id}' has no enabled monitor " "operations. Re-run with '--monitor' to enable them." 
) class ResourceUnmoveUnbanPcmkExpiredNotSupported(CliReportMessageCustom): _obj: messages.ResourceUnmoveUnbanPcmkExpiredNotSupported @property def message(self) -> str: return "--expired not supported, please upgrade pacemaker" class CannotUnmoveUnbanResourceMasterResourceNotPromotable( CliReportMessageCustom ): _obj: messages.CannotUnmoveUnbanResourceMasterResourceNotPromotable @property def message(self) -> str: return _resource_move_ban_clear_master_resource_not_promotable( self._obj.promotable_id ) class InvalidCibContent(CliReportMessageCustom): _obj: messages.InvalidCibContent @property def message(self) -> str: return "invalid cib:\n{report}{more_verbose}".format( report=self._obj.report, more_verbose=format_optional( self._obj.can_be_more_verbose, "\n\nUse --full for more details.", ), ) class NodeCommunicationErrorNotAuthorized(CliReportMessageCustom): _obj: messages.NodeCommunicationErrorNotAuthorized @property def message(self) -> str: return ( f"Unable to authenticate to {self._obj.node} ({self._obj.reason})" f", try running 'pcs host auth {self._obj.node}'" ) class NodeCommunicationErrorTimedOut(CliReportMessageCustom): _obj: messages.NodeCommunicationErrorTimedOut @property def message(self) -> str: return ( f"{self._obj.node}: Connection timeout, check if pcsd is running " "there or try setting higher timeout with --request-timeout option " f"({self._obj.reason})" ) class CannotBanResourceMasterResourceNotPromotable(CliReportMessageCustom): _obj: messages.CannotBanResourceMasterResourceNotPromotable @property def message(self) -> str: return _resource_move_ban_clear_master_resource_not_promotable( self._obj.promotable_id ) class CannotMoveResourceMasterResourceNotPromotable(CliReportMessageCustom): _obj: messages.CannotMoveResourceMasterResourceNotPromotable @property def message(self) -> str: return _resource_move_ban_clear_master_resource_not_promotable( self._obj.promotable_id ) class SbdWatchdogTestMultipleDevices(CliReportMessageCustom): _obj: messages.SbdWatchdogTestMultipleDevices @property def message(self) -> str: return ( "Multiple watchdog devices available, therefore, watchdog which " "should be tested has to be specified. To list available watchdog " "devices use command 'pcs stonith sbd watchdog list'" ) class NodeUsedAsTieBreaker(CliReportMessageCustom): _obj: messages.NodeUsedAsTieBreaker @property def message(self) -> str: return ( f"Node '{self._obj.node}' with id '{self._obj.node_id}' is used as " "a tie breaker for a qdevice, run 'pcs quorum device update model " "tie_breaker=' to change it" ) class NodesToRemoveUnreachable(CliReportMessageCustom): _obj: messages.NodesToRemoveUnreachable @property def message(self) -> str: return ( "Removed {node} {nodes} could not be reached and subsequently " "deconfigured. Run 'pcs cluster destroy' on the unreachable " "{node}." 
).format( node=format_plural(self._obj.node_list, "node"), nodes=format_list(self._obj.node_list), ) class UnableToConnectToAllRemainingNodes(CliReportMessageCustom): _obj: messages.UnableToConnectToAllRemainingNodes @property def message(self) -> str: pluralize = lambda word: format_plural(self._obj.node_list, word) return ( "Remaining cluster {node} {nodes} could not be reached, run " "'pcs cluster sync' on any currently online node once the " "unreachable {one} become available" ).format( node=pluralize("node"), nodes=format_list(self._obj.node_list), one=pluralize("one"), ) class CannotRemoveAllClusterNodes(CliReportMessageCustom): _obj: messages.CannotRemoveAllClusterNodes @property def message(self) -> str: return ( "No nodes would be left in the cluster, if you intend to destroy " "the whole cluster, run 'pcs cluster destroy --all' instead" ) class WaitForNodeStartupWithoutStart(CliReportMessageCustom): _obj: messages.WaitForNodeStartupWithoutStart @property def message(self) -> str: return "Cannot specify '--wait' without specifying '--start'" class HostNotFound(CliReportMessageCustom): _obj: messages.HostNotFound @property def message(self) -> str: pluralize = lambda word: format_plural(self._obj.host_list, word) return ( ( "{host} {hosts_comma} {_is} not known to pcs, try to " "authenticate the {host} using 'pcs host auth {hosts_space}' " "command" ) .format( host=pluralize("host"), hosts_comma=format_list(self._obj.host_list), _is=pluralize("is"), hosts_space=" ".join(sorted(self._obj.host_list)), ) .capitalize() ) class UseCommandNodeRemoveGuest(CliReportMessageCustom): _obj: messages.UseCommandNodeRemoveGuest @property def message(self) -> str: return ( "this command is not sufficient for removing a guest node, use" " 'pcs cluster node remove-guest'" ) class UseCommandNodeAddGuest(CliReportMessageCustom): _obj: messages.UseCommandNodeAddGuest @property def message(self) -> str: return ( "this command is not sufficient for creating a guest node, use" " 'pcs cluster node add-guest'" ) class UseCommandNodeAddRemote(CliReportMessageCustom): _obj: messages.UseCommandNodeAddRemote @property def message(self) -> str: return ( "this command is not sufficient for creating a remote connection," " use 'pcs cluster node add-remote'" ) class CorosyncNodeConflictCheckSkipped(CliReportMessageCustom): _obj: messages.CorosyncNodeConflictCheckSkipped @property def message(self) -> str: return ( "Unable to check if there is a conflict with nodes set in corosync " "because {reason}" ).format(reason=_skip_reason_to_string(self._obj.reason_type)) class LiveEnvironmentNotConsistent(CliReportMessageCustom): _obj: messages.LiveEnvironmentNotConsistent @property def message(self) -> str: return ( "When {given} {_is} specified, {missing} must be specified as well" ).format( given=format_list( transform( self._obj.mocked_files, _file_role_to_option_translation ) ), _is=format_plural(self._obj.mocked_files, "is"), missing=format_list( transform( self._obj.required_files, _file_role_to_option_translation ) ), ) class LiveEnvironmentRequired(CliReportMessageCustom): _obj: messages.LiveEnvironmentRequired @property def message(self) -> str: return "This command does not support {forbidden_options}".format( forbidden_options=format_list( transform( self._obj.forbidden_options, _file_role_to_option_translation, ) ), ) class LiveEnvironmentRequiredForLocalNode(CliReportMessageCustom): _obj: messages.LiveEnvironmentRequiredForLocalNode @property def message(self) -> str: return "Node(s) must be specified if -f 
is used" class ServiceCommandsOnNodesSkipped(CliReportMessageCustom): _obj: messages.ServiceCommandsOnNodesSkipped @property def message(self) -> str: return ( "Running action(s) {actions} on {nodes} was skipped because " "{reason}. Please, run the action(s) manually." ).format( actions=format_list(self._obj.action_list), nodes=format_list(self._obj.node_list), reason=_skip_reason_to_string(self._obj.reason_type), ) class FilesRemoveFromNodesSkipped(CliReportMessageCustom): _obj: messages.FilesRemoveFromNodesSkipped @property def message(self) -> str: return ( "Removing {files} from {nodes} was skipped because {reason}. " "Please, remove the file(s) manually." ).format( files=format_list(self._obj.file_list), nodes=format_list(self._obj.node_list), reason=_skip_reason_to_string(self._obj.reason_type), ) class FilesDistributionSkipped(CliReportMessageCustom): _obj: messages.FilesDistributionSkipped @property def message(self) -> str: return ( "Distribution of {files} to {nodes} was skipped because " "{reason}. Please, distribute the file(s) manually." ).format( files=format_list(self._obj.file_list), nodes=format_list(self._obj.node_list), reason=_skip_reason_to_string(self._obj.reason_type), ) class WaitForIdleNotLiveCluster(CliReportMessageCustom): _obj: messages.WaitForIdleNotLiveCluster @property def message(self) -> str: return "Cannot use '-f' together with '--wait'" class TagCannotRemoveReferencesWithoutRemovingTag(CliReportMessageCustom): _obj: messages.TagCannotRemoveReferencesWithoutRemovingTag @property def message(self) -> str: tag_id = self._obj.tag_id return ( f"There would be no references left in the tag '{tag_id}', please " f"remove the whole tag using the 'pcs tag remove {tag_id}' command" ) class RuleExpressionParseError(CliReportMessageCustom): _obj: messages.RuleExpressionParseError @property def message(self) -> str: # Messages coming from the parser are not very useful and readable, # they mostly contain one line grammar expression covering the whole # rule. No user would be able to parse that. Therefore we omit the # messages. marker = "-" * (self._obj.column_number - 1) + "^" return ( f"'{self._obj.rule_string}' is not a valid rule expression, parse " f"error near or after line {self._obj.line_number} column " f"{self._obj.column_number}\n" f" {self._obj.rule_line}\n" f" {marker}" ) class CibNvsetAmbiguousProvideNvsetId(CliReportMessageCustom): _obj: messages.CibNvsetAmbiguousProvideNvsetId @property def message(self) -> str: command_map = { const.PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE: ( "pcs resource defaults set update" ), const.PCS_COMMAND_OPERATION_DEFAULTS_UPDATE: ( "pcs resource op defaults set update" ), } command = command_map.get(self._obj.pcs_command, "") return ( f"Several options sets exist, please use the '{command}' command " "and specify an option set ID" ) class UnableToGetAgentMetadata(CliReportMessageCustom): _obj: messages.UnableToGetAgentMetadata @property def message(self) -> str: reason = ", ".join(self._obj.reason.splitlines()) return ( f"Agent '{self._obj.agent}' is not installed or does not provide " f"valid metadata: {reason}" ) class HostAlreadyInClusterConfig(CliReportMessageCustom): _obj: messages.HostAlreadyInClusterConfig @property def message(self) -> str: return ( f"{self._obj.host_name}: The host seems to be in a cluster already " "as cluster configuration files have been found on the host. 
If " "the host is not part of a cluster, run 'pcs cluster destroy' on " f"host '{self._obj.host_name}' to remove those configuration files" ) class CannotLeaveGroupEmptyAfterMove(CliReportMessageCustom): _obj: messages.CannotLeaveGroupEmptyAfterMove @property def message(self) -> str: return ( f"{self._obj.message} Please, use the 'pcs resource " f"ungroup {self._obj.group_id}' command first." ) class StonithRestartlessUpdateUnableToPerform(CliReportMessageCustom): _obj: messages.StonithRestartlessUpdateUnableToPerform @property def message(self) -> str: msg = self._obj.message if ( self._obj.reason_type == const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_NOT_RUNNING ): msg += ", please use command 'pcs stonith update' instead" return msg def _create_report_msg_map() -> Dict[str, type]: result: Dict[str, type] = {} for report_msg_cls in get_all_subclasses(CliReportMessageCustom): # pylint: disable=protected-access code = ( get_type_hints(report_msg_cls) .get("_obj", item.ReportItemMessage) ._code ) if code: if code in result: raise AssertionError() result[code] = report_msg_cls return result REPORT_MSG_MAP = _create_report_msg_map() def report_item_msg_from_dto(obj: dto.ReportItemMessageDto) -> CliReportMessage: return REPORT_MSG_MAP.get(obj.code, CliReportMessage)(obj) _file_role_to_option_translation: Mapping[file_type_codes.FileTypeCode, str] = { file_type_codes.BOOTH_CONFIG: "--booth-conf", file_type_codes.BOOTH_KEY: "--booth-key", file_type_codes.CIB: "-f", file_type_codes.COROSYNC_CONF: "--corosync_conf", } def _resource_move_ban_clear_master_resource_not_promotable( promotable_id: str, ) -> str: return ( "when specifying --master you must use the promotable clone id{_id}" ).format( _id=format_optional(promotable_id, " ({})"), ) def _skip_reason_to_string(reason: types.ReasonType) -> str: return { const.REASON_NOT_LIVE_CIB: ( "the command does not run on a live cluster (e.g. 
-f was used)" ), const.REASON_UNREACHABLE: "pcs is unable to connect to the node(s)", }.get(reason, reason) pcs-0.10.11/pcs/cli/reports/output.py000066400000000000000000000030311412706364600174110ustar00rootroot00000000000000import sys from pcs.common.reports import ( codes, ReportItem, ReportItemList, ReportItemSeverity, ) from .messages import report_item_msg_from_dto def warn(message: str, stderr: bool = False) -> None: stream = sys.stderr if stderr else sys.stdout stream.write(f"Warning: {message}\n") def error(message: str) -> SystemExit: sys.stderr.write(f"Error: {message}\n") return SystemExit(1) def prepare_force_text(report_item: ReportItem) -> str: force_text_map = { codes.SKIP_OFFLINE_NODES: ", use --skip-offline to override", } force_code = report_item.severity.force_code if force_code: return force_text_map.get(force_code, ", use --force to override") return "" def process_library_reports(report_item_list: ReportItemList) -> None: if not report_item_list: raise error("Errors have occurred, therefore pcs is unable to continue") critical_error = False for report_item in report_item_list: report_dto = report_item.to_dto() msg = report_item_msg_from_dto(report_dto.message).message severity = report_dto.severity.level if severity == ReportItemSeverity.WARNING: warn(msg) continue if severity != ReportItemSeverity.ERROR: print(msg) continue error( "{msg}{force}".format( msg=msg, force=prepare_force_text(report_item), ) ) critical_error = True if critical_error: sys.exit(1) pcs-0.10.11/pcs/cli/reports/processor.py000066400000000000000000000030301412706364600200670ustar00rootroot00000000000000from typing import List from pcs.common.reports import ( ReportItem, ReportItemSeverity, ReportProcessor, ) from .output import ( error, prepare_force_text, warn, ) from .messages import report_item_msg_from_dto class ReportProcessorToConsole(ReportProcessor): def __init__(self, debug: bool = False) -> None: super().__init__() self._ignore_severities: List[ReportItemSeverity] = [] self.debug = debug def _do_report(self, report_item: ReportItem) -> None: report_dto = report_item.to_dto() msg = report_item_msg_from_dto(report_dto.message).message if report_dto.context: msg = f"{report_dto.context.node}: {msg}" severity = report_dto.severity.level if severity in self._ignore_severities: # DEBUG overrides ignoring severities for debug reports if msg and self.debug and severity == ReportItemSeverity.DEBUG: print(msg) return if severity == ReportItemSeverity.ERROR: error( "{msg}{force}".format( msg=msg, force=prepare_force_text(report_item), ) ) elif severity == ReportItemSeverity.WARNING: warn(msg) elif msg and (self.debug or severity != ReportItemSeverity.DEBUG): print(msg) def suppress_reports_of_severity( self, severity_list: List[ReportItemSeverity] ) -> None: self._ignore_severities = list(severity_list) pcs-0.10.11/pcs/cli/resource/000077500000000000000000000000001412706364600156335ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/resource/__init__.py000066400000000000000000000000001412706364600177320ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/resource/parse_args.py000066400000000000000000000215471412706364600203440ustar00rootroot00000000000000from pcs.cli.common.parse_args import group_by_keywords, prepare_options from pcs.cli.common.errors import CmdLineInputError, SEE_MAN_CHANGES def parse_create_simple(arg_list): groups = group_by_keywords( arg_list, set(["op", "meta"]), implicit_first_group_key="options", group_repeated_keywords=["op"], ) parts = { "meta": 
prepare_options(groups.get("meta", [])), "options": prepare_options(groups.get("options", [])), "op": [ prepare_options(op) for op in build_operations(groups.get("op", [])) ], } return parts def parse_clone(arg_list, promotable=False): parts = { "clone_id": None, "meta": {}, } allowed_keywords = set(["op", "meta"]) if ( arg_list and arg_list[0] not in allowed_keywords and "=" not in arg_list[0] ): parts["clone_id"] = arg_list.pop(0) groups = group_by_keywords( arg_list, allowed_keywords, implicit_first_group_key="options", group_repeated_keywords=["op"], only_found_keywords=True, ) if "op" in groups: raise CmdLineInputError( "op settings must be changed on base resource, not the clone", ) parts["meta"] = prepare_options( groups.get("options", []) + groups.get("meta", []), ) if promotable: if "promotable" in parts["meta"]: raise CmdLineInputError( "you cannot specify both promotable option and promotable " "keyword" ) parts["meta"]["promotable"] = "true" return parts def parse_create(arg_list): groups = group_by_keywords( arg_list, set(["op", "meta", "clone", "promotable", "bundle"]), implicit_first_group_key="options", group_repeated_keywords=["op"], only_found_keywords=True, ) try: parts = { "meta": prepare_options(groups.get("meta", [])), "options": prepare_options(groups.get("options", [])), "op": [ prepare_options(op) for op in build_operations(groups.get("op", [])) ], } if "clone" in groups: if groups["clone"] and "=" not in groups["clone"][0]: parts["clone_id"] = groups["clone"].pop(0) parts["clone"] = prepare_options(groups["clone"]) if "promotable" in groups: if groups["promotable"] and "=" not in groups["promotable"][0]: parts["clone_id"] = groups["promotable"].pop(0) parts["promotable"] = prepare_options(groups["promotable"]) if "bundle" in groups: parts["bundle"] = groups["bundle"] except CmdLineInputError as e: # Print error messages which point users to the changes section in pcs # manpage. # To be removed in the next significant version. if e.message == "missing value of 'master' option": raise CmdLineInputError( message=e.message, hint=( "Master/Slave resources have been renamed to promotable " "clones, please use the 'promotable' keyword instead of " "'master'. 
" + SEE_MAN_CHANGES ), ) from e raise return parts def _parse_bundle_groups(arg_list): """ Commandline options: no options """ repeatable_keyword_list = ["port-map", "storage-map"] keyword_list = ["meta", "container", "network"] + repeatable_keyword_list groups = group_by_keywords( arg_list, set(keyword_list), group_repeated_keywords=repeatable_keyword_list, only_found_keywords=True, ) for keyword in keyword_list: if keyword not in groups: continue if keyword in repeatable_keyword_list: for repeated_section in groups[keyword]: if not repeated_section: raise CmdLineInputError( "No {0} options specified".format(keyword) ) else: if not groups[keyword]: raise CmdLineInputError( "No {0} options specified".format(keyword) ) return groups def parse_bundle_create_options(arg_list): """ Commandline options: no options """ groups = _parse_bundle_groups(arg_list) container_options = groups.get("container", []) container_type = "" if container_options and "=" not in container_options[0]: container_type = container_options.pop(0) parts = { "container_type": container_type, "container": prepare_options(container_options), "network": prepare_options(groups.get("network", [])), "port_map": [ prepare_options(port_map) for port_map in groups.get("port-map", []) ], "storage_map": [ prepare_options(storage_map) for storage_map in groups.get("storage-map", []) ], "meta": prepare_options(groups.get("meta", [])), } return parts def parse_bundle_reset_options(arg_list): """ Commandline options: no options """ groups = _parse_bundle_groups(arg_list) container_options = groups.get("container", []) parts = { "container": prepare_options(container_options), "network": prepare_options(groups.get("network", [])), "port_map": [ prepare_options(port_map) for port_map in groups.get("port-map", []) ], "storage_map": [ prepare_options(storage_map) for storage_map in groups.get("storage-map", []) ], "meta": prepare_options(groups.get("meta", [])), } return parts def _split_bundle_map_update_op_and_options( map_arg_list, result_parts, map_name ): """ Commandline options: no options """ if len(map_arg_list) < 2: raise _bundle_map_update_not_valid(map_name) op, options = map_arg_list[0], map_arg_list[1:] if op == "add": result_parts["add"].append(prepare_options(options)) elif op in {"delete", "remove"}: result_parts["remove"].extend(options) else: raise _bundle_map_update_not_valid(map_name) def _bundle_map_update_not_valid(map_name): """ Commandline options: no options """ return CmdLineInputError( ( "When using '{map}' you must specify either 'add' and options or " "either of 'delete' or 'remove' and id(s)" ).format(map=map_name) ) def parse_bundle_update_options(arg_list): """ Commandline options: no options """ groups = _parse_bundle_groups(arg_list) port_map = {"add": [], "remove": []} for map_group in groups.get("port-map", []): _split_bundle_map_update_op_and_options(map_group, port_map, "port-map") storage_map = {"add": [], "remove": []} for map_group in groups.get("storage-map", []): _split_bundle_map_update_op_and_options( map_group, storage_map, "storage-map" ) parts = { "container": prepare_options(groups.get("container", [])), "network": prepare_options(groups.get("network", [])), "port_map_add": port_map["add"], "port_map_remove": port_map["remove"], "storage_map_add": storage_map["add"], "storage_map_remove": storage_map["remove"], "meta": prepare_options(groups.get("meta", [])), } return parts def build_operations(op_group_list): """ Return a list of dicts. Each dict represents one operation. 
list of list op_group_list contains items that have parameters after "op" (so item can contain multiple operations) for example: [ [monitor timeout=1 start timeout=2], [monitor timeout=3 interval=10], ] """ operation_list = [] for op_group in op_group_list: # empty operation is not allowed if not op_group: raise __not_enough_parts_in_operation() # every operation group needs to start with operation name if "=" in op_group[0]: raise __every_operation_needs_name() for arg in op_group: if "=" not in arg: operation_list.append(["name={0}".format(arg)]) else: operation_list[-1].append(arg) # every operation needs at least name and one option # there can be more than one operation in op_group: check is after # processing if any(len(operation) < 2 for operation in operation_list): raise __not_enough_parts_in_operation() return operation_list def __not_enough_parts_in_operation(): return CmdLineInputError( "When using 'op' you must specify an operation name" " and at least one option" ) def __every_operation_needs_name(): return CmdLineInputError( "When using 'op' you must specify an operation name after 'op'" ) pcs-0.10.11/pcs/cli/resource/relations.py000066400000000000000000000166421412706364600202160ustar00rootroot00000000000000from typing import ( Any, cast, Iterable, List, Mapping, Sequence, ) from pcs.common.interface import dto from pcs.common.pacemaker.resource.relations import ( RelationEntityDto, ResourceRelationDto, ResourceRelationType, ) from pcs.common.str_tools import format_optional from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import InputModifiers from pcs.cli.common.printable_tree import ( tree_to_lines, PrintableTreeNode, ) def show_resource_relations_cmd( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file * --full - show constraint ids and resource types """ modifiers.ensure_only_supported("-f", "--full") if len(argv) != 1: raise CmdLineInputError() tree = ResourcePrintableNode.from_dto( dto.from_dict( ResourceRelationDto, lib.resource.get_resource_relations_tree(argv[0]), ) ) for line in tree_to_lines( tree, verbose=cast(bool, modifiers.get("--full")) ): print(line) class ResourceRelationBase(PrintableTreeNode): def __init__( self, relation_entity: RelationEntityDto, members: Sequence["ResourceRelationBase"], is_leaf: bool, ): self._relation_entity = relation_entity self._members = members self._is_leaf = is_leaf @property def is_leaf(self) -> bool: return self._is_leaf @property def relation_entity(self) -> RelationEntityDto: return self._relation_entity @property def members(self) -> Sequence["ResourceRelationBase"]: return self._members @property def detail(self) -> Sequence[str]: raise NotImplementedError() def get_title(self, verbose: bool) -> str: raise NotImplementedError() class ResourcePrintableNode(ResourceRelationBase): @classmethod def from_dto( cls, resource_dto: ResourceRelationDto ) -> "ResourcePrintableNode": def _relation_comparator(item: ResourceRelationBase) -> str: type_priorities = ( ResourceRelationType.INNER_RESOURCES, ResourceRelationType.OUTER_RESOURCE, ResourceRelationType.ORDER, ResourceRelationType.ORDER_SET, ) priority_map = { _type: value for value, _type in enumerate(type_priorities) } return "{_type}_{_id}".format( _type=priority_map.get( # Hardcoded number 9 is intentional. If there is more than # 10 items, it would be required to also prepend zeros for # lower numbers. E.g: if there is 100 options, it should # starts as 000, 001, ... 
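# Editorial illustration (not upstream pcs code): with the priority map
# built above, an ORDER relation whose id is "o1" produces the sort key
# "2_o1", while a relation type missing from the map falls back to the
# hardcoded 9 and yields "9_o1", so unknown relation types always sort last.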
item.relation_entity.type, # type: ignore 9, ), _id=item.relation_entity.id, ) return cls( resource_dto.relation_entity, sorted( [ RelationPrintableNode.from_dto(member_dto) for member_dto in resource_dto.members ], key=_relation_comparator, ), resource_dto.is_leaf, ) def get_title(self, verbose: bool) -> str: metadata = self._relation_entity.metadata rsc_type = self._relation_entity.type type_str = { ResourceRelationType.RSC_GROUP: "group", ResourceRelationType.RSC_BUNDLE: "bundle", ResourceRelationType.RSC_CLONE: "clone", }.get(rsc_type, "") if rsc_type == ResourceRelationType.RSC_PRIMITIVE: type_str = "{_class}{_provider}{_type}".format( _class=format_optional(metadata.get("class"), "{}:"), _provider=format_optional(metadata.get("provider"), "{}:"), _type=metadata.get("type"), ) detail = f" (resource: {type_str})" if verbose else "" return f"{self._relation_entity.id}{detail}" @property def detail(self) -> Sequence[str]: return [] class RelationPrintableNode(ResourceRelationBase): @classmethod def from_dto( cls, relation_dto: ResourceRelationDto ) -> "RelationPrintableNode": return cls( relation_dto.relation_entity, sorted( [ ResourcePrintableNode.from_dto(member_dto) for member_dto in relation_dto.members ], key=lambda item: item.relation_entity.id, ), relation_dto.is_leaf, ) def get_title(self, verbose: bool) -> str: rel_type_map: Mapping[ResourceRelationType, str] = { ResourceRelationType.ORDER: "order", ResourceRelationType.ORDER_SET: "order set", ResourceRelationType.INNER_RESOURCES: "inner resource(s)", ResourceRelationType.OUTER_RESOURCE: "outer resource", } detail = ( " ({})".format(self._relation_entity.metadata.get("id")) if verbose else "" ) return "{type}{detail}".format( type=rel_type_map.get(self._relation_entity.type, ""), detail=detail, ) @property def detail(self) -> Sequence[str]: ent = self._relation_entity if ent.type is ResourceRelationType.ORDER: return _order_metadata_to_str(ent.metadata) if ent.type is ResourceRelationType.ORDER_SET: return _order_set_metadata_to_str(ent.metadata) if ( ent.type is ResourceRelationType.INNER_RESOURCES and len(ent.members) > 1 ): return ["members: {}".format(" ".join(ent.members))] return [] def _order_metadata_to_str(metadata: Mapping[str, Any]) -> Sequence[str]: return [ "{action1} {resource1} then {action2} {resource2}".format( action1=metadata["first-action"], resource1=metadata["first"], action2=metadata["then-action"], resource2=metadata["then"], ) ] + _order_common_metadata_to_str(metadata) def _order_set_metadata_to_str(metadata: Mapping[str, Any]) -> Sequence[str]: result = [] for res_set in metadata["sets"]: result.append( " set {resources}{options}".format( resources=" ".join(res_set["members"]), options=_resource_set_options_to_str(res_set["metadata"]), ) ) return _order_common_metadata_to_str(metadata) + result def _resource_set_options_to_str(metadata: Mapping[str, Any]) -> str: supported_keys = ( "sequential", "require-all", "ordering", "action", "role", "kind", "score", ) result = _filter_supported_keys(metadata, supported_keys) return f" ({result})" if result else "" def _filter_supported_keys( data: Mapping[str, Any], supported_keys: Iterable[str] ) -> str: return " ".join( [ f"{key}={value}" for key, value in sorted(data.items()) if key in supported_keys ] ) def _order_common_metadata_to_str(metadata: Mapping[str, Any]) -> List[str]: result = _filter_supported_keys( metadata, ("symmetrical", "kind", "require-all", "score") ) return [result] if result else [] 
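# Editorial note -- a hedged illustration of the helpers above, not part of
# upstream pcs: _filter_supported_keys({"kind": "Mandatory", "id": "o1"},
# ("kind", "score")) returns "kind=Mandatory", because keys outside
# supported_keys (such as "id") are dropped and the surviving pairs are
# sorted by key; _resource_set_options_to_str() then wraps a non-empty
# result as " (kind=Mandatory)".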
pcs-0.10.11/pcs/cli/routing/000077500000000000000000000000001412706364600154735ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/routing/__init__.py000066400000000000000000000000001412706364600175720ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/routing/acl.py000066400000000000000000000027231412706364600166100ustar00rootroot00000000000000from pcs import ( acl, usage, ) from pcs.cli.common.routing import create_router acl_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.acl(argv), # TODO remove, deprecated command # replaced with 'config' "show": acl.show_acl_config, "config": acl.acl_config, "enable": acl.acl_enable, "disable": acl.acl_disable, "role": create_router( { "create": acl.role_create, "delete": acl.role_delete, "remove": acl.role_delete, "assign": acl.role_assign, "unassign": acl.role_unassign, }, ["acl", "role"], ), "user": create_router( { "create": acl.user_create, "delete": acl.user_delete, "remove": acl.user_delete, }, ["acl", "user"], ), "group": create_router( { "create": acl.group_create, "delete": acl.group_delete, "remove": acl.group_delete, }, ["acl", "group"], ), "permission": create_router( { "add": acl.permission_add, "delete": acl.run_permission_delete, "remove": acl.run_permission_delete, }, ["acl", "permission"], ), }, ["acl"], default_cmd="config", ) pcs-0.10.11/pcs/cli/routing/alert.py000066400000000000000000000017351412706364600171620ustar00rootroot00000000000000from pcs import ( alert, usage, ) from pcs.cli.common.routing import create_router alert_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.alert(argv), "create": alert.alert_add, "update": alert.alert_update, "delete": alert.alert_remove, "remove": alert.alert_remove, "config": alert.print_alert_config, # TODO remove, deprecated command # replaced with 'config' "show": alert.print_alert_show, "recipient": create_router( { "help": lambda lib, argv, modifiers: usage.alert(["recipient"]), "add": alert.recipient_add, "update": alert.recipient_update, "delete": alert.recipient_remove, "remove": alert.recipient_remove, }, ["alert", "recipient"], ), "get_all_alerts": alert.print_alerts_in_json, }, ["alert"], default_cmd="config", ) pcs-0.10.11/pcs/cli/routing/booth.py000066400000000000000000000024431412706364600171630ustar00rootroot00000000000000from pcs import usage from pcs.cli.booth import command from pcs.cli.common.routing import create_router from pcs.resource import resource_remove, resource_restart booth_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.booth(argv), "config": command.config_show, "setup": command.config_setup, "destroy": command.config_destroy, "ticket": create_router( { "help": lambda lib, argv, modifiers: usage.booth(["ticket"]), "add": command.config_ticket_add, "delete": command.config_ticket_remove, "remove": command.config_ticket_remove, "grant": command.ticket_grant, "revoke": command.ticket_revoke, }, ["booth", "ticket"], ), "create": command.create_in_cluster, "delete": command.get_remove_from_cluster(resource_remove), "remove": command.get_remove_from_cluster(resource_remove), "restart": command.get_restart(resource_restart), "sync": command.sync, "pull": command.pull, "enable": command.enable, "disable": command.disable, "start": command.start, "stop": command.stop, "status": command.status, }, ["booth"], ) pcs-0.10.11/pcs/cli/routing/client.py000066400000000000000000000002621412706364600173230ustar00rootroot00000000000000from pcs import client from pcs.cli.common.routing import create_router client_cmd = create_router( { 
"local-auth": client.local_auth_cmd, }, ["client"], ) pcs-0.10.11/pcs/cli/routing/cluster.py000066400000000000000000000121561412706364600175330ustar00rootroot00000000000000from typing import ( Any, Sequence, ) from pcs import ( cluster, pcsd, resource, status, usage, ) import pcs.cli.cluster.command as cluster_command from pcs.cli.common.errors import ( CmdLineInputError, raise_command_replaced, ) from pcs.cli.common.parse_args import InputModifiers from pcs.cli.common.routing import create_router from pcs.cli.reports.output import warn from pcs.utils import exit_on_cmdline_input_errror def certkey(lib: Any, argv: Sequence[str], modifiers: InputModifiers) -> None: warn( "This command is deprecated and will be removed. " "Please use 'pcs pcsd certkey' instead.", stderr=True, ) try: return pcsd.pcsd_certkey_cmd(lib, argv, modifiers) except CmdLineInputError as e: return exit_on_cmdline_input_errror(e, "pcsd", ["certkey"]) def pcsd_status( lib: Any, argv: Sequence[str], modifiers: InputModifiers ) -> None: warn( "This command is deprecated and will be removed. " "Please use 'pcs pcsd status' or 'pcs status pcsd' instead.", stderr=True, ) try: return pcsd.pcsd_status_cmd(lib, argv, modifiers) except CmdLineInputError as e: return exit_on_cmdline_input_errror(e, "pcsd", ["status"]) cluster_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.cluster(argv), "setup": cluster.cluster_setup, "config": create_router( { "show": cluster.config_show, "update": cluster.config_update, }, ["cluster", "config"], default_cmd="show", ), "authkey": create_router( {"corosync": cluster.authkey_corosync}, ["cluster", "authkey"], ), "sync": create_router( { "corosync": cluster.sync_nodes, }, ["cluster", "sync"], default_cmd="corosync", ), "status": status.cluster_status, # TODO remove, deprecated command # replaced with 'pcs pcsd status' and 'pcs status pcsd' "pcsd-status": pcsd_status, # TODO remove, deprecated command # replaced with 'pcs pcsd certkey' "certkey": certkey, "auth": cluster.cluster_auth_cmd, "start": cluster.cluster_start_cmd, "stop": cluster.cluster_stop_cmd, "kill": cluster.kill_cluster, "enable": cluster.cluster_enable_cmd, "disable": cluster.cluster_disable_cmd, "cib": cluster.get_cib, "cib-push": cluster.cluster_push, "cib-upgrade": cluster.cluster_cib_upgrade_cmd, "edit": cluster.cluster_edit, "link": create_router( { "add": cluster.link_add, "delete": cluster.link_remove, "remove": cluster.link_remove, "update": cluster.link_update, }, ["cluster", "link"], ), "node": create_router( { "add": cluster.node_add, "add-guest": cluster_command.node_add_guest, "add-outside": cluster.node_add_outside_cluster, "add-remote": cluster_command.node_add_remote, "clear": cluster_command.node_clear, "delete": cluster.node_remove, "delete-guest": cluster_command.node_remove_guest, "delete-remote": cluster_command.create_node_remove_remote( resource.resource_remove ), "remove": cluster.node_remove, "remove-guest": cluster_command.node_remove_guest, "remove-remote": cluster_command.create_node_remove_remote( resource.resource_remove ), }, ["cluster", "node"], ), "uidgid": cluster.cluster_uidgid, "corosync": cluster.cluster_get_corosync_conf, "reload": cluster.cluster_reload, "destroy": cluster.cluster_destroy, "verify": cluster.cluster_verify, "report": cluster.cluster_report, "remove_nodes_from_cib": cluster.remove_nodes_from_cib, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. 
"quorum": lambda lib, argv, modifiers: raise_command_replaced( "pcs quorum" ), "remote-node": create_router( { "add": lambda lib, argv, modifiers: raise_command_replaced( "pcs cluster node add-guest", ), "remove": lambda lib, argv, modifiers: raise_command_replaced( "pcs cluster node delete-guest", "pcs cluster node remove-guest", ), }, ["cluster", "node"], ), "standby": lambda lib, argv, modifiers: raise_command_replaced( "pcs node standby" ), "unstandby": lambda lib, argv, modifiers: raise_command_replaced( "pcs node unstandby" ), }, ["cluster"], ) pcs-0.10.11/pcs/cli/routing/config.py000066400000000000000000000023631412706364600173160ustar00rootroot00000000000000from pcs import ( config, usage, ) from pcs.cli.common.routing import create_router config_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.config(argv), "show": config.config_show, "backup": config.config_backup, "restore": config.config_restore, "checkpoint": create_router( { "list": config.config_checkpoint_list, "view": config.config_checkpoint_view, "restore": config.config_checkpoint_restore, "diff": config.config_checkpoint_diff, }, ["config", "checkpoint"], default_cmd="list", ), # TODO remove, deprecated command "import-cman": config.config_import_cman, # TODO remove, deprecated command "export": create_router( { "pcs-commands": config.config_export_pcs_commands, "pcs-commands-verbose": lambda lib, argv, modifiers: ( config.config_export_pcs_commands( lib, argv, modifiers, verbose=True ) ), }, ["config", "export"], ), }, ["config"], default_cmd="show", ) pcs-0.10.11/pcs/cli/routing/constraint.py000066400000000000000000000040061412706364600202310ustar00rootroot00000000000000from pcs import ( constraint, usage, ) from pcs.cli.common.routing import create_router import pcs.cli.constraint_colocation.command as colocation_command from pcs.cli.constraint_ticket import command as ticket_command constraint_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.constraint(argv), "location": constraint.constraint_location_cmd, "order": constraint.constraint_order_cmd, "ticket": create_router( { "set": ticket_command.create_with_set, "add": ticket_command.add, "delete": ticket_command.remove, "remove": ticket_command.remove, # TODO remove, deprecated command # replaced with 'config' "show": ticket_command.show, "config": ticket_command.config_cmd, }, ["constraint", "ticket"], default_cmd="config", ), "colocation": create_router( { "add": constraint.colocation_add, "remove": constraint.colocation_rm, "delete": constraint.colocation_rm, "set": colocation_command.create_with_set, # TODO remove, deprecated command # replaced with 'config' "show": colocation_command.show, "config": colocation_command.config_cmd, }, ["constraint", "colocation"], default_cmd="config", ), "remove": constraint.constraint_rm, "delete": constraint.constraint_rm, # TODO remove, deprecated command # replaced with 'config' "show": constraint.constraint_show, # TODO remove, deprecated command # replaced with 'config' "list": constraint.constraint_show, "config": constraint.constraint_config_cmd, "ref": constraint.constraint_ref, "rule": constraint.constraint_rule, }, ["constraint"], default_cmd="config", ) pcs-0.10.11/pcs/cli/routing/dr.py000066400000000000000000000005631412706364600164560ustar00rootroot00000000000000from pcs import usage from pcs.cli import dr from pcs.cli.common.routing import create_router dr_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.dr(argv), "config": dr.config, "destroy": dr.destroy, 
"set-recovery-site": dr.set_recovery_site, "status": dr.status, }, ["dr"], default_cmd="help", ) pcs-0.10.11/pcs/cli/routing/host.py000066400000000000000000000004241412706364600170220ustar00rootroot00000000000000from pcs import ( host, usage, ) from pcs.cli.common.routing import create_router host_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.host(argv), "auth": host.auth_cmd, "deauth": host.deauth_cmd, }, ["host"], ) pcs-0.10.11/pcs/cli/routing/node.py000066400000000000000000000012761412706364600170000ustar00rootroot00000000000000from functools import partial from pcs import ( node, usage, ) from pcs.cli.common.routing import create_router node_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.node(argv), "maintenance": partial(node.node_maintenance_cmd, enable=True), "unmaintenance": partial(node.node_maintenance_cmd, enable=False), "standby": partial(node.node_standby_cmd, enable=True), "unstandby": partial(node.node_standby_cmd, enable=False), "attribute": node.node_attribute_cmd, "utilization": node.node_utilization_cmd, # pcs-to-pcsd use only "pacemaker-status": node.node_pacemaker_status, }, ["node"], ) pcs-0.10.11/pcs/cli/routing/pcsd.py000066400000000000000000000014531412706364600170010ustar00rootroot00000000000000from pcs import ( pcsd, usage, ) from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.routing import create_router pcsd_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.pcsd(argv), "accept_token": pcsd.accept_token_cmd, "deauth": pcsd.pcsd_deauth, "certkey": pcsd.pcsd_certkey_cmd, "status": pcsd.pcsd_status_cmd, "sync-certificates": pcsd.pcsd_sync_certs, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # TODO To be removed in the next significant version. 
"clear-auth": lambda lib, argv, modifiers: raise_command_replaced( "pcs host deauth", "pcs pcsd deauth" ), }, ["pcsd"], ) pcs-0.10.11/pcs/cli/routing/prop.py000066400000000000000000000013151412706364600170250ustar00rootroot00000000000000from pcs import ( prop, usage, ) from pcs.cli.common.routing import create_router property_cmd = create_router( { "help": lambda _lib, _argv, _modifiers: usage.property(_argv), "set": prop.set_property, "unset": prop.unset_property, # TODO remove, deprecated command # replaced with 'config' "list": prop.list_property_deprecated, # TODO remove, deprecated command # replaced with 'config' "show": prop.list_property_deprecated, "config": prop.list_property, "get_cluster_properties_definition": ( prop.print_cluster_properties_definition ), }, ["property"], default_cmd="config", ) pcs-0.10.11/pcs/cli/routing/qdevice.py000066400000000000000000000021121412706364600174610ustar00rootroot00000000000000from pcs import ( qdevice, usage, ) from pcs.cli.common.routing import create_router qdevice_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.qdevice(argv), "status": qdevice.qdevice_status_cmd, "setup": qdevice.qdevice_setup_cmd, "destroy": qdevice.qdevice_destroy_cmd, "start": qdevice.qdevice_start_cmd, "stop": qdevice.qdevice_stop_cmd, "kill": qdevice.qdevice_kill_cmd, "enable": qdevice.qdevice_enable_cmd, "disable": qdevice.qdevice_disable_cmd, # following commands are internal use only, called from pcsd "sign-net-cert-request": qdevice.qdevice_sign_net_cert_request_cmd, "net-client": create_router( { "setup": qdevice.qdevice_net_client_setup_cmd, "import-certificate": ( qdevice.qdevice_net_client_import_certificate_cmd ), "destroy": qdevice.qdevice_net_client_destroy, }, ["qdevice", "net-client"], ), }, ["qdevice"], ) pcs-0.10.11/pcs/cli/routing/quorum.py000066400000000000000000000023311412706364600173740ustar00rootroot00000000000000from pcs import ( quorum, usage, ) from pcs.cli.common.routing import create_router quorum_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.quorum(argv), "config": quorum.quorum_config_cmd, "expected-votes": quorum.quorum_expected_votes_cmd, "status": quorum.quorum_status_cmd, "device": create_router( { "add": quorum.quorum_device_add_cmd, "heuristics": create_router( { "delete": quorum.quorum_device_heuristics_remove_cmd, "remove": quorum.quorum_device_heuristics_remove_cmd, }, ["quorum", "device", "heuristics"], ), "delete": quorum.quorum_device_remove_cmd, "remove": quorum.quorum_device_remove_cmd, "status": quorum.quorum_device_status_cmd, "update": quorum.quorum_device_update_cmd, }, ["quorum", "device"], ), # TODO switch to new architecture "unblock": quorum.quorum_unblock_cmd, "update": quorum.quorum_update_cmd, }, ["quorum"], default_cmd="config", ) pcs-0.10.11/pcs/cli/routing/resource.py000066400000000000000000000147641412706364600177100ustar00rootroot00000000000000from functools import partial from typing import ( Any, List, ) from pcs import ( resource, usage, ) from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.parse_args import InputModifiers from pcs.cli.common.routing import create_router from pcs.cli.resource.relations import show_resource_relations_cmd def resource_defaults_cmd( lib: Any, argv: List[str], modifiers: InputModifiers ) -> None: """ Options: * -f - CIB file * --force - allow unknown options """ if argv and "=" in argv[0]: # DEPRECATED legacy command return resource.resource_defaults_legacy_cmd( lib, argv, modifiers, deprecated_syntax_used=True ) 
router = create_router( { "config": resource.resource_defaults_config_cmd, "set": create_router( { "create": resource.resource_defaults_set_create_cmd, "delete": resource.resource_defaults_set_remove_cmd, "remove": resource.resource_defaults_set_remove_cmd, "update": resource.resource_defaults_set_update_cmd, }, ["resource", "defaults", "set"], ), "update": resource.resource_defaults_legacy_cmd, }, ["resource", "defaults"], default_cmd="config", ) return router(lib, argv, modifiers) def resource_op_defaults_cmd( lib: Any, argv: List[str], modifiers: InputModifiers ) -> None: """ Options: * -f - CIB file * --force - allow unknown options """ if argv and "=" in argv[0]: # DEPRECATED legacy command return resource.resource_op_defaults_legacy_cmd( lib, argv, modifiers, deprecated_syntax_used=True ) router = create_router( { "config": resource.resource_op_defaults_config_cmd, "set": create_router( { "create": resource.resource_op_defaults_set_create_cmd, "delete": resource.resource_op_defaults_set_remove_cmd, "remove": resource.resource_op_defaults_set_remove_cmd, "update": resource.resource_op_defaults_set_update_cmd, }, ["resource", "op", "defaults", "set"], ), "update": resource.resource_op_defaults_legacy_cmd, }, ["resource", "op", "defaults"], default_cmd="config", ) return router(lib, argv, modifiers) resource_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.resource(argv), "list": resource.resource_list_available, "describe": resource.resource_list_options, "create": resource.resource_create, "move": resource.resource_move, "ban": resource.resource_ban, "clear": resource.resource_unmove_unban, "standards": resource.resource_standards, "providers": resource.resource_providers, "agents": resource.resource_agents, "update": resource.resource_update, "meta": resource.resource_meta, "delete": resource.resource_remove_cmd, "remove": resource.resource_remove_cmd, # TODO remove, deprecated command # replaced with 'resource status' and 'resource config' "show": resource.resource_show, "status": resource.resource_status, "config": resource.resource_config, "group": create_router( { "add": resource.resource_group_add_cmd, "list": resource.resource_group_list, "remove": resource.resource_group_rm_cmd, "delete": resource.resource_group_rm_cmd, }, ["resource", "group"], ), "ungroup": resource.resource_group_rm_cmd, "clone": resource.resource_clone, "promotable": partial(resource.resource_clone, promotable=True), "unclone": resource.resource_clone_master_remove, "enable": resource.resource_enable_cmd, "disable": resource.resource_disable_cmd, "safe-disable": resource.resource_safe_disable_cmd, "restart": resource.resource_restart, "debug-start": partial( resource.resource_force_action, action="debug-start" ), "debug-stop": partial( resource.resource_force_action, action="debug-stop" ), "debug-promote": partial( resource.resource_force_action, action="debug-promote" ), "debug-demote": partial( resource.resource_force_action, action="debug-demote" ), "debug-monitor": partial( resource.resource_force_action, action="debug-monitor" ), "manage": resource.resource_manage_cmd, "unmanage": resource.resource_unmanage_cmd, "failcount": resource.resource_failcount, "op": create_router( { "defaults": resource_op_defaults_cmd, "add": resource.resource_op_add_cmd, "remove": resource.resource_op_delete_cmd, "delete": resource.resource_op_delete_cmd, }, ["resource", "op"], ), "defaults": resource_defaults_cmd, "cleanup": resource.resource_cleanup, "refresh": resource.resource_refresh, "relocate": 
create_router( { "show": resource.resource_relocate_show_cmd, "dry-run": resource.resource_relocate_dry_run_cmd, "run": resource.resource_relocate_run_cmd, "clear": resource.resource_relocate_clear_cmd, }, ["resource", "relocate"], ), "utilization": resource.resource_utilization_cmd, "bundle": create_router( { "create": resource.resource_bundle_create_cmd, "reset": resource.resource_bundle_reset_cmd, "update": resource.resource_bundle_update_cmd, }, ["resource", "bundle"], ), # internal use only "get_resource_agent_info": resource.get_resource_agent_info, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. "master": lambda lib, argv, modifiers: raise_command_replaced( "pcs resource promotable" ), "relations": show_resource_relations_cmd, }, ["resource"], default_cmd="status", ) pcs-0.10.11/pcs/cli/routing/status.py000066400000000000000000000022721412706364600173730ustar00rootroot00000000000000from pcs import ( status, usage, ) from pcs.cli.common.errors import raise_command_replaced from pcs.cli.common.routing import create_router from pcs.pcsd import pcsd_status_cmd from pcs.qdevice import qdevice_status_cmd from pcs.quorum import quorum_status_cmd from pcs.resource import resource_status from pcs.cli.booth.command import status as booth_status_cmd status_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.status(argv), "booth": booth_status_cmd, "corosync": status.corosync_status, "cluster": status.cluster_status, "nodes": status.nodes_status, "pcsd": pcsd_status_cmd, "qdevice": qdevice_status_cmd, "quorum": quorum_status_cmd, "resources": resource_status, "xml": status.xml_status, "status": status.full_status, # removed commands # These print error messages which point users to the changes section in # pcs manpage. # To be removed in the next significant version. 
"groups": lambda lib, argv, modifiers: raise_command_replaced( "pcs resource group list" ), }, ["status"], default_cmd="status", ) pcs-0.10.11/pcs/cli/routing/stonith.py000066400000000000000000000060161412706364600175400ustar00rootroot00000000000000from pcs import ( resource, stonith, usage, ) from pcs.cli.common.routing import create_router stonith_cmd = create_router( { "help": lambda lib, argv, modifiers: usage.stonith(argv), "list": stonith.stonith_list_available, "describe": stonith.stonith_list_options, "create": stonith.stonith_create, "update": resource.resource_update, "update-scsi-devices": stonith.stonith_update_scsi_devices, "delete": resource.resource_remove_cmd, "remove": resource.resource_remove_cmd, # TODO remove, deprecated command # replaced with 'stonith status' and 'stonith config' "show": stonith.stonith_show_cmd, "status": stonith.stonith_status_cmd, "config": stonith.stonith_config_cmd, "level": create_router( { "add": stonith.stonith_level_add_cmd, "clear": stonith.stonith_level_clear_cmd, "config": stonith.stonith_level_config_cmd, "remove": stonith.stonith_level_remove_cmd, "delete": stonith.stonith_level_remove_cmd, "verify": stonith.stonith_level_verify_cmd, }, ["stonith", "level"], default_cmd="config", ), "fence": stonith.stonith_fence, "cleanup": resource.resource_cleanup, "refresh": resource.resource_refresh, "confirm": stonith.stonith_confirm, "sbd": create_router( { "enable": stonith.sbd_enable, "disable": stonith.sbd_disable, "status": stonith.sbd_status, "config": stonith.sbd_config, "device": create_router( { "setup": stonith.sbd_setup_block_device, "message": stonith.sbd_message, }, ["stonith", "sbd", "device"], ), "watchdog": create_router( { "list": stonith.sbd_watchdog_list, "test": stonith.sbd_watchdog_test, # internal use only "list_json": stonith.sbd_watchdog_list_json, }, ["stonith", "sbd", "watchdog"], ), # internal use only "local_config_in_json": stonith.local_sbd_config, }, ["stonith", "sbd"], ), "enable": resource.resource_enable_cmd, "disable": resource.resource_disable_cmd, "history": create_router( { "show": stonith.stonith_history_show_cmd, "cleanup": stonith.stonith_history_cleanup_cmd, "update": stonith.stonith_history_update_cmd, }, ["stonith", "history"], default_cmd="show", ), # internal use only "get_fence_agent_info": stonith.get_fence_agent_info, }, ["stonith"], default_cmd="status", ) pcs-0.10.11/pcs/cli/routing/tag.py000066400000000000000000000010231412706364600166140ustar00rootroot00000000000000from pcs import usage from pcs.cli.common.routing import create_router from pcs.cli.tag import command as tag tag_cmd = create_router( { "config": tag.tag_config, "create": tag.tag_create, "delete": tag.tag_remove, "help": lambda lib, argv, modifiers: usage.tag(argv), # TODO remove, deprecated command # replaced with 'config' "list": tag.tag_list_cmd, "remove": tag.tag_remove, "update": tag.tag_update, }, ["tag"], default_cmd="config", ) pcs-0.10.11/pcs/cli/rule.py000066400000000000000000000064031412706364600153300ustar00rootroot00000000000000from typing import List, Optional from pcs.common.pacemaker.rule import CibRuleExpressionDto from pcs.common.str_tools import ( format_name_value_list, indent, ) from pcs.common.types import ( CibRuleInEffectStatus, CibRuleExpressionType, ) _in_effect_label_map = { CibRuleInEffectStatus.NOT_YET_IN_EFFECT: "not yet in effect", CibRuleInEffectStatus.IN_EFFECT: None, CibRuleInEffectStatus.EXPIRED: "expired", } def get_in_effect_label(rule: CibRuleExpressionDto) -> Optional[str]: return 
_in_effect_label_map.get(rule.in_effect, None) def rule_expression_dto_to_lines( rule_expr: CibRuleExpressionDto, with_ids: bool = False ) -> List[str]: if rule_expr.type == CibRuleExpressionType.RULE: return _rule_dto_to_lines(rule_expr, with_ids) if rule_expr.type == CibRuleExpressionType.DATE_EXPRESSION: return _date_dto_to_lines(rule_expr, with_ids) return _simple_expr_to_lines(rule_expr, with_ids) def _rule_dto_to_lines( rule_expr: CibRuleExpressionDto, with_ids: bool = False ) -> List[str]: in_effect_label = get_in_effect_label(rule_expr) heading_parts = [ "Rule{0}:".format(f" ({in_effect_label})" if in_effect_label else "") ] heading_parts.extend( format_name_value_list(sorted(rule_expr.options.items())) ) if with_ids: heading_parts.append(f"(id:{rule_expr.id})") lines = [] for child in rule_expr.expressions: lines.extend(rule_expression_dto_to_lines(child, with_ids)) return [" ".join(heading_parts)] + indent(lines) def _date_dto_to_lines( rule_expr: CibRuleExpressionDto, with_ids: bool = False ) -> List[str]: # pylint: disable=too-many-branches operation = rule_expr.options.get("operation", None) if operation == "date_spec": heading_parts = ["Expression:"] if with_ids: heading_parts.append(f"(id:{rule_expr.id})") line_parts = ["Date Spec:"] if rule_expr.date_spec: line_parts.extend( format_name_value_list( sorted(rule_expr.date_spec.options.items()) ) ) if with_ids: line_parts.append(f"(id:{rule_expr.date_spec.id})") return [" ".join(heading_parts)] + indent([" ".join(line_parts)]) if operation == "in_range" and rule_expr.duration: heading_parts = ["Expression:", "date", "in_range"] if "start" in rule_expr.options: heading_parts.append(rule_expr.options["start"]) heading_parts.extend(["to", "duration"]) if with_ids: heading_parts.append(f"(id:{rule_expr.id})") lines = [" ".join(heading_parts)] line_parts = ["Duration:"] line_parts.extend( format_name_value_list(sorted(rule_expr.duration.options.items())) ) if with_ids: line_parts.append(f"(id:{rule_expr.duration.id})") lines.extend(indent([" ".join(line_parts)])) return lines return _simple_expr_to_lines(rule_expr, with_ids=with_ids) def _simple_expr_to_lines( rule_expr: CibRuleExpressionDto, with_ids: bool = False ) -> List[str]: parts = ["Expression:", rule_expr.as_string] if with_ids: parts.append(f"(id:{rule_expr.id})") return [" ".join(parts)] pcs-0.10.11/pcs/cli/tag/000077500000000000000000000000001412706364600145575ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/tag/__init__.py000066400000000000000000000000001412706364600166560ustar00rootroot00000000000000pcs-0.10.11/pcs/cli/tag/command.py000066400000000000000000000066151412706364600165570ustar00rootroot00000000000000from typing import ( Any, Sequence, ) from pcs.cli.common.errors import CmdLineInputError from pcs.cli.common.parse_args import group_by_keywords, InputModifiers from pcs.cli.reports.output import warn from pcs.common.str_tools import indent def tag_create( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) < 2: raise CmdLineInputError() tag_id, idref_list = argv[0], argv[1:] lib.tag.create(tag_id, idref_list) def tag_list_cmd( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file """ warn( "This command is deprecated and will be removed. 
" "Please use 'pcs tag config' instead.", stderr=True, ) return tag_config(lib, argv, modifiers) def tag_config( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") tag_list = lib.tag.config(argv) if not tag_list: print(" No tags defined") return lines = [] for tag in tag_list: lines.append(tag["tag_id"]) lines.extend(indent(tag["idref_list"])) print("\n".join(lines)) def tag_remove( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file """ modifiers.ensure_only_supported("-f") if len(argv) < 1: raise CmdLineInputError() lib.tag.remove(argv) def tag_update( lib: Any, argv: Sequence[str], modifiers: InputModifiers, ) -> None: """ Options: * -f - CIB file * --after - place a reference id in a tag after the specified reference id in the tag * --before - place a reference id in a tag before the specified reference id in the tag """ modifiers.ensure_only_supported("-f", "--after", "--before") if not argv: raise CmdLineInputError() tag_id = argv[0] parsed_args = group_by_keywords( argv[1:], ["add", "remove"], keyword_repeat_allowed=False, only_found_keywords=True, ) no_add_remove_arguments = ( "add" not in parsed_args and "remove" not in parsed_args ) no_add_id = "add" in parsed_args and not parsed_args["add"] no_remove_id = "remove" in parsed_args and not parsed_args["remove"] if no_add_remove_arguments or no_add_id or no_remove_id: raise CmdLineInputError( show_both_usage_and_message=True, hint=("Specify at least one id for 'add' or 'remove' arguments."), ) adjacent_idref = None after_adjacent = True if modifiers.is_specified("--after") and modifiers.is_specified("--before"): raise CmdLineInputError("Cannot specify both --before and --after") if modifiers.is_specified("--after"): adjacent_idref = modifiers.get("--after") after_adjacent = True elif modifiers.is_specified("--before"): adjacent_idref = modifiers.get("--before") after_adjacent = False lib.tag.update( tag_id, parsed_args["add"] if "add" in parsed_args else [], parsed_args["remove"] if "remove" in parsed_args else [], adjacent_idref=adjacent_idref, put_after_adjacent=after_adjacent, ) pcs-0.10.11/pcs/client.py000066400000000000000000000013371412706364600150710ustar00rootroot00000000000000from pcs import settings, utils from pcs.cli.common.errors import CmdLineInputError def local_auth_cmd(lib, argv, modifiers): """ Options: * -u - username * -p - password * --request-timeout - timeout for HTTP requests """ del lib modifiers.ensure_only_supported("-u", "-p", "--request-timeout") if len(argv) > 1: raise CmdLineInputError() port = argv[0] if argv else settings.pcsd_default_port username, password = utils.get_user_and_pass() utils.auth_hosts( { "localhost": { "username": username, "password": password, "dest_list": [{"addr": "localhost", "port": port}], } } ) pcs-0.10.11/pcs/cluster.py000066400000000000000000002033231412706364600152730ustar00rootroot00000000000000# pylint: disable=too-many-lines import datetime import json import math import os import subprocess import sys import tempfile import time import xml.dom.minidom from typing import ( cast, Any, Iterable, List, Mapping, Optional, ) from pcs import ( settings, utils, ) from pcs.utils import parallel_for_nodes from pcs.cli.common import parse_args from pcs.cli.common.errors import ( CmdLineInputError, ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE, HINT_SYNTAX_CHANGE, msg_command_replaced, ) from pcs.cli.file import metadata as file_metadata from 
pcs.cli.reports import process_library_reports from pcs.cli.reports.messages import report_item_msg_from_dto from pcs.common import ( file as pcs_file, file_type_codes, reports, ) from pcs.common.corosync_conf import ( CorosyncNodeDto, CorosyncConfDto, ) from pcs.common.interface import dto from pcs.common.node_communicator import ( HostNotFound, Request, RequestData, ) from pcs.common.str_tools import ( format_list, indent, ) from pcs.common.tools import format_os_error from pcs.lib import sbd as lib_sbd from pcs.lib.commands.remote_node import _destroy_pcmk_remote_env from pcs.lib.communication.nodes import CheckAuth from pcs.lib.communication.tools import ( run_and_raise, run as run_com_cmd, RunRemotelyBase, ) from pcs.lib.corosync import ( live as corosync_live, qdevice_net, ) from pcs.cli.reports.output import warn from pcs.lib.errors import LibraryError from pcs.lib.node import get_existing_nodes_names import pcs.lib.pacemaker.live as lib_pacemaker # pylint: disable=too-many-branches, too-many-statements def cluster_cib_upgrade_cmd(lib, argv, modifiers): """ Options: * -f - CIB file """ del lib modifiers.ensure_only_supported("-f") if argv: raise CmdLineInputError() utils.cluster_upgrade() def cluster_disable_cmd(lib, argv, modifiers): """ Options: * --all - disable all cluster nodes * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified or --all has been used """ del lib modifiers.ensure_only_supported("--all", "--request-timeout") if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) disable_cluster_all() else: disable_cluster(argv) def cluster_enable_cmd(lib, argv, modifiers): """ Options: * --all - enable all cluster nodes * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified or --all has been used """ del lib modifiers.ensure_only_supported("--all", "--request-timeout") if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) enable_cluster_all() else: enable_cluster(argv) def cluster_stop_cmd(lib, argv, modifiers): """ Options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified * --pacemaker - stop pacemaker, only effective when no node has been specified * --corosync - stop corosync, only effective when no node has been specified * --all - stop all cluster nodes """ del lib modifiers.ensure_only_supported( "--wait", "--request-timeout", "--pacemaker", "--corosync", "--all", "--force", ) if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) stop_cluster_all() else: stop_cluster(argv) def cluster_start_cmd(lib, argv, modifiers): """ Options: * --wait * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified * --all - start all cluster nodes """ del lib modifiers.ensure_only_supported( "--wait", "--request-timeout", "--all", "--corosync_conf" ) if modifiers.get("--all"): if argv: utils.err(ERR_NODE_LIST_AND_ALL_MUTUALLY_EXCLUSIVE) start_cluster_all() else: start_cluster(argv) def authkey_corosync(lib, argv, modifiers): """ Options: * --force - skip check for authkey length * --request-timeout - timeout for HTTP requests * --skip-offline - skip unreachable nodes """ modifiers.ensure_only_supported( "--force", "--skip-offline", "--request-timeout" ) if len(argv) > 1: raise CmdLineInputError() force_flags = [] if
modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) corosync_authkey = None if argv: try: with open(argv[0], "rb") as file: corosync_authkey = file.read() except OSError as e: utils.err( "Unable to read file '{0}': {1}".format( argv[0], format_os_error(e) ) ) lib.cluster.corosync_authkey_change( corosync_authkey=corosync_authkey, force_flags=force_flags, ) def sync_nodes(lib, argv, modifiers): """ Options: * --request-timeout - timeout for HTTP requests """ del lib modifiers.ensure_only_supported("--request-timeout") if argv: raise CmdLineInputError() config = utils.getCorosyncConf() nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade(conf_text=config) ) if not nodes: report_list.append( reports.ReportItem.error( reports.messages.CorosyncConfigNoNodesDefined() ) ) if report_list: process_library_reports(report_list) for node in nodes: utils.setCorosyncConfig(node, config) print( "Corosync configuration has been synchronized, please reload corosync " "daemon using 'pcs cluster reload corosync' command." ) def start_cluster(argv): """ Commandline options: * --wait * --request-timeout - timeout for HTTP requests, have effect only if at least one node have been specified """ wait = False wait_timeout = None if "--wait" in utils.pcs_options: wait_timeout = utils.validate_wait_get_timeout(False) wait = True if argv: nodes = set(argv) # unique start_cluster_nodes(nodes) if wait: wait_for_nodes_started(nodes, wait_timeout) return if not utils.hasCorosyncConf(): utils.err("cluster is not currently configured on this node") print("Starting Cluster...") service_list = ["corosync"] if utils.need_to_handle_qdevice_service(): service_list.append("corosync-qdevice") service_list.append("pacemaker") for service in service_list: utils.start_service(service) if wait: wait_for_nodes_started([], wait_timeout) def start_cluster_all(): """ Commandline options: * --wait * --request-timeout - timeout for HTTP requests """ wait = False wait_timeout = None if "--wait" in utils.pcs_options: wait_timeout = utils.validate_wait_get_timeout(False) wait = True all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append( reports.ReportItem.error( reports.messages.CorosyncConfigNoNodesDefined() ) ) if report_list: process_library_reports(report_list) start_cluster_nodes(all_nodes) if wait: wait_for_nodes_started(all_nodes, wait_timeout) def start_cluster_nodes(nodes): """ Commandline options: * --request-timeout - timeout for HTTP requests """ # Large clusters take longer time to start up. So we make the timeout longer # for each 8 nodes: # 1 - 8 nodes: 1 * timeout # 9 - 16 nodes: 2 * timeout # 17 - 24 nodes: 3 * timeout # and so on # Users can override this and set their own timeout by specifying # the --request-timeout option (see utils.sendHTTPRequest). 
timeout = int( settings.default_request_timeout * math.ceil(len(nodes) / 8.0) ) utils.read_known_hosts_file() # cache known hosts node_errors = parallel_for_nodes( utils.startCluster, nodes, quiet=True, timeout=timeout ) if node_errors: utils.err( "unable to start all nodes\n" + "\n".join(node_errors.values()) ) def is_node_fully_started(node_status): """ Commandline options: no options """ return ( "online" in node_status and "pending" in node_status and node_status["online"] and not node_status["pending"] ) def wait_for_local_node_started(stop_at, interval): """ Commandline options: no options """ try: while True: time.sleep(interval) node_status = lib_pacemaker.get_local_node_status( utils.cmd_runner() ) if is_node_fully_started(node_status): return 0, "Started" if datetime.datetime.now() > stop_at: return 1, "Waiting timeout" except LibraryError as e: return ( 1, "Unable to get node status: {0}".format( # pylint: disable=no-member "\n".join( report_item_msg_from_dto( cast(reports.ReportItemDto, item).message ).message for item in e.args ) ), ) def wait_for_remote_node_started(node, stop_at, interval): """ Commandline options: * --request-timeout - timeout for HTTP requests """ while True: time.sleep(interval) code, output = utils.getPacemakerNodeStatus(node) # HTTP error, permission denied or unable to auth # there is no point in trying again as it won't get magically fixed if code in [1, 3, 4]: return 1, output if code == 0: try: node_status = json.loads(output) if is_node_fully_started(node_status): return 0, "Started" except (ValueError, KeyError): # this won't get fixed either return 1, "Unable to get node status" if datetime.datetime.now() > stop_at: return 1, "Waiting timeout" def wait_for_nodes_started(node_list, timeout=None): """ Commandline options: * --request-timeout - timeout for HTTP requests, effective only if node_list is not an empty list """ timeout = 60 * 15 if timeout is None else timeout interval = 2 stop_at = datetime.datetime.now() + datetime.timedelta(seconds=timeout) print("Waiting for node(s) to start...") if not node_list: code, output = wait_for_local_node_started(stop_at, interval) if code != 0: utils.err(output) else: print(output) else: utils.read_known_hosts_file() # cache known hosts node_errors = parallel_for_nodes( wait_for_remote_node_started, node_list, stop_at, interval ) if node_errors: utils.err("unable to verify all nodes have started") def stop_cluster_all(): """ Commandline options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append( reports.ReportItem.error( reports.messages.CorosyncConfigNoNodesDefined() ) ) if report_list: process_library_reports(report_list) stop_cluster_nodes(all_nodes) def stop_cluster_nodes(nodes): """ Commandline options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) unknown_nodes = set(nodes) - set(all_nodes) if unknown_nodes: if report_list: process_library_reports(report_list) utils.err( "nodes '%s' do not appear to exist in configuration" % "', '".join(sorted(unknown_nodes)) ) utils.read_known_hosts_file() # cache known hosts stopping_all = set(nodes) >= set(all_nodes) if "--force" not in utils.pcs_options and not stopping_all: error_list = [] for node in nodes: retval, data =
utils.get_remote_quorumtool_output(node) if retval != 0: error_list.append(node + ": " + data) continue try: quorum_status = corosync_live.QuorumStatus.from_string(data) if not quorum_status.is_quorate: # Get quorum status from a quorate node, non-quorate nodes # may provide inaccurate info. If no node is quorate, there # is no quorum to be lost and therefore no error to be # reported. continue if quorum_status.stopping_nodes_cause_quorum_loss(nodes): utils.err( "Stopping the node(s) will cause a loss of the quorum" + ", use --force to override" ) else: # We have the info, no need to print errors error_list = [] break except corosync_live.QuorumStatusException: if not utils.is_node_offline_by_quorumtool_output(data): error_list.append(node + ": Unable to get quorum status") # else the node seems to be stopped already if error_list: utils.err( "Unable to determine whether stopping the nodes will cause " + "a loss of the quorum, use --force to override\n" + "\n".join(error_list) ) was_error = False node_errors = parallel_for_nodes( utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True ) accessible_nodes = [ node for node in nodes if node not in node_errors.keys() ] if node_errors: utils.err( "unable to stop all nodes\n" + "\n".join(node_errors.values()), exit_after_error=not accessible_nodes, ) was_error = True for node in node_errors: print("{0}: Not stopping cluster - node is unreachable".format(node)) node_errors = parallel_for_nodes( utils.stopCorosync, accessible_nodes, quiet=True ) if node_errors: utils.err( "unable to stop all nodes\n" + "\n".join(node_errors.values()) ) if was_error: utils.err("unable to stop all nodes") def enable_cluster(argv): """ Commandline options: * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified """ if argv: enable_cluster_nodes(argv) return try: utils.enableServices() except LibraryError as e: process_library_reports(e.args) def disable_cluster(argv): """ Commandline options: * --request-timeout - timeout for HTTP requests, effective only if at least one node has been specified """ if argv: disable_cluster_nodes(argv) return try: utils.disableServices() except LibraryError as e: process_library_reports(e.args) def enable_cluster_all(): """ Commandline options: * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append( reports.ReportItem.error( reports.messages.CorosyncConfigNoNodesDefined() ) ) if report_list: process_library_reports(report_list) enable_cluster_nodes(all_nodes) def disable_cluster_all(): """ Commandline options: * --request-timeout - timeout for HTTP requests """ all_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not all_nodes: report_list.append( reports.ReportItem.error( reports.messages.CorosyncConfigNoNodesDefined() ) ) if report_list: process_library_reports(report_list) disable_cluster_nodes(all_nodes) def enable_cluster_nodes(nodes): """ Commandline options: * --request-timeout - timeout for HTTP requests """ error_list = utils.map_for_error_list(utils.enableCluster, nodes) if error_list: utils.err("unable to enable all nodes\n" + "\n".join(error_list)) def disable_cluster_nodes(nodes): """ Commandline options: * --request-timeout - timeout for HTTP requests """ error_list = utils.map_for_error_list(utils.disableCluster, nodes) if error_list: utils.err("unable to disable all nodes\n" + "\n".join(error_list)) def 
destroy_cluster(argv): """ Commandline options: * --request-timeout - timeout for HTTP requests """ if argv: utils.read_known_hosts_file() # cache known hosts # stop pacemaker and resources while cluster is still quorate nodes = argv node_errors = parallel_for_nodes( utils.repeat_if_timeout(utils.stopPacemaker), nodes, quiet=True ) # proceed with destroy regardless of errors # destroy will stop any remaining cluster daemons node_errors = parallel_for_nodes( utils.destroyCluster, nodes, quiet=True ) if node_errors: utils.err( "unable to destroy cluster\n" + "\n".join(node_errors.values()) ) def stop_cluster(argv): """ Commandline options: * --force - no error when possible quorum loss * --request-timeout - timeout for HTTP requests - effective only when at least one node has been specified * --pacemaker - stop pacemaker, only effective when no node has been specified """ if argv: stop_cluster_nodes(argv) return if "--force" not in utils.pcs_options: # corosync 3.0.1 and older: # - retval is 0 on success if a node is not in a partition with quorum # - retval is 1 on error OR on success if a node has quorum # corosync 3.0.2 and newer: # - retval is 0 on success if a node has quorum # - retval is 1 on error # - retval is 2 on success if a node is not in a partition with quorum output, dummy_retval = utils.run(["corosync-quorumtool", "-p", "-s"]) try: if corosync_live.QuorumStatus.from_string( output ).stopping_local_node_cause_quorum_loss(): utils.err( "Stopping the node will cause a loss of the quorum" + ", use --force to override" ) except corosync_live.QuorumStatusException: if not utils.is_node_offline_by_quorumtool_output(output): utils.err( "Unable to determine whether stopping the node will cause " + "a loss of the quorum, use --force to override" ) # else the node seems to be stopped already, proceed to be sure stop_all = ( "--pacemaker" not in utils.pcs_options and "--corosync" not in utils.pcs_options ) if stop_all or "--pacemaker" in utils.pcs_options: stop_cluster_pacemaker() if stop_all or "--corosync" in utils.pcs_options: stop_cluster_corosync() def stop_cluster_pacemaker(): """ Commandline options: no options """ print("Stopping Cluster (pacemaker)...") utils.stop_service("pacemaker") def stop_cluster_corosync(): """ Commandline options: no options """ print("Stopping Cluster (corosync)...") service_list = [] if utils.need_to_handle_qdevice_service(): service_list.append("corosync-qdevice") service_list.append("corosync") for service in service_list: utils.stop_service(service) def kill_cluster(lib, argv, modifiers): """ Options: no options """ del lib if argv: raise CmdLineInputError() modifiers.ensure_only_supported() dummy_output, dummy_retval = kill_local_cluster_services() # if dummy_retval != 0: # print "Error: unable to execute killall -9" # print output # sys.exit(1) def kill_local_cluster_services(): """ Commandline options: no options """ all_cluster_daemons = [ # Daemons taken from cluster-clean script in pacemaker "pacemaker-attrd", "pacemaker-based", "pacemaker-controld", "pacemaker-execd", "pacemaker-fenced", "pacemaker-remoted", "pacemaker-schedulerd", "pacemakerd", "dlm_controld", "gfs_controld", # Corosync daemons "corosync-qdevice", "corosync", ] return utils.run([settings.killall_executable, "-9"] + all_cluster_daemons) def cluster_push(lib, argv, modifiers): """ Options: * --wait * --config - push only configuration section of CIB * -f - CIB file """ # pylint: disable=too-many-locals, del lib modifiers.ensure_only_supported("--wait", "--config", "-f") if 
len(argv) > 2: raise CmdLineInputError() filename = None scope = None timeout = None diff_against = None if modifiers.get("--wait"): timeout = utils.validate_wait_get_timeout() for arg in argv: if "=" not in arg: filename = arg else: arg_name, arg_value = arg.split("=", 1) if arg_name == "scope": if modifiers.get("--config"): utils.err("Cannot use both scope and --config") if not utils.is_valid_cib_scope(arg_value): utils.err("invalid CIB scope '%s'" % arg_value) else: scope = arg_value elif arg_name == "diff-against": diff_against = arg_value else: raise CmdLineInputError() if modifiers.get("--config"): scope = "configuration" if diff_against and scope: utils.err("Cannot use both scope and diff-against") if not filename: raise CmdLineInputError() try: new_cib_dom = xml.dom.minidom.parse(filename) if scope and not new_cib_dom.getElementsByTagName(scope): utils.err( "unable to push cib, scope '%s' not present in new cib" % scope ) except (EnvironmentError, xml.parsers.expat.ExpatError) as e: utils.err("unable to parse new cib: %s" % e) if diff_against: runner = utils.cmd_runner() command = [ settings.crm_diff, "--original", diff_against, "--new", filename, "--no-version", ] patch, stderr, retval = runner.run(command) # 0 (CRM_EX_OK) - success with no difference # 1 (CRM_EX_ERROR) - success with difference # 64 (CRM_EX_USAGE) - usage error # 65 (CRM_EX_DATAERR) - XML fragments not parseable if retval > 1: utils.err("unable to diff the CIBs:\n" + stderr) if retval == 0: print( "The new CIB is the same as the original CIB, nothing to push." ) sys.exit(0) command = [ settings.cibadmin, "--patch", "--xml-pipe", ] output, stderr, retval = runner.run(command, patch) if retval != 0: utils.err("unable to push cib\n" + stderr + output) else: command = ["cibadmin", "--replace", "--xml-file", filename] if scope: command.append("--scope=%s" % scope) output, retval = utils.run(command) # 103 (CRM_EX_OLD) - update older than existing config if retval == 103: utils.err( "Unable to push to the CIB because pushed configuration " "is older than existing one. If you are sure you want to " "push this configuration, try to use --config to replace only " "configuration part instead of whole CIB. Otherwise get current" " configuration by running command 'pcs cluster cib' and update" " that." 
) elif retval != 0: utils.err("unable to push cib\n" + output) print("CIB updated") if not modifiers.is_specified("--wait"): return cmd = ["crm_resource", "--wait"] if timeout: cmd.extend(["--timeout", str(timeout)]) output, retval = utils.run(cmd) if retval != 0: msg = [] if retval == settings.pacemaker_wait_timeout_status: msg.append("waiting timeout") if output: msg.append("\n" + output) utils.err("\n".join(msg).strip()) def cluster_edit(lib, argv, modifiers): """ Options: * --config - edit configuration section of CIB * -f - CIB file * --wait """ modifiers.ensure_only_supported("--config", "--wait", "-f") if "EDITOR" in os.environ: if len(argv) > 1: raise CmdLineInputError() scope = None scope_arg = "" for arg in argv: if "=" not in arg: raise CmdLineInputError() arg_name, arg_value = arg.split("=", 1) if arg_name == "scope" and not modifiers.get("--config"): if not utils.is_valid_cib_scope(arg_value): utils.err("invalid CIB scope '%s'" % arg_value) else: scope_arg = arg scope = arg_value else: raise CmdLineInputError() if modifiers.get("--config"): scope = "configuration" # Leave scope_arg empty as cluster_push will pick up a --config # option from utils.pcs_options scope_arg = "" editor = os.environ["EDITOR"] cib = utils.get_cib(scope) with tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs") as tempcib: tempcib.write(cib) tempcib.flush() try: subprocess.call([editor, tempcib.name]) except OSError: utils.err("unable to open file with $EDITOR: " + editor) tempcib.seek(0) newcib = "".join(tempcib.readlines()) if newcib == cib: print("CIB not updated, no changes detected") else: cluster_push( lib, [arg for arg in [tempcib.name, scope_arg] if arg], modifiers.get_subset("--wait", "--config", "-f"), ) else: utils.err("$EDITOR environment variable is not set") def get_cib(lib, argv, modifiers): """ Options: * --config show configuration section of CIB * -f - CIB file """ del lib modifiers.ensure_only_supported("--config", "-f") if len(argv) > 2: raise CmdLineInputError() filename = None scope = None for arg in argv: if "=" not in arg: filename = arg else: arg_name, arg_value = arg.split("=", 1) if arg_name == "scope" and not modifiers.get("--config"): if not utils.is_valid_cib_scope(arg_value): utils.err("invalid CIB scope '%s'" % arg_value) else: scope = arg_value else: raise CmdLineInputError() if modifiers.get("--config"): scope = "configuration" if not filename: print(utils.get_cib(scope).rstrip()) else: output = utils.get_cib(scope) if not output: utils.err("No data in the CIB") try: with open(filename, "w") as cib_file: cib_file.write(output) except EnvironmentError as e: utils.err( "Unable to write to file '%s', %s" % (filename, e.strerror) ) class RemoteAddNodes(RunRemotelyBase): def __init__(self, report_processor, target, data): super().__init__(report_processor) self._target = target self._data = data self._success = False def get_initial_request_list(self): return [ Request( self._target, RequestData( "remote/cluster_add_nodes", [("data_json", json.dumps(self._data))], ), ) ] def _process_response(self, response): node_label = response.request.target.label report_item = self._get_response_report(response) if report_item is not None: self._report(report_item) return try: output = json.loads(response.data) for report_dict in output["report_list"]: self._report( reports.ReportItem( severity=reports.ReportItemSeverity( report_dict["severity"], report_dict["forceable"], ), message=reports.messages.LegacyCommonMessage( report_dict["code"], report_dict["info"], 
report_dict["report_text"], ), ) ) if output["status"] == "success": self._success = True elif output["status"] != "error": print("Error: {}".format(output["status_msg"])) except (KeyError, json.JSONDecodeError): self._report( reports.ReportItem.warning( reports.messages.InvalidResponseFormat(node_label) ) ) def on_complete(self): return self._success def node_add_outside_cluster(lib, argv, modifiers): """ Options: * --wait - wait until new node will start up, effective only when --start is specified * --start - start new node * --enable - enable new node * --force - treat validation issues and not resolvable addresses as warnings instead of errors * --skip-offline - skip unreachable nodes * --no-watchdog-validation - do not validatate watchdogs * --request-timeout - HTTP request timeout """ del lib modifiers.ensure_only_supported( "--wait", "--start", "--enable", "--force", "--skip-offline", "--no-watchdog-validation", "--request-timeout", ) if len(argv) < 2: raise CmdLineInputError( "Usage: pcs cluster node add-outside " "[addr=]... [watchdog=] " "[device=]... [--start [--wait[=]]] [--enable] " "[--no-watchdog-validation]" ) cluster_node, *argv = argv node_dict = _parse_add_node(argv) force_flags = [] if modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) cmd_data = dict( nodes=[node_dict], wait=modifiers.get("--wait"), start=modifiers.get("--start"), enable=modifiers.get("--enable"), no_watchdog_validation=modifiers.get("--no-watchdog-validation"), force_flags=force_flags, ) lib_env = utils.get_lib_env() report_processor = lib_env.report_processor target_factory = lib_env.get_node_target_factory() report_list, target_list = target_factory.get_target_list_with_reports( [cluster_node], skip_non_existing=False, allow_skip=False, ) report_processor.report_list(report_list) if report_processor.has_errors: raise LibraryError() com_cmd = RemoteAddNodes(report_processor, target_list[0], cmd_data) was_successfull = run_com_cmd(lib_env.get_node_communicator(), com_cmd) if not was_successfull: raise LibraryError() def node_remove(lib, argv, modifiers): """ Options: * --force - continue even though the action may cause qourum loss * --skip-offline - skip unreachable nodes * --request-timeout - HTTP request timeout """ modifiers.ensure_only_supported( "--force", "--skip-offline", "--request-timeout", ) if not argv: raise CmdLineInputError() force_flags = [] if modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) lib.cluster.remove_nodes(argv, force_flags=force_flags) def cluster_uidgid(lib, argv, modifiers, silent_list=False): """ Options: no options """ # pylint: disable=too-many-locals, del lib modifiers.ensure_only_supported() if not argv: found = False uid_gid_files = os.listdir(settings.corosync_uidgid_dir) for ug_file in uid_gid_files: uid_gid_dict = utils.read_uid_gid_file(ug_file) if "uid" in uid_gid_dict or "gid" in uid_gid_dict: line = "UID/GID: uid=" if "uid" in uid_gid_dict: line += uid_gid_dict["uid"] line += " gid=" if "gid" in uid_gid_dict: line += uid_gid_dict["gid"] print(line) found = True if not found and not silent_list: print("No uidgids configured") return command = argv.pop(0) uid = "" gid = "" if command in {"add", "delete", "remove", "rm"} and argv: for arg in argv: if arg.find("=") == -1: utils.err( "uidgid options must be of the form uid= gid=" ) (key, value) = 
arg.split("=", 1) if key not in {"uid", "gid"}: utils.err( "%s is not a valid key, you must use uid or gid" % key ) if key == "uid": uid = value if key == "gid": gid = value if uid == "" and gid == "": utils.err("you must set either uid or gid") if command == "add": utils.write_uid_gid_file(uid, gid) elif command in {"delete", "remove", "rm"}: if command == "rm": sys.stderr.write( "'pcs cluster uidgid rm' has been deprecated, use 'pcs " "cluster uidgid delete' or 'pcs cluster uidgid remove' " "instead\n" ) file_removed = utils.remove_uid_gid_file(uid, gid) if not file_removed: utils.err( "no uidgid files with uid=%s and gid=%s found" % (uid, gid) ) else: # The hint is defined to print error messages which point users to the # changes section in pcs manpage. # To be removed in the next significant version. raise CmdLineInputError( hint=( msg_command_replaced( "pcs cluster uidgid delete", "pcs cluster uidgid remove", ) if command == "rm" else None ) ) def cluster_get_corosync_conf(lib, argv, modifiers): """ Options: * --request-timeout - timeout for HTTP requests, effetive only when at least one node has been specified """ del lib modifiers.ensure_only_supported("--request-timeout") if len(argv) > 1: raise CmdLineInputError() if not argv: print(utils.getCorosyncConf().rstrip()) return node = argv[0] retval, output = utils.getCorosyncConfig(node) if retval != 0: utils.err(output) else: print(output.rstrip()) def cluster_reload(lib, argv, modifiers): """ Options: no options """ del lib modifiers.ensure_only_supported() if len(argv) != 1 or argv[0] != "corosync": raise CmdLineInputError() output, retval = utils.reloadCorosync() if retval != 0 or "invalid option" in output: utils.err(output.rstrip()) print("Corosync reloaded") # Completely tear down the cluster & remove config files # Code taken from cluster-clean script in pacemaker def cluster_destroy(lib, argv, modifiers): """ Options: * --all - destroy cluster on all cluster nodes => destroy whole cluster * --request-timeout - timeout of HTTP requests, effective only with --all """ # pylint: disable=bare-except del lib modifiers.ensure_only_supported("--all", "--request-timeout") if argv: raise CmdLineInputError() if modifiers.get("--all"): # load data cib = None lib_env = utils.get_lib_env() try: cib = lib_env.get_cib() except LibraryError as e: warn( "Unable to load CIB to get guest and remote nodes from it, " "those nodes will not be deconfigured." ) corosync_nodes, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade() ) if not corosync_nodes: report_list.append( reports.ReportItem.error( reports.messages.CorosyncConfigNoNodesDefined() ) ) if report_list: process_library_reports(report_list) # destroy remote and guest nodes if cib is not None: try: all_remote_nodes, report_list = get_existing_nodes_names( cib=cib ) if report_list: process_library_reports(report_list) if all_remote_nodes: _destroy_pcmk_remote_env( lib_env, all_remote_nodes, skip_offline_nodes=True, allow_fails=True, ) except LibraryError as e: process_library_reports(e.args) # destroy full-stack nodes destroy_cluster(corosync_nodes) else: print("Shutting down pacemaker/corosync services...") for service in ["pacemaker", "corosync-qdevice", "corosync"]: try: utils.stop_service(service) except LibraryError: # It is safe to ignore error since we want it not to be running # anyways. 
pass print("Killing any remaining services...") kill_local_cluster_services() try: utils.disableServices() except: # previously errors were suppressed in here, let's keep it that way # for now pass try: service_manager = utils.get_service_manager() service_manager.disable( lib_sbd.get_sbd_service_name(service_manager) ) except: # it's not a big deal if sbd disable fails pass print("Removing all cluster configuration files...") dummy_output, dummy_retval = utils.run( [ settings.rm_executable, "-f", settings.corosync_conf_file, settings.corosync_authkey_file, settings.pacemaker_authkey_file, settings.pcsd_dr_config_location, ] ) state_files = [ "cib-*", "cib.*", "cib.xml*", "core.*", "cts.*", "hostcache", "pe*.bz2", ] for name in state_files: dummy_output, dummy_retval = utils.run( [ settings.find_executable, settings.pacemaker_local_state_dir, "-name", name, "-exec", settings.rm_executable, "-f", "{}", ";", ] ) try: qdevice_net.client_destroy() except: # errors from deleting other files are suppressed as well # we do not want to fail if qdevice was not set up pass def cluster_verify(lib, argv, modifiers): """ Options: * -f - CIB file * --full - more verbose output """ modifiers.ensure_only_supported("-f", "--full") if argv: raise CmdLineInputError() lib.cluster.verify(verbose=modifiers.get("--full")) def cluster_report(lib, argv, modifiers): """ Options: * --force - overwrite existing file * --from - timestamp * --to - timestamp """ del lib modifiers.ensure_only_supported("--force", "--from", "--to") if len(argv) != 1: raise CmdLineInputError() outfile = argv[0] dest_outfile = outfile + ".tar.bz2" if os.path.exists(dest_outfile): if not modifiers.get("--force"): utils.err( dest_outfile + " already exists, use --force to overwrite" ) else: try: os.remove(dest_outfile) except OSError as e: utils.err( "Unable to remove " + dest_outfile + ": " + e.strerror ) crm_report_opts = [] crm_report_opts.append("-f") if modifiers.is_specified("--from"): crm_report_opts.append(modifiers.get("--from")) if modifiers.is_specified("--to"): crm_report_opts.append("-t") crm_report_opts.append(modifiers.get("--to")) else: yesterday = datetime.datetime.now() - datetime.timedelta(1) crm_report_opts.append(yesterday.strftime("%Y-%m-%d %H:%M")) crm_report_opts.append(outfile) output, retval = utils.run([settings.crm_report] + crm_report_opts) if retval != 0 and ( "ERROR: Cannot determine nodes; specify --nodes or --single-node" in output ): utils.err("cluster is not configured on this node") newoutput = "" for line in output.split("\n"): if ( line.startswith("cat:") or line.startswith("grep") or line.startswith("tail") ): continue if "We will attempt to remove" in line: continue if "-p option" in line: continue if "However, doing" in line: continue if "to diagnose" in line: continue if "--dest" in line: line = line.replace("--dest", "") newoutput = newoutput + line + "\n" if retval != 0: utils.err(newoutput) print(newoutput) def send_local_configs( node_name_list, clear_local_cluster_permissions=False, force=False ): """ Commandline options: * --request-timeout - timeout of HTTP requests """ # pylint: disable=bare-except pcsd_data = { "nodes": node_name_list, "force": force, "clear_local_cluster_permissions": clear_local_cluster_permissions, } err_msgs = [] output, retval = utils.run_pcsdcli("send_local_configs", pcsd_data) if retval == 0 and output["status"] == "ok" and output["data"]: try: for node_name in node_name_list: node_response = output["data"][node_name] if node_response["status"] == "notauthorized": 
err_msgs.append( ( "Unable to authenticate to {0}, try running 'pcs " "host auth {0}'" ).format(node_name) ) if node_response["status"] not in ["ok", "not_supported"]: err_msgs.append( "Unable to set pcsd configs on {0}".format(node_name) ) except: err_msgs.append("Unable to communicate with pcsd") else: err_msgs.append("Unable to set pcsd configs") return err_msgs def cluster_auth_cmd(lib, argv, modifiers): """ Options: * --corosync_conf - corosync.conf file * --request-timeout - timeout of HTTP requests * -u - username * -p - password """ # pylint: disable=too-many-locals, del lib modifiers.ensure_only_supported( "--corosync_conf", "--request-timeout", "-u", "-p" ) if argv: # The hint is defined to print error messages which point users to the # changes section in pcs manpage. # To be removed in the next significant version. raise CmdLineInputError(hint=HINT_SYNTAX_CHANGE) lib_env = utils.get_lib_env() target_factory = lib_env.get_node_target_factory() cluster_node_list = lib_env.get_corosync_conf().get_nodes() cluster_node_names = [] missing_name = False for node in cluster_node_list: if node.name: cluster_node_names.append(node.name) else: missing_name = True if missing_name: print( "Warning: Skipping nodes which do not have their name defined in " "corosync.conf, use the 'pcs host auth' command to authenticate " "them" ) target_list = [] not_authorized_node_name_list = [] for node_name in cluster_node_names: try: target_list.append(target_factory.get_target(node_name)) except HostNotFound: print("{}: Not authorized".format(node_name)) not_authorized_node_name_list.append(node_name) com_cmd = CheckAuth(lib_env.report_processor) com_cmd.set_targets(target_list) not_authorized_node_name_list.extend( run_and_raise(lib_env.get_node_communicator(), com_cmd) ) if not_authorized_node_name_list: print( "Nodes to authorize: {}".format( ", ".join(not_authorized_node_name_list) ) ) username, password = utils.get_user_and_pass() not_auth_node_list = [] for node_name in not_authorized_node_name_list: for node in cluster_node_list: if node.name == node_name: if node.addrs_plain(): not_auth_node_list.append(node) else: print( f"{node.name}: No addresses defined in " "corosync.conf, use the 'pcs host auth' command to " "authenticate the node" ) nodes_to_auth_data = { node.name: dict( username=username, password=password, dest_list=[ dict( addr=node.addrs_plain()[0], port=settings.pcsd_default_port, ) ], ) for node in not_auth_node_list } utils.auth_hosts(nodes_to_auth_data) else: print("Sending cluster config files to the nodes...") msgs = send_local_configs(cluster_node_names, force=True) for msg in msgs: print("Warning: {0}".format(msg)) def _parse_node_options( node, options, additional_options=(), additional_repeatable_options=() ): """ Commandline options: no options """ # pylint: disable=invalid-name ADDR_OPT_KEYWORD = "addr" supported_options = {ADDR_OPT_KEYWORD} | set(additional_options) repeatable_options = {ADDR_OPT_KEYWORD} | set(additional_repeatable_options) parsed_options = parse_args.prepare_options(options, repeatable_options) unknown_options = set(parsed_options.keys()) - supported_options if unknown_options: raise CmdLineInputError( "Unknown options '{}' for node '{}'".format( "', '".join(sorted(unknown_options)), node ) ) parsed_options["name"] = node if ADDR_OPT_KEYWORD in parsed_options: parsed_options["addrs"] = parsed_options[ADDR_OPT_KEYWORD] del parsed_options[ADDR_OPT_KEYWORD] return parsed_options TRANSPORT_KEYWORD = "transport" TRANSPORT_DEFAULT_SECTION = "__default__" 
LINK_KEYWORD = "link" def _parse_transport(transport_args): """ Commandline options: no options """ if not transport_args: raise CmdLineInputError( "{} type not defined".format(TRANSPORT_KEYWORD.capitalize()) ) transport_type, *transport_options = transport_args keywords = {"compression", "crypto", LINK_KEYWORD} parsed_options = parse_args.group_by_keywords( transport_options, keywords, implicit_first_group_key=TRANSPORT_DEFAULT_SECTION, group_repeated_keywords=[LINK_KEYWORD], ) options = { section: parse_args.prepare_options(parsed_options[section]) for section in keywords | {TRANSPORT_DEFAULT_SECTION} if section != LINK_KEYWORD } options[LINK_KEYWORD] = [ parse_args.prepare_options(link_options) for link_options in parsed_options[LINK_KEYWORD] ] return transport_type, options def cluster_setup(lib, argv, modifiers): """ Options: * --wait - only effective when used with --start * --start - start cluster * --enable - enable cluster * --force - some validation issues and unresolvable addresses are treated as warnings * --no-keys-sync - do not create and distribute pcsd ssl cert and key, corosync and pacemaker authkeys * --corosync_conf - corosync.conf file path, do not talk to cluster nodes """ # pylint: disable=too-many-locals is_local = modifiers.is_specified("--corosync_conf") allowed_options_common = ["--force"] allowed_options_live = ["--wait", "--start", "--enable", "--no-keys-sync"] allowed_options_local = ["--corosync_conf", "--overwrite"] modifiers.ensure_only_supported( *( allowed_options_common + allowed_options_live + allowed_options_local ), # The hint is defined to print error messages which point users to the # changes section in pcs manpage. # To be removed in the next significant version. hint_syntax_changed=modifiers.is_specified_any(["--local", "--name"]), ) if is_local and modifiers.is_specified_any(allowed_options_live): raise CmdLineInputError( ( "Cannot specify any of {banned} when '--corosync_conf' is " "specified" ).format(banned=format_list(allowed_options_live)) ) if not is_local and modifiers.is_specified("--overwrite"): raise CmdLineInputError( "Cannot specify '--overwrite' when '--corosync_conf' is not " "specified" ) if len(argv) < 2: raise CmdLineInputError() cluster_name, *argv = argv keywords = [TRANSPORT_KEYWORD, "totem", "quorum"] parsed_args = parse_args.group_by_keywords( argv, keywords, implicit_first_group_key="nodes", keyword_repeat_allowed=False, only_found_keywords=True, ) nodes = [ _parse_node_options(node, options) for node, options in parse_args.split_list_by_any_keywords( parsed_args["nodes"], "node name", ).items() ] transport_type = None transport_options = {} if TRANSPORT_KEYWORD in parsed_args: transport_type, transport_options = _parse_transport( parsed_args[TRANSPORT_KEYWORD] ) force_flags = [] if modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if not is_local: lib.cluster.setup( cluster_name, nodes, transport_type=transport_type, transport_options=transport_options.get( TRANSPORT_DEFAULT_SECTION, {} ), link_list=transport_options.get(LINK_KEYWORD, []), compression_options=transport_options.get("compression", {}), crypto_options=transport_options.get("crypto", {}), totem_options=parse_args.prepare_options( parsed_args.get("totem", []) ), quorum_options=parse_args.prepare_options( parsed_args.get("quorum", []) ), wait=modifiers.get("--wait"), start=modifiers.get("--start"), enable=modifiers.get("--enable"), no_keys_sync=modifiers.get("--no-keys-sync"), force_flags=force_flags, ) return corosync_conf_data = 
lib.cluster.setup_local( cluster_name, nodes, transport_type=transport_type, transport_options=transport_options.get(TRANSPORT_DEFAULT_SECTION, {}), link_list=transport_options.get(LINK_KEYWORD, []), compression_options=transport_options.get("compression", {}), crypto_options=transport_options.get("crypto", {}), totem_options=parse_args.prepare_options(parsed_args.get("totem", [])), quorum_options=parse_args.prepare_options( parsed_args.get("quorum", []) ), force_flags=force_flags, ) corosync_conf_file = pcs_file.RawFile( file_metadata.for_file_type( file_type_codes.COROSYNC_CONF, modifiers.get("--corosync_conf") ) ) overwrite = modifiers.is_specified("--overwrite") try: corosync_conf_file.write(corosync_conf_data, can_overwrite=overwrite) except pcs_file.FileAlreadyExists as e: utils.err( reports.messages.FileAlreadyExists( e.metadata.file_type_code, e.metadata.path, ).message + ", use --overwrite to overwrite existing file(s)" ) except pcs_file.RawFileError as e: utils.err( reports.messages.FileIoError( e.metadata.file_type_code, e.action, e.reason, file_path=e.metadata.path, ).message ) def config_update( lib: Any, argv: List[str], modifiers: parse_args.InputModifiers ) -> None: """ Options: * --corosync_conf - corosync.conf file path, do not talk to cluster nodes """ modifiers.ensure_only_supported("--corosync_conf") parsed_args = parse_args.group_by_keywords( argv, ["transport", "compression", "crypto", "totem"], ) if not modifiers.is_specified("--corosync_conf"): lib.cluster.config_update( parse_args.prepare_options(parsed_args["transport"]), parse_args.prepare_options(parsed_args["compression"]), parse_args.prepare_options(parsed_args["crypto"]), parse_args.prepare_options(parsed_args["totem"]), ) return corosync_conf_file = pcs_file.RawFile( file_metadata.for_file_type( file_type_codes.COROSYNC_CONF, modifiers.get("--corosync_conf") ) ) try: corosync_conf_file.write( lib.cluster.config_update_local( corosync_conf_file.read(), parse_args.prepare_options(parsed_args["transport"]), parse_args.prepare_options(parsed_args["compression"]), parse_args.prepare_options(parsed_args["crypto"]), parse_args.prepare_options(parsed_args["totem"]), ), can_overwrite=True, ) except pcs_file.RawFileError as e: # TODO do not use LibraryError raise LibraryError( reports.ReportItem.error( reports.messages.FileIoError( e.metadata.file_type_code, e.action, e.reason, file_path=e.metadata.path, ) ) ) from e def _format_options(label: str, options: Mapping[str, str]) -> List[str]: output = [] if options: output.append(f"{label}:") output.extend( indent(f"{opt}: {val}" for opt, val in sorted(options.items())) ) return output def _format_nodes(nodes: Iterable[CorosyncNodeDto]) -> List[str]: output = ["Nodes:"] for node in sorted(nodes, key=lambda node: node.name): node_attrs = [ f"Link {addr.link} address: {addr.addr}" for addr in sorted(node.addrs, key=lambda addr: addr.link) ] + [f"nodeid: {node.nodeid}"] output.extend(indent([f"{node.name}:"] + indent(node_attrs))) return output def config_show( lib: Any, argv: List[str], modifiers: parse_args.InputModifiers ) -> None: """ Options: * --corosync_conf - corosync.conf file path, do not talk to cluster nodes * --output-format - supported formats: text, cmd, json """ modifiers.ensure_only_supported("--corosync_conf", "--output-format") if argv: raise CmdLineInputError() output_format = modifiers.get("--output-format") supported_formats = ["text", "cmd", "json"] if not output_format in supported_formats: raise CmdLineInputError( ( "Unknown value '{}' for 
'--output-format' option. Supported " "values are: {}" ).format(output_format, format_list(supported_formats)) ) corosync_conf_dto = lib.cluster.get_corosync_conf_struct() if output_format == "cmd": if corosync_conf_dto.quorum_device is not None: warn( "Quorum device configuration detected but not yet supported by " "this command." ) output = " \\\n".join(_config_get_cmd(corosync_conf_dto)) elif output_format == "json": output = json.dumps(dto.to_dict(corosync_conf_dto)) else: output = "\n".join(_config_get_text(corosync_conf_dto)) print(output) def _config_get_text(corosync_conf: CorosyncConfDto) -> List[str]: lines = [ f"Cluster Name: {corosync_conf.cluster_name}", "Transport: {}".format(corosync_conf.transport.lower()), ] lines.extend(_format_nodes(corosync_conf.nodes)) if corosync_conf.links_options: lines.append("Links:") for linknum, link_options in sorted( corosync_conf.links_options.items() ): lines.extend( indent(_format_options(f"Link {linknum}", link_options)) ) lines.extend( _format_options("Transport Options", corosync_conf.transport_options) ) lines.extend( _format_options( "Compression Options", corosync_conf.compression_options ) ) lines.extend( _format_options("Crypto Options", corosync_conf.crypto_options) ) lines.extend(_format_options("Totem Options", corosync_conf.totem_options)) lines.extend( _format_options("Quorum Options", corosync_conf.quorum_options) ) if corosync_conf.quorum_device: lines.append(f"Quorum Device: {corosync_conf.quorum_device.model}") lines.extend( indent( _format_options( "Options", corosync_conf.quorum_device.generic_options ) ) ) lines.extend( indent( _format_options( "Model Options", corosync_conf.quorum_device.model_options, ) ) ) lines.extend( indent( _format_options( "Heuristics", corosync_conf.quorum_device.heuristics_options, ) ) ) return lines def _corosync_node_to_cmd_line(node: CorosyncNodeDto) -> str: return " ".join( [node.name] + [ f"addr={addr.addr}" for addr in sorted(node.addrs, key=lambda addr: addr.link) ] ) def _section_to_lines( options: Mapping[str, str], keyword: Optional[str] = None ) -> List[str]: output: List[str] = [] if options: if keyword: output.append(keyword) output.extend( indent([f"{key}={val}" for key, val in sorted(options.items())]) ) return indent(output) def _config_get_cmd(corosync_conf: CorosyncConfDto) -> List[str]: lines = [f"pcs cluster setup {corosync_conf.cluster_name}"] lines += indent( [ _corosync_node_to_cmd_line(node) for node in sorted( corosync_conf.nodes, key=lambda node: node.nodeid ) ] ) transport = [ "transport", str(corosync_conf.transport.value).lower(), ] + _section_to_lines(corosync_conf.transport_options) for _, link in sorted(corosync_conf.links_options.items()): transport.extend(_section_to_lines(link, "link")) transport.extend( _section_to_lines(corosync_conf.compression_options, "compression") ) transport.extend(_section_to_lines(corosync_conf.crypto_options, "crypto")) lines.extend(indent(transport)) lines.extend(_section_to_lines(corosync_conf.totem_options, "totem")) lines.extend(_section_to_lines(corosync_conf.quorum_options, "quorum")) return lines def _parse_add_node(argv): # pylint: disable=invalid-name DEVICE_KEYWORD = "device" WATCHDOG_KEYWORD = "watchdog" hostname, *argv = argv node_dict = _parse_node_options( hostname, argv, additional_options={DEVICE_KEYWORD, WATCHDOG_KEYWORD}, additional_repeatable_options={DEVICE_KEYWORD}, ) if DEVICE_KEYWORD in node_dict: node_dict[f"{DEVICE_KEYWORD}s"] = node_dict[DEVICE_KEYWORD] del node_dict[DEVICE_KEYWORD] return node_dict def 
node_add(lib, argv, modifiers): """ Options: * --wait - wait until the new node starts up, effective only when --start is specified * --start - start new node * --enable - enable new node * --force - treat validation issues and not resolvable addresses as warnings instead of errors * --skip-offline - skip unreachable nodes * --no-watchdog-validation - do not validate watchdogs * --request-timeout - HTTP request timeout """ modifiers.ensure_only_supported( "--wait", "--start", "--enable", "--force", "--skip-offline", "--no-watchdog-validation", "--request-timeout", ) if not argv: raise CmdLineInputError() node_dict = _parse_add_node(argv) force_flags = [] if modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) lib.cluster.add_nodes( nodes=[node_dict], wait=modifiers.get("--wait"), start=modifiers.get("--start"), enable=modifiers.get("--enable"), no_watchdog_validation=modifiers.get("--no-watchdog-validation"), force_flags=force_flags, ) def remove_nodes_from_cib(lib, argv, modifiers): """ Options: no options """ modifiers.ensure_only_supported() if not argv: raise CmdLineInputError("No nodes specified") lib.cluster.remove_nodes_from_cib(argv) def link_add(lib, argv, modifiers): """ Options: * --force - treat validation issues and not resolvable addresses as warnings instead of errors * --skip-offline - skip unreachable nodes * --request-timeout - HTTP request timeout """ modifiers.ensure_only_supported( "--force", "--request-timeout", "--skip-offline" ) if not argv: raise CmdLineInputError() force_flags = [] if modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) parsed = parse_args.group_by_keywords( argv, {"options"}, implicit_first_group_key="nodes", keyword_repeat_allowed=False, ) lib.cluster.add_link( parse_args.prepare_options(parsed["nodes"]), parse_args.prepare_options(parsed["options"]), force_flags=force_flags, ) def link_remove(lib, argv, modifiers): """ Options: * --skip-offline - skip unreachable nodes * --request-timeout - HTTP request timeout """ modifiers.ensure_only_supported("--request-timeout", "--skip-offline") if not argv: raise CmdLineInputError() force_flags = [] if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) lib.cluster.remove_links(argv, force_flags=force_flags) def link_update(lib, argv, modifiers): """ Options: * --force - treat validation issues and not resolvable addresses as warnings instead of errors * --skip-offline - skip unreachable nodes * --request-timeout - HTTP request timeout """ modifiers.ensure_only_supported( "--force", "--request-timeout", "--skip-offline" ) if len(argv) < 2: raise CmdLineInputError() force_flags = [] if modifiers.get("--force"): force_flags.append(reports.codes.FORCE) if modifiers.get("--skip-offline"): force_flags.append(reports.codes.SKIP_OFFLINE_NODES) linknumber = argv[0] parsed = parse_args.group_by_keywords( argv[1:], {"options"}, implicit_first_group_key="nodes", keyword_repeat_allowed=False, ) lib.cluster.update_link( linknumber, parse_args.prepare_options(parsed["nodes"]), parse_args.prepare_options(parsed["options"]), force_flags=force_flags, )
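# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pcs sources): how the node
# parsing helpers above turn CLI tokens into the dict handed to
# lib.cluster.add_nodes(). The host name, addresses and paths below are
# hypothetical examples.
#
#     _parse_add_node(
#         ["node3", "addr=10.0.0.3", "addr=192.168.0.3",
#          "watchdog=/dev/watchdog", "device=/dev/sda1"]
#     )
#
# would return roughly:
#
#     {
#         "name": "node3",
#         "addrs": ["10.0.0.3", "192.168.0.3"],
#         "watchdog": "/dev/watchdog",
#         "devices": ["/dev/sda1"],
#     }
#
# "addr" and "device" are repeatable options and are therefore collected into
# lists ("addrs", "devices"); "watchdog" may only be given once.
# ---------------------------------------------------------------------------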
pcs-0.10.11/pcs/common/000077500000000000000000000000001412706364600145255ustar00rootroot00000000000000pcs-0.10.11/pcs/common/__init__.py000066400000000000000000000000001412706364600166240ustar00rootroot00000000000000pcs-0.10.11/pcs/common/communication/000077500000000000000000000000001412706364600173725ustar00rootroot00000000000000pcs-0.10.11/pcs/common/communication/__init__.py000066400000000000000000000000401412706364600214750ustar00rootroot00000000000000from . import const, dto, types pcs-0.10.11/pcs/common/communication/const.py000066400000000000000000000004131412706364600210700ustar00rootroot00000000000000from .types import CommunicationResultStatus as Status COM_STATUS_SUCCESS = Status("success") COM_STATUS_INPUT_ERROR = Status("input_error") COM_STATUS_UNKNOWN_CMD = Status("unknown_cmd") COM_STATUS_ERROR = Status("error") COM_STATUS_EXCEPTION = Status("exception") pcs-0.10.11/pcs/common/communication/dto.py000066400000000000000000000013761412706364600205410ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( Any, List, Mapping, Optional, ) from pcs.common.interface.dto import DataTransferObject from pcs.common.reports.dto import ReportItemDto from .types import CommunicationResultStatus as StatusType @dataclass(frozen=True) class InternalCommunicationResultDto(DataTransferObject): status: StatusType status_msg: Optional[str] report_list: List[ReportItemDto] data: Any @dataclass(frozen=True) class InternalCommunicationRequestOptionsDto(DataTransferObject): request_timeout: Optional[int] @dataclass(frozen=True) class InternalCommunicationRequestDto(DataTransferObject): options: InternalCommunicationRequestOptionsDto cmd: str cmd_data: Mapping[str, Any] pcs-0.10.11/pcs/common/communication/types.py000066400000000000000000000001421412706364600211050ustar00rootroot00000000000000from typing import NewType CommunicationResultStatus = NewType("CommunicationResultStatus", str) pcs-0.10.11/pcs/common/const.py000066400000000000000000000017211412706364600162260ustar00rootroot00000000000000from typing import NewType from pcs.common.tools import Version PcmkRoleType = NewType("PcmkRoleType", str) PCMK_ROLE_STARTED = PcmkRoleType("Started") PCMK_ROLE_STOPPED = PcmkRoleType("Stopped") PCMK_ROLE_PROMOTED = PcmkRoleType("Promoted") PCMK_ROLE_UNPROMOTED = PcmkRoleType("Unpromoted") PCMK_ROLE_PROMOTED_LEGACY = PcmkRoleType("Master") PCMK_ROLE_UNPROMOTED_LEGACY = PcmkRoleType("Slave") PCMK_ROLE_PROMOTED_PRIMARY = PCMK_ROLE_PROMOTED_LEGACY PCMK_ROLE_UNPROMOTED_PRIMARY = PCMK_ROLE_UNPROMOTED_LEGACY PCMK_ROLES_PROMOTED = (PCMK_ROLE_PROMOTED, PCMK_ROLE_PROMOTED_LEGACY) PCMK_ROLES_UNPROMOTED = (PCMK_ROLE_UNPROMOTED, PCMK_ROLE_UNPROMOTED_LEGACY) PCMK_ROLES_RUNNING = ( (PCMK_ROLE_STARTED,) + PCMK_ROLES_PROMOTED + PCMK_ROLES_UNPROMOTED ) PCMK_ROLES = (PCMK_ROLE_STOPPED,) + PCMK_ROLES_RUNNING PCMK_NEW_ROLES_CIB_VERSION = Version(3, 7, 0) PCMK_RULES_NODE_ATTR_EXPR_WITH_INT_TYPE_CIB_VERSION = Version(3, 5, 0) PCMK_ON_FAIL_DEMOTE_CIB_VERSION = Version(3, 4, 0) pcs-0.10.11/pcs/common/corosync_conf.py000066400000000000000000000023521412706364600177450ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( List, Mapping, Optional, ) from pcs.common.interface.dto import DataTransferObject from pcs.common.types import CorosyncTransportType @dataclass(frozen=True) class CorosyncNodeAddressDto(DataTransferObject): addr: str link: str type: str # TODO: create enum of addr types in pcs.lib.corosync.node @dataclass(frozen=True) class 
CorosyncNodeDto(DataTransferObject): name: str nodeid: str addrs: List[CorosyncNodeAddressDto] @dataclass(frozen=True) class CorosyncQuorumDeviceSettingsDto(DataTransferObject): model: str model_options: Mapping[str, str] generic_options: Mapping[str, str] heuristics_options: Mapping[str, str] @dataclass(frozen=True) class CorosyncConfDto(DataTransferObject): # pylint: disable=too-many-instance-attributes cluster_name: str transport: CorosyncTransportType totem_options: Mapping[str, str] transport_options: Mapping[str, str] compression_options: Mapping[str, str] crypto_options: Mapping[str, str] nodes: List[CorosyncNodeDto] links_options: Mapping[str, Mapping[str, str]] quorum_options: Mapping[str, str] quorum_device: Optional[CorosyncQuorumDeviceSettingsDto] pcs-0.10.11/pcs/common/dr.py000066400000000000000000000012641412706364600155070ustar00rootroot00000000000000from dataclasses import dataclass from typing import Sequence from pcs.common.types import DrRole from pcs.common.interface.dto import DataTransferObject @dataclass(frozen=True) class DrConfigNodeDto(DataTransferObject): name: str @dataclass(frozen=True) class DrConfigSiteDto(DataTransferObject): site_role: DrRole node_list: Sequence[DrConfigNodeDto] @dataclass(frozen=True) class DrConfigDto(DataTransferObject): local_site: DrConfigSiteDto remote_site_list: Sequence[DrConfigSiteDto] @dataclass(frozen=True) class DrSiteStatusDto(DataTransferObject): local_site: bool site_role: DrRole status_plaintext: str status_successfully_obtained: bool pcs-0.10.11/pcs/common/fencing_topology.py000066400000000000000000000001341412706364600204420ustar00rootroot00000000000000TARGET_TYPE_NODE = "node" TARGET_TYPE_REGEXP = "regexp" TARGET_TYPE_ATTRIBUTE = "attribute" pcs-0.10.11/pcs/common/file.py000066400000000000000000000145401412706364600160220ustar00rootroot00000000000000from collections import namedtuple import fcntl import os import shutil from pcs.common.tools import format_os_error # TODO add logging (logger / debug reports ?) to the RawFile class; be aware # the class is used both in pcs.cli and pcs.lib packages FileMetadata = namedtuple( "FileMetadata", [ "file_type_code", "path", "owner_user_name", "owner_group_name", "permissions", "is_binary", ], ) class RawFileError(Exception): # So far there has been no need to have a separate exception for each # action. Actions must be passed in a report and we certainly do not want # a separate report for each action. 
ACTION_CHMOD = "chmod" ACTION_CHOWN = "chown" ACTION_READ = "read" ACTION_REMOVE = "remove" ACTION_WRITE = "write" def __init__(self, metadata, action, reason=""): """ FileMetadata metadata -- describes the file involved in the error string action -- possible values enumerated in RawFileError string reason -- plain text error details """ super().__init__() self.metadata = metadata self.action = action self.reason = reason class FileAlreadyExists(RawFileError): def __init__(self, metadata): """ FileMetadata metadata -- describes the file involved in the error """ super().__init__(metadata, RawFileError.ACTION_WRITE) class RawFileInterface: def __init__(self, metadata): """ FileMetadata metadata -- describes the file and provides its metadata """ self.__metadata = metadata @property def metadata(self): return self.__metadata def exists(self): """ Return True if file exists, False otherwise """ raise NotImplementedError() def read(self): """ Return content of the file as bytes """ raise NotImplementedError() def write(self, file_data, can_overwrite=False): """ Write file_data to the file bytes file_data -- data to be written bool can_overwrite -- raise if False and the file already exists """ raise NotImplementedError() class RawFile(RawFileInterface): def exists(self): # Returns False if the file is not accessible, does not raise. return os.path.exists(self.metadata.path) def read(self): try: mode = "rb" if self.metadata.is_binary else "r" with open(self.metadata.path, mode) as my_file: # the lock is released when the file gets closed on leaving the # with statement fcntl.flock(my_file.fileno(), fcntl.LOCK_SH) content = my_file.read() return ( content if self.metadata.is_binary else content.encode("utf-8") ) except OSError as e: # Specific expection if the file does not exist is not needed, # anyone can and should check that using the exists method. raise RawFileError( self.metadata, RawFileError.ACTION_READ, format_os_error(e) ) from e def write(self, file_data, can_overwrite=False): try: mode = "{write_mode}{binary_mode}".format( write_mode="w" if can_overwrite else "x", binary_mode="b" if self.metadata.is_binary else "", ) # It seems pylint cannot process constructing the mode variable and # gives a false positive. # pylint: disable=bad-open-mode with open(self.metadata.path, mode) as my_file: # the lock is released when the file gets closed on leaving the # with statement fcntl.flock(my_file.fileno(), fcntl.LOCK_EX) # Set the ownership and permissions to cover the case when we # just created the file. If the file already existed, make sure # the ownership and permissions are correct before writing any # data into it. 
if ( self.metadata.owner_user_name is not None or self.metadata.owner_group_name is not None ): try: shutil.chown( self.metadata.path, self.metadata.owner_user_name, self.metadata.owner_group_name, ) except LookupError as e: raise RawFileError( self.metadata, RawFileError.ACTION_CHOWN, str(e) ) from e except OSError as e: raise RawFileError( self.metadata, RawFileError.ACTION_CHOWN, format_os_error(e), ) from e if self.metadata.permissions is not None: try: os.chmod(my_file.fileno(), self.metadata.permissions) except OSError as e: raise RawFileError( self.metadata, RawFileError.ACTION_CHMOD, format_os_error(e), ) from e # Write file data my_file.write( file_data if self.metadata.is_binary else file_data.decode("utf-8") ) except FileExistsError as e: raise FileAlreadyExists(self.metadata) from e except OSError as e: raise RawFileError( self.metadata, RawFileError.ACTION_WRITE, format_os_error(e) ) from e def remove(self, fail_if_file_not_found=True): get_raw_file_error = lambda e: RawFileError( self.metadata, RawFileError.ACTION_REMOVE, format_os_error(e) ) try: os.remove(self.metadata.path) except FileNotFoundError as e: if fail_if_file_not_found: raise get_raw_file_error(e) from e except OSError as e: raise get_raw_file_error(e) from e def backup(self): # TODO implement raise NotImplementedError() pcs-0.10.11/pcs/common/file_type_codes.py000066400000000000000000000011771412706364600202420ustar00rootroot00000000000000from typing import NewType FileTypeCode = NewType("FileTypeCode", str) BOOTH_CONFIG = FileTypeCode("BOOTH_CONFIG") BOOTH_KEY = FileTypeCode("BOOTH_KEY") CIB = FileTypeCode("CIB") COROSYNC_AUTHKEY = FileTypeCode("COROSYNC_AUTHKEY") COROSYNC_CONF = FileTypeCode("COROSYNC_CONF") PACEMAKER_AUTHKEY = FileTypeCode("PACEMAKER_AUTHKEY") PCSD_ENVIRONMENT_CONFIG = FileTypeCode("PCSD_ENVIRONMENT_CONFIG") PCSD_SSL_CERT = FileTypeCode("PCSD_SSL_CERT") PCSD_SSL_KEY = FileTypeCode("PCSD_SSL_KEY") PCS_KNOWN_HOSTS = FileTypeCode("PCS_KNOWN_HOSTS") PCS_SETTINGS_CONF = FileTypeCode("PCS_SETTINGS_CONF") PCS_DR_CONFIG = FileTypeCode("PCS_DR_CONFIG") pcs-0.10.11/pcs/common/host.py000066400000000000000000000021311412706364600160510ustar00rootroot00000000000000from collections import namedtuple from pcs import settings Destination = namedtuple("Destination", ["addr", "port"]) class PcsKnownHost(namedtuple("KnownHost", ["name", "token", "dest_list"])): @classmethod def from_known_host_file_dict(cls, name, known_host_dict): dest_list = [ Destination(conn["addr"], conn["port"]) for conn in known_host_dict["dest_list"] ] if not dest_list: raise KeyError("no destination defined") return cls(name, token=known_host_dict["token"], dest_list=dest_list) def to_known_host_dict(self): return ( self.name, dict( token=self.token, dest_list=[ dict( addr=dest.addr, port=dest.port, ) for dest in self.dest_list ], ), ) @property def dest(self): if self.dest_list: return self.dest_list[0] return Destination(self.name, settings.pcsd_default_port) pcs-0.10.11/pcs/common/interface/000077500000000000000000000000001412706364600164655ustar00rootroot00000000000000pcs-0.10.11/pcs/common/interface/__init__.py000066400000000000000000000000001412706364600205640ustar00rootroot00000000000000pcs-0.10.11/pcs/common/interface/dto.py000066400000000000000000000067731412706364600176420ustar00rootroot00000000000000from typing import ( Any, Dict, Iterable, NewType, Type, TypeVar, Union, ) from dataclasses import asdict, fields, is_dataclass import dacite from pcs.common import types PrimitiveType = Union[str, int, float, bool, None] 
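# Illustrative note (not part of the original pcs sources): a DtoPayload, as
# defined below, is the plain JSON-serializable form of a DataTransferObject
# produced by to_dict(). For instance, a CorosyncNodeAddressDto could
# serialize to something like {"addr": "10.0.0.1", "link": "0", "type": "IPv4"}
# - the values here are hypothetical examples.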
DtoPayload = Dict[str, "SerializableType"] # type: ignore SerializableType = Union[ # type: ignore PrimitiveType, DtoPayload, # type: ignore Iterable["SerializableType"], # type: ignore ] T = TypeVar("T") ToDictMetaKey = NewType("ToDictMetaKey", str) META_NAME = ToDictMetaKey("META_NAME") class DataTransferObject: pass def meta(name: str) -> Dict[str, str]: metadata: Dict[str, str] = dict() if name: metadata[META_NAME] = name return metadata def _is_compatible_type(_type: Type, arg_index: int) -> bool: return ( hasattr(_type, "__args__") and len(_type.__args__) >= arg_index and is_dataclass(_type.__args__[arg_index]) ) def _convert_dict( klass: Type[DataTransferObject], obj_dict: DtoPayload ) -> DtoPayload: new_dict = dict() for _field in fields(klass): value = obj_dict[_field.name] if is_dataclass(_field.type): value = _convert_dict(_field.type, value) elif isinstance(value, list) and _is_compatible_type(_field.type, 0): value = [ _convert_dict(_field.type.__args__[0], item) for item in value ] elif isinstance(value, dict) and _is_compatible_type(_field.type, 1): value = { item_key: _convert_dict(_field.type.__args__[1], item_val) for item_key, item_val in value.items() } new_dict[_field.metadata.get(META_NAME, _field.name)] = value return new_dict def to_dict(obj: DataTransferObject) -> DtoPayload: return _convert_dict(obj.__class__, asdict(obj)) DtoType = TypeVar("DtoType", bound=DataTransferObject) def _convert_payload(klass: Type[DtoType], data: DtoPayload) -> DtoPayload: new_dict = dict() for _field in fields(klass): value = data[_field.metadata.get(META_NAME, _field.name)] if is_dataclass(_field.type): value = _convert_payload(_field.type, value) elif isinstance(value, list) and _is_compatible_type(_field.type, 0): value = [ _convert_payload(_field.type.__args__[0], item) for item in value ] elif isinstance(value, dict) and _is_compatible_type(_field.type, 1): value = { item_key: _convert_payload(_field.type.__args__[1], item_val) for item_key, item_val in value.items() } new_dict[_field.name] = value return new_dict def from_dict(cls: Type[DtoType], data: DtoPayload) -> DtoType: return dacite.from_dict( data_class=cls, data=_convert_payload(cls, data), # NOTE: all enum types have to be listed here in the cast key # see: https://github.com/konradhalas/dacite#casting config=dacite.Config( cast=[ types.CibNvsetType, types.CibRuleInEffectStatus, types.CibRuleExpressionType, types.CorosyncTransportType, types.DrRole, types.ResourceRelationType, ] ), ) class ImplementsToDto: def to_dto(self) -> Any: raise NotImplementedError() class ImplementsFromDto: @classmethod def from_dto(cls: Type[T], dto_obj: Any) -> T: raise NotImplementedError() pcs-0.10.11/pcs/common/node_communicator.py000066400000000000000000000427631412706364600206140ustar00rootroot00000000000000import base64 import io import re from collections import namedtuple from urllib.parse import urlencode # We should ignore SIGPIPE when using pycurl.NOSIGNAL - see the libcurl tutorial # for more info. 
try: import signal signal.signal(signal.SIGPIPE, signal.SIG_IGN) except ImportError: pass from pcs import settings from pcs.common import pcs_pycurl as pycurl from pcs.common.host import Destination def _find_value_for_possible_keys(value_dict, possible_key_list): for key in possible_key_list: if key in value_dict: return value_dict[key] return None class HostNotFound(Exception): def __init__(self, name): super().__init__() self.name = name class NodeTargetFactory: def __init__(self, known_hosts): self._known_hosts = known_hosts def get_target(self, host_name): known_host = self._known_hosts.get(host_name) if known_host is None: raise HostNotFound(host_name) return RequestTarget.from_known_host(known_host) def get_target_from_hostname(self, hostname): try: return self.get_target(hostname) except HostNotFound: return RequestTarget(hostname) class RequestData( namedtuple("RequestData", ["action", "structured_data", "data"]) ): """ This class represents an action and the data associated with it which will be sent in a request """ def __new__(cls, action, structured_data=(), data=None): """ string action -- action to perform list structured_data -- list of tuples, data to send with the specified action string data -- raw data to send in the request's body """ return super(RequestData, cls).__new__( cls, action, data if data else structured_data, data if data else urlencode(structured_data), ) class RequestTarget( namedtuple("RequestTarget", ["label", "token", "dest_list"]) ): """ This class represents a target (host) for a request to be performed on """ def __new__(cls, label, token=None, dest_list=()): if not dest_list: dest_list = [Destination(label, settings.pcsd_default_port)] return super(RequestTarget, cls).__new__( cls, label, token=token, dest_list=list(dest_list), ) @classmethod def from_known_host(cls, known_host): return cls( known_host.name, token=known_host.token, dest_list=known_host.dest_list, ) @property def first_addr(self): # __new__ ensures there is always at least one item in self.dest_list return self.dest_list[0].addr class Request: """ This class represents a request. Using RequestTarget, it provides an interface for getting the next available host to make the request on. """ def __init__(self, request_target, request_data): """ RequestTarget request_target RequestData request_data """ self._target = request_target self._data = request_data self._current_dest_iterator = iter(self._target.dest_list) self._current_dest = None self.next_dest() def next_dest(self): """ Move to the next available host connection. Raises StopIteration when there is no connection to use. """ self._current_dest = next(self._current_dest_iterator) @property def url(self): """ URL representing the request using the current host. """ addr = self.dest.addr port = self.dest.port return "https://{host}:{port}/{request}".format( host="[{0}]".format(addr) if ":" in addr else addr, port=(port if port else settings.pcsd_default_port), request=self._data.action, ) @property def dest(self): return self._current_dest @property def host_label(self): return self._target.label @property def target(self): return self._target @property def data(self): return self._data.data @property def action(self): return self._data.action @property def cookies(self): cookies = {} if self._target.token: cookies["token"] = self._target.token return cookies def __repr__(self): return str("Request({0}, {1})").format(self._target, self._data) class Response: """ This class represents a response to a request, available via instance properties. 
""" def __init__(self, handle, was_connected, errno=None, error_msg=None): self._handle = handle self._was_connected = was_connected self._errno = errno self._error_msg = error_msg self._data = None self._debug = None @classmethod def connection_successful(cls, handle): """ Returns Response instance that is marked as successfully connected. pycurl.Curl handle -- curl easy handle, which connection was successful """ return cls(handle, True) @classmethod def connection_failure(cls, handle, errno, error_msg): """ Returns Response instance that is marked as not successfuly connected. pycurl.Curl handle -- curl easy handle, which was not connected int errno -- error number string error_msg -- text description of error """ return cls(handle, False, errno, error_msg) @property def request(self): return self._handle.request_obj @property def handle(self): return self._handle @property def was_connected(self): return self._was_connected @property def errno(self): return self._errno @property def error_msg(self): return self._error_msg @property def data(self): if self._data is None: self._data = self._handle.output_buffer.getvalue().decode("utf-8") return self._data @property def debug(self): if self._debug is None: self._debug = self._handle.debug_buffer.getvalue().decode("utf-8") return self._debug @property def response_code(self): if not self.was_connected: return None return self._handle.getinfo(pycurl.RESPONSE_CODE) def __repr__(self): return str( "Response({0} data='{1}' was_connected={2}) errno='{3}'" " error_msg='{4}' response_code='{5}')" ).format( self.request, self.data, self.was_connected, self.errno, self.error_msg, self.response_code, ) class NodeCommunicatorFactory: def __init__(self, communicator_logger, user, groups, request_timeout): self._logger = communicator_logger self._user = user self._groups = groups self._request_timeout = request_timeout def get_communicator(self, request_timeout=None): return self.get_simple_communicator(request_timeout=request_timeout) def get_simple_communicator(self, request_timeout=None): timeout = request_timeout if request_timeout else self._request_timeout return Communicator( self._logger, self._user, self._groups, request_timeout=timeout ) def get_multiaddress_communicator(self, request_timeout=None): timeout = request_timeout if request_timeout else self._request_timeout return MultiaddressCommunicator( self._logger, self._user, self._groups, request_timeout=timeout ) class Communicator: """ This class provides simple interface for making parallel requests. The instances of this class are not thread-safe! It is intended to use it only in a single thread. Use an unique instance for each thread. """ curl_multi_select_timeout_default = 0.8 # in seconds def __init__(self, communicator_logger, user, groups, request_timeout=None): self._logger = communicator_logger self._auth_cookies = _get_auth_cookies(user, groups) self._request_timeout = ( request_timeout if request_timeout is not None else settings.default_request_timeout ) self._multi_handle = pycurl.CurlMulti() self._is_running = False # This is used just for storing references of curl easy handles. # We need to have references for all the handles, so they don't be # cleaned up by the garbage collector. self._easy_handle_list = [] def add_requests(self, request_list): """ Add requests to queue to be processed. It is possible to call this method before getting generator using start_loop method and also during getting responses from generator. 
Requests are not performed after calling this method, but only when the generator returned by the start_loop method is in progress (has returned at least one response and has not raised a StopIteration exception). list request_list -- Request objects to add to the queue """ for request in request_list: handle = _create_request_handle( request, self._auth_cookies, self._request_timeout, ) self._easy_handle_list.append(handle) self._multi_handle.add_handle(handle) if self._is_running: self._logger.log_request_start(request) def start_loop(self): """ Returns a generator. When the generator is invoked, all requests in the queue (added by the add_requests method) will be invoked in parallel, and the generator will then return responses for these requests. It is possible to add new requests to the queue while the generator is in progress. The generator will stop (raise StopIteration) after all requests (including those added after the creation of the generator) are processed. WARNING: do not use multiple instances of the generator (of one Communicator instance) while there is one which hasn't finished (raised StopIteration). It will cause an AssertionError. USAGE: com = Communicator(...) com.add_requests([ Request(...), ... ]) for response in com.start_loop(): # do something with response # if needed, add some new requests to the queue com.add_requests([Request(...)]) """ if self._is_running: raise AssertionError("Method start_loop already running") self._is_running = True for handle in self._easy_handle_list: self._logger.log_request_start(handle.request_obj) finished_count = 0 while finished_count < len(self._easy_handle_list): self.__multi_perform() self.__wait_for_multi_handle() response_list = self.__get_all_ready_responses() for response in response_list: # free up memory for next usage of this Communicator instance self._multi_handle.remove_handle(response.handle) self._logger.log_response(response) yield response # if something was added to the queue in the meantime, run it # immediately, so we don't need to wait until all responses have # been processed self.__multi_perform() finished_count += len(response_list) self._easy_handle_list = [] self._is_running = False def __get_all_ready_responses(self): response_list = [] repeat = True while repeat: num_queued, ok_list, err_list = self._multi_handle.info_read() response_list.extend( [Response.connection_successful(handle) for handle in ok_list] + [ Response.connection_failure(handle, errno, error_msg) for handle, errno, error_msg in err_list ] ) repeat = num_queued > 0 return response_list def __multi_perform(self): # run all internal operations required by libcurl status, num_to_process = self._multi_handle.perform() # if perform returns E_CALL_MULTI_PERFORM, perform needs to be called # once again right away while status == pycurl.E_CALL_MULTI_PERFORM: status, num_to_process = self._multi_handle.perform() return num_to_process def __wait_for_multi_handle(self): # try to wait until there is something to do for us need_to_wait = True while need_to_wait: timeout = self._multi_handle.timeout() if timeout == 0: # if timeout == 0 then there is something to process already return timeout = ( timeout / 1000.0 if timeout > 0 # curl doesn't have a timeout set, so we can use our default else self.curl_multi_select_timeout_default ) # when the value returned from select is -1, it timed out, so we # can wait need_to_wait = self._multi_handle.select(timeout) == -1 class MultiaddressCommunicator(Communicator): """ Class with the same interface as Communicator. 
class MultiaddressCommunicator(Communicator):
    """
    Class with the same interface as Communicator. Unlike Communicator, it
    takes advantage of multiple hosts in RequestTarget. So if it is not
    possible to connect to the target using the first hostname, it will use
    the next one until the connection is successful or there are no hosts
    left.
    """

    def start_loop(self):
        for response in super().start_loop():
            if response.was_connected:
                yield response
                continue
            try:
                previous_dest = response.request.dest
                response.request.next_dest()
                self._logger.log_retry(response, previous_dest)
                self.add_requests([response.request])
            except StopIteration:
                self._logger.log_no_more_addresses(response)
                yield response


class CommunicatorLoggerInterface:
    def log_request_start(self, request):
        raise NotImplementedError()

    def log_response(self, response):
        raise NotImplementedError()

    def log_retry(self, response, previous_dest):
        raise NotImplementedError()

    def log_no_more_addresses(self, response):
        raise NotImplementedError()


def _get_auth_cookies(user, group_list):
    """
    Returns input parameters in a dictionary which is prepared to be
    converted to a cookie string.

    string user -- CIB user
    string group_list -- CIB user groups
    """
    # Let's be safe about characters in variables (they can come from env)
    # and do base64. We cannot do it for CIB_user however to be backward
    # compatible so we at least remove disallowed characters.
    cookies = {}
    if user:
        cookies["CIB_user"] = re.sub(r"[^!-~]", "", user).replace(";", "")
    if group_list:
        # cookies require a string but base64encode returns bytes, so decode
        # it...
        cookies["CIB_user_groups"] = base64.b64encode(
            # python3 requires the value to be bytes not str
            " ".join(group_list).encode("utf-8")
        ).decode("utf-8")
    return cookies
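
# A small, runnable illustration of the helper above: CIB_user is reduced to
# printable ASCII with semicolons removed, while CIB_user_groups travels
# base64-encoded and decodes back to the space-separated group list. This is
# only a sketch demonstrating the round trip; the user and group names are
# example data.
def _example_auth_cookies_roundtrip():
    cookies = _get_auth_cookies("hacluster", ["haclient", "admins"])
    groups = (
        base64.b64decode(cookies["CIB_user_groups"].encode("utf-8"))
        .decode("utf-8")
        .split(" ")
    )
    # cookies["CIB_user"] == "hacluster"; groups == ["haclient", "admins"]
    return cookies["CIB_user"], groups
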
def _create_request_handle(request, cookies, timeout):
    """
    Returns a Curl object (easy handle) which is set up with the specified
    parameters.

    Request request -- request specification
    dict cookies -- cookies to add to request
    int timeout -- request timeout
    """
    # it is not possible to take this callback out of this function, because
    # of the curl API
    def __debug_callback(data_type, debug_data):
        # pylint: disable=no-member
        prefixes = {
            pycurl.DEBUG_TEXT: b"* ",
            pycurl.DEBUG_HEADER_IN: b"< ",
            pycurl.DEBUG_HEADER_OUT: b"> ",
            pycurl.DEBUG_DATA_IN: b"<< ",
            pycurl.DEBUG_DATA_OUT: b">> ",
        }
        if data_type in prefixes:
            debug_output.write(prefixes[data_type])
            debug_output.write(debug_data)
            if not debug_data.endswith(b"\n"):
                debug_output.write(b"\n")

    output = io.BytesIO()
    debug_output = io.BytesIO()
    cookies.update(request.cookies)
    handle = pycurl.Curl()
    handle.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)
    handle.setopt(pycurl.TIMEOUT, timeout)
    handle.setopt(pycurl.URL, request.url.encode("utf-8"))
    handle.setopt(pycurl.WRITEFUNCTION, output.write)
    handle.setopt(pycurl.VERBOSE, 1)
    handle.setopt(pycurl.DEBUGFUNCTION, __debug_callback)
    handle.setopt(pycurl.SSL_VERIFYHOST, 0)
    handle.setopt(pycurl.SSL_VERIFYPEER, 0)
    handle.setopt(pycurl.NOSIGNAL, 1)  # required for multi-threading
    handle.setopt(pycurl.HTTPHEADER, ["Expect: "])
    if cookies:
        handle.setopt(pycurl.COOKIE, _dict_to_cookies(cookies).encode("utf-8"))
    if request.data:
        handle.setopt(pycurl.COPYPOSTFIELDS, request.data.encode("utf-8"))
    # add references for the request object and output buffers to the handle,
    # so later we don't need to match these objects when they are returned
    # from pycurl after they've been processed
    # similar usage is in pycurl example:
    # https://github.com/pycurl/pycurl/blob/REL_7_19_0_3/examples/retriever-multi.py
    handle.request_obj = request
    handle.output_buffer = output
    handle.debug_buffer = debug_output
    return handle


def _dict_to_cookies(cookies_dict):
    return ";".join(
        [
            "{0}={1}".format(key, value)
            for key, value in sorted(cookies_dict.items())
        ]
    )
pcs-0.10.11/pcs/common/pacemaker/000077500000000000000000000000001412706364600164555ustar00rootroot00000000000000pcs-0.10.11/pcs/common/pacemaker/__init__.py000066400000000000000000000000771412706364600205720ustar00rootroot00000000000000from .
import ( nvset, resource, role, rule, ) pcs-0.10.11/pcs/common/pacemaker/nvset.py000066400000000000000000000011731412706364600201700ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( Mapping, Optional, Sequence, ) from pcs.common.interface.dto import DataTransferObject from pcs.common.pacemaker.rule import CibRuleExpressionDto from pcs.common.types import CibNvsetType @dataclass(frozen=True) class CibNvpairDto(DataTransferObject): id: str # pylint: disable=invalid-name name: str value: str @dataclass(frozen=True) class CibNvsetDto(DataTransferObject): id: str # pylint: disable=invalid-name type: CibNvsetType options: Mapping[str, str] rule: Optional[CibRuleExpressionDto] nvpairs: Sequence[CibNvpairDto] pcs-0.10.11/pcs/common/pacemaker/resource/000077500000000000000000000000001412706364600203045ustar00rootroot00000000000000pcs-0.10.11/pcs/common/pacemaker/resource/__init__.py000066400000000000000000000000001412706364600224030ustar00rootroot00000000000000pcs-0.10.11/pcs/common/pacemaker/resource/relations.py000066400000000000000000000011041412706364600226520ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( Any, Mapping, Sequence, ) from pcs.common.types import ResourceRelationType from pcs.common.interface.dto import DataTransferObject @dataclass(frozen=True) class RelationEntityDto(DataTransferObject): id: str # pylint: disable=invalid-name type: ResourceRelationType members: Sequence[str] metadata: Mapping[str, Any] @dataclass(frozen=True) class ResourceRelationDto(DataTransferObject): relation_entity: RelationEntityDto members: Sequence["ResourceRelationDto"] is_leaf: bool pcs-0.10.11/pcs/common/pacemaker/role.py000066400000000000000000000012311412706364600177650ustar00rootroot00000000000000from .. 
import const def get_value_for_cib( role: const.PcmkRoleType, is_latest_supported: bool ) -> const.PcmkRoleType: if is_latest_supported: return get_value_primary(role) if role in const.PCMK_ROLES_PROMOTED: return const.PCMK_ROLE_PROMOTED_LEGACY if role in const.PCMK_ROLES_UNPROMOTED: return const.PCMK_ROLE_UNPROMOTED_LEGACY return role def get_value_primary(role: const.PcmkRoleType) -> const.PcmkRoleType: if role in const.PCMK_ROLES_PROMOTED: return const.PCMK_ROLE_PROMOTED_PRIMARY if role in const.PCMK_ROLES_UNPROMOTED: return const.PCMK_ROLE_UNPROMOTED_PRIMARY return role pcs-0.10.11/pcs/common/pacemaker/rule.py000066400000000000000000000015211412706364600177750ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( Mapping, Optional, Sequence, ) from pcs.common.interface.dto import DataTransferObject from pcs.common.types import ( CibRuleInEffectStatus, CibRuleExpressionType, ) @dataclass(frozen=True) class CibRuleDateCommonDto(DataTransferObject): id: str # pylint: disable=invalid-name options: Mapping[str, str] @dataclass(frozen=True) class CibRuleExpressionDto(DataTransferObject): # pylint: disable=too-many-instance-attributes id: str # pylint: disable=invalid-name type: CibRuleExpressionType in_effect: CibRuleInEffectStatus # only valid for type==rule options: Mapping[str, str] date_spec: Optional[CibRuleDateCommonDto] duration: Optional[CibRuleDateCommonDto] expressions: Sequence["CibRuleExpressionDto"] as_string: str pcs-0.10.11/pcs/common/pcs_pycurl.py000066400000000000000000000014531412706364600172650ustar00rootroot00000000000000import sys # pylint: disable=wildcard-import, unused-wildcard-import from pycurl import * # This package defines constants which are not present in some older versions # of pycurl but pcs needs to use them required_constants = { "PROTOCOLS": 181, "PROTO_HTTPS": 2, "E_OPERATION_TIMEDOUT": 28, # these are types of debug messages # see https://curl.haxx.se/libcurl/c/CURLOPT_DEBUGFUNCTION.html "DEBUG_TEXT": 0, "DEBUG_HEADER_IN": 1, "DEBUG_HEADER_OUT": 2, "DEBUG_DATA_IN": 3, "DEBUG_DATA_OUT": 4, "DEBUG_SSL_DATA_IN": 5, "DEBUG_SSL_DATA_OUT": 6, "DEBUG_END": 7, } __current_module = sys.modules[__name__] for constant, value in required_constants.items(): if not hasattr(__current_module, constant): setattr(__current_module, constant, value) pcs-0.10.11/pcs/common/reports/000077500000000000000000000000001412706364600162235ustar00rootroot00000000000000pcs-0.10.11/pcs/common/reports/__init__.py000066400000000000000000000005211412706364600203320ustar00rootroot00000000000000from . 
import ( codes, const, item, messages, types, ) from .item import ( get_severity, ReportItem, ReportItemList, ReportItemSeverity, ReportItemContext, ) from .dto import ReportItemDto from .processor import ( has_errors, ReportProcessor, ) from .conversions import report_dto_to_item pcs-0.10.11/pcs/common/reports/codes.py000066400000000000000000000631111412706364600176740ustar00rootroot00000000000000from .types import ( ForceCode as F, MessageCode as M, ) # force categories FORCE = F("FORCE") SKIP_OFFLINE_NODES = F("SKIP_OFFLINE_NODES") # messages ADD_REMOVE_ITEMS_NOT_SPECIFIED = M("ADD_REMOVE_ITEMS_NOT_SPECIFIED") ADD_REMOVE_ITEMS_DUPLICATION = M("ADD_REMOVE_ITEMS_DUPLICATION") ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER = M( "ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER" ) ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER = M( "ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER" ) ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME = M( "ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME" ) ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER = M( "ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER" ) ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER = M( "ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER" ) ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF = M( "ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF" ) ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD = M( "ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD" ) AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = M("AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE") AGENT_NAME_GUESS_FOUND_NONE = M("AGENT_NAME_GUESS_FOUND_NONE") AGENT_NAME_GUESSED = M("AGENT_NAME_GUESSED") BAD_CLUSTER_STATE_FORMAT = M("BAD_CLUSTER_STATE_FORMAT") BOOTH_ADDRESS_DUPLICATION = M("BOOTH_ADDRESS_DUPLICATION") BOOTH_ALREADY_IN_CIB = M("BOOTH_ALREADY_IN_CIB") BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = M("BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP") BOOTH_CONFIG_ACCEPTED_BY_NODE = M("BOOTH_CONFIG_ACCEPTED_BY_NODE") BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR = M("BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR") BOOTH_CONFIG_DISTRIBUTION_STARTED = M("BOOTH_CONFIG_DISTRIBUTION_STARTED") BOOTH_CONFIG_IS_USED = M("BOOTH_CONFIG_IS_USED") BOOTH_CONFIG_UNEXPECTED_LINES = M("BOOTH_CONFIG_UNEXPECTED_LINES") BOOTH_DAEMON_STATUS_ERROR = M("BOOTH_DAEMON_STATUS_ERROR") BOOTH_EVEN_PEERS_NUM = M("BOOTH_EVEN_PEERS_NUM") BOOTH_FETCHING_CONFIG_FROM_NODE = M("BOOTH_FETCHING_CONFIG_FROM_NODE") BOOTH_INVALID_NAME = M("BOOTH_INVALID_NAME") BOOTH_LACK_OF_SITES = M("BOOTH_LACK_OF_SITES") BOOTH_MULTIPLE_TIMES_IN_CIB = M("BOOTH_MULTIPLE_TIMES_IN_CIB") BOOTH_NOT_EXISTS_IN_CIB = M("BOOTH_NOT_EXISTS_IN_CIB") BOOTH_PEERS_STATUS_ERROR = M("BOOTH_PEERS_STATUS_ERROR") BOOTH_TICKET_DOES_NOT_EXIST = M("BOOTH_TICKET_DOES_NOT_EXIST") BOOTH_TICKET_DUPLICATE = M("BOOTH_TICKET_DUPLICATE") BOOTH_TICKET_NAME_INVALID = M("BOOTH_TICKET_NAME_INVALID") BOOTH_TICKET_OPERATION_FAILED = M("BOOTH_TICKET_OPERATION_FAILED") BOOTH_TICKET_STATUS_ERROR = M("BOOTH_TICKET_STATUS_ERROR") BOOTH_UNSUPPORTED_FILE_LOCATION = M("BOOTH_UNSUPPORTED_FILE_LOCATION") CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = M( "CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE" ) CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED = M( "CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED" ) # TODO: remove, use ADD_REMOVE reports CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP = M( "CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP" ) # TODO: remove, use ADD_REMOVE reports CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP = M( 
"CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP" ) # TODO: remove, use ADD_REMOVE reports CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP = M( "CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP" ) # TODO: remove, use ADD_REMOVE reports CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE = M("CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE") # TODO: remove, use ADD_REMOVE reports CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF = M("CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF") # TODO: remove, use ADD_REMOVE reports CANNOT_GROUP_RESOURCE_NO_RESOURCES = M("CANNOT_GROUP_RESOURCE_NO_RESOURCES") CANNOT_GROUP_RESOURCE_WRONG_TYPE = M("CANNOT_GROUP_RESOURCE_WRONG_TYPE") CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE = M("CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE") CANNOT_MOVE_RESOURCE_BUNDLE = M("CANNOT_MOVE_RESOURCE_BUNDLE") CANNOT_MOVE_RESOURCE_CLONE = M("CANNOT_MOVE_RESOURCE_CLONE") CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = M( "CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE" ) CANNOT_MOVE_RESOURCE_PROMOTABLE_INNER = M( "CANNOT_MOVE_RESOURCE_PROMOTABLE_INNER" ) CANNOT_MOVE_RESOURCE_STOPPED_NO_NODE_SPECIFIED = M( "CANNOT_MOVE_RESOURCE_STOPPED_NO_NODE_SPECIFIED" ) CANNOT_REMOVE_ALL_CLUSTER_NODES = M("CANNOT_REMOVE_ALL_CLUSTER_NODES") CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE = M( "CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE" ) CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET = M( "CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET" ) CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET = M( "CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET" ) CIB_ACL_TARGET_ALREADY_EXISTS = M("CIB_ACL_TARGET_ALREADY_EXISTS") CIB_ALERT_RECIPIENT_ALREADY_EXISTS = M("CIB_ALERT_RECIPIENT_ALREADY_EXISTS") CIB_ALERT_RECIPIENT_VALUE_INVALID = M("CIB_ALERT_RECIPIENT_VALUE_INVALID") CIB_CANNOT_FIND_MANDATORY_SECTION = M("CIB_CANNOT_FIND_MANDATORY_SECTION") CIB_DIFF_ERROR = M("CIB_DIFF_ERROR") CIB_FENCING_LEVEL_ALREADY_EXISTS = M("CIB_FENCING_LEVEL_ALREADY_EXISTS") CIB_FENCING_LEVEL_DOES_NOT_EXIST = M("CIB_FENCING_LEVEL_DOES_NOT_EXIST") CIB_LOAD_ERROR_BAD_FORMAT = M("CIB_LOAD_ERROR_BAD_FORMAT") CIB_LOAD_ERROR = M("CIB_LOAD_ERROR") CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION = M( "CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION" ) CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID = M("CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID") CIB_LOAD_ERROR_SCOPE_MISSING = M("CIB_LOAD_ERROR_SCOPE_MISSING") CIB_PUSH_ERROR = M("CIB_PUSH_ERROR") CIB_SAVE_TMP_ERROR = M("CIB_SAVE_TMP_ERROR") CIB_SIMULATE_ERROR = M("CIB_SIMULATE_ERROR") CIB_UPGRADE_FAILED = M("CIB_UPGRADE_FAILED") CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = M( "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION" ) CIB_UPGRADE_SUCCESSFUL = M("CIB_UPGRADE_SUCCESSFUL") CLUSTER_DESTROY_STARTED = M("CLUSTER_DESTROY_STARTED") CLUSTER_DESTROY_SUCCESS = M("CLUSTER_DESTROY_SUCCESS") CLUSTER_ENABLE_STARTED = M("CLUSTER_ENABLE_STARTED") CLUSTER_ENABLE_SUCCESS = M("CLUSTER_ENABLE_SUCCESS") CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = M( "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES" ) CLUSTER_SETUP_SUCCESS = M("CLUSTER_SETUP_SUCCESS") CLUSTER_START_STARTED = M("CLUSTER_START_STARTED") CLUSTER_START_SUCCESS = M("CLUSTER_START_SUCCESS") CLUSTER_WILL_BE_DESTROYED = M("CLUSTER_WILL_BE_DESTROYED") LIVE_ENVIRONMENT_NOT_CONSISTENT = M("LIVE_ENVIRONMENT_NOT_CONSISTENT") LIVE_ENVIRONMENT_REQUIRED = M("LIVE_ENVIRONMENT_REQUIRED") LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE = M( "LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE" ) COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK = M( "COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK" ) COROSYNC_AUTHKEY_WRONG_LENGTH = M("COROSYNC_AUTHKEY_WRONG_LENGTH") 
COROSYNC_BAD_NODE_ADDRESSES_COUNT = M("COROSYNC_BAD_NODE_ADDRESSES_COUNT") COROSYNC_CLUSTER_NAME_INVALID_FOR_GFS2 = M( "COROSYNC_CLUSTER_NAME_INVALID_FOR_GFS2" ) COROSYNC_CONFIG_ACCEPTED_BY_NODE = M("COROSYNC_CONFIG_ACCEPTED_BY_NODE") COROSYNC_CONFIG_CANNOT_SAVE_INVALID_NAMES_VALUES = M( "COROSYNC_CONFIG_CANNOT_SAVE_INVALID_NAMES_VALUES" ) COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = M( "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR" ) COROSYNC_CONFIG_DISTRIBUTION_STARTED = M("COROSYNC_CONFIG_DISTRIBUTION_STARTED") COROSYNC_CONFIG_MISSING_NAMES_OF_NODES = M( "COROSYNC_CONFIG_MISSING_NAMES_OF_NODES" ) COROSYNC_CONFIG_NO_NODES_DEFINED = M("COROSYNC_CONFIG_NO_NODES_DEFINED") COROSYNC_CONFIG_RELOADED = M("COROSYNC_CONFIG_RELOADED") COROSYNC_CONFIG_RELOAD_ERROR = M("COROSYNC_CONFIG_RELOAD_ERROR") COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE = M("COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE") COROSYNC_CONFIG_UNSUPPORTED_TRANSPORT = M( "COROSYNC_CONFIG_UNSUPPORTED_TRANSPORT" ) COROSYNC_IP_VERSION_MISMATCH_IN_LINKS = M( "COROSYNC_IP_VERSION_MISMATCH_IN_LINKS" ) COROSYNC_CANNOT_ADD_REMOVE_LINKS_BAD_TRANSPORT = M( "COROSYNC_CANNOT_ADD_REMOVE_LINKS_BAD_TRANSPORT" ) COROSYNC_CANNOT_ADD_REMOVE_LINKS_NO_LINKS_SPECIFIED = M( "COROSYNC_CANNOT_ADD_REMOVE_LINKS_NO_LINKS_SPECIFIED" ) COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS = M( "COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS" ) COROSYNC_LINK_ALREADY_EXISTS_CANNOT_ADD = M( "COROSYNC_LINK_ALREADY_EXISTS_CANNOT_ADD" ) COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE = M( "COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE" ) COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_UPDATE = M( "COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_UPDATE" ) COROSYNC_LINK_NUMBER_DUPLICATION = M("COROSYNC_LINK_NUMBER_DUPLICATION") COROSYNC_NODE_ADDRESS_COUNT_MISMATCH = M("COROSYNC_NODE_ADDRESS_COUNT_MISMATCH") COROSYNC_NODE_CONFLICT_CHECK_SKIPPED = M("COROSYNC_NODE_CONFLICT_CHECK_SKIPPED") COROSYNC_NODES_MISSING = M("COROSYNC_NODES_MISSING") COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = M( "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" ) COROSYNC_NOT_RUNNING_CHECK_STARTED = M("COROSYNC_NOT_RUNNING_CHECK_STARTED") COROSYNC_NOT_RUNNING_ON_NODE = M("COROSYNC_NOT_RUNNING_ON_NODE") COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = M( "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" ) COROSYNC_QUORUM_ATB_CANNOT_BE_DISABLED_DUE_TO_SBD = M( "COROSYNC_QUORUM_ATB_CANNOT_BE_DISABLED_DUE_TO_SBD" ) COROSYNC_QUORUM_ATB_WILL_BE_ENABLED_DUE_TO_SBD = M( "COROSYNC_QUORUM_ATB_WILL_BE_ENABLED_DUE_TO_SBD" ) COROSYNC_QUORUM_GET_STATUS_ERROR = M("COROSYNC_QUORUM_GET_STATUS_ERROR") COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC = M( "COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC" ) COROSYNC_QUORUM_LOSS_UNABLE_TO_CHECK = M("COROSYNC_QUORUM_LOSS_UNABLE_TO_CHECK") COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = M( "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR" ) COROSYNC_QUORUM_WILL_BE_LOST = M("COROSYNC_QUORUM_WILL_BE_LOST") COROSYNC_RUNNING_ON_NODE = M("COROSYNC_RUNNING_ON_NODE") COROSYNC_TOO_MANY_LINKS_OPTIONS = M("COROSYNC_TOO_MANY_LINKS_OPTIONS") COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS = M( "COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS" ) CRM_MON_ERROR = M("CRM_MON_ERROR") DEFAULTS_CAN_BE_OVERRIDEN = M("DEFAULTS_CAN_BE_OVERRIDEN") DEPRECATED_OPTION = M("DEPRECATED_OPTION") DR_CONFIG_ALREADY_EXIST = M("DR_CONFIG_ALREADY_EXIST") DR_CONFIG_DOES_NOT_EXIST = M("DR_CONFIG_DOES_NOT_EXIST") DUPLICATE_CONSTRAINTS_EXIST = M("DUPLICATE_CONSTRAINTS_EXIST") DUPLICATE_CONSTRAINTS_LIST = M("DUPLICATE_CONSTRAINTS_LIST") EMPTY_RESOURCE_SET_LIST = M("EMPTY_RESOURCE_SET_LIST") 
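
# The M() (MessageCode) and F() (ForceCode) wrappers imported at the top of
# this module exist so a type checker can tell the two kinds of codes apart
# from plain strings. Assuming they are typing.NewType wrappers over str
# (which matches how they are used throughout this module), they are identity
# functions at runtime -- a small runnable check:
def _example_message_code_is_still_a_str():
    code = M("EXAMPLE_CODE_FOR_ILLUSTRATION_ONLY")
    # the benefit is purely static: mypy flags passing a bare str where a
    # MessageCode is expected, but at runtime the value is the unchanged str
    return isinstance(code, str)  # True
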
FENCE_HISTORY_COMMAND_ERROR = M("FENCE_HISTORY_COMMAND_ERROR") FENCE_HISTORY_NOT_SUPPORTED = M("FENCE_HISTORY_NOT_SUPPORTED") FILES_DISTRIBUTION_SKIPPED = M("FILES_DISTRIBUTION_SKIPPED") FILES_DISTRIBUTION_STARTED = M("FILES_DISTRIBUTION_STARTED") FILES_REMOVE_FROM_NODES_STARTED = M("FILES_REMOVE_FROM_NODES_STARTED") FILES_REMOVE_FROM_NODES_SKIPPED = M("FILES_REMOVE_FROM_NODES_SKIPPED") FILE_ALREADY_EXISTS = M("FILE_ALREADY_EXISTS") FILE_DISTRIBUTION_ERROR = M("FILE_DISTRIBUTION_ERROR") FILE_DISTRIBUTION_SUCCESS = M("FILE_DISTRIBUTION_SUCCESS") FILE_IO_ERROR = M("FILE_IO_ERROR") FILE_REMOVE_FROM_NODE_ERROR = M("FILE_REMOVE_FROM_NODE_ERROR") FILE_REMOVE_FROM_NODE_SUCCESS = M("FILE_REMOVE_FROM_NODE_SUCCESS") HOST_NOT_FOUND = M("HOST_NOT_FOUND") HOST_ALREADY_AUTHORIZED = M("HOST_ALREADY_AUTHORIZED") HOST_ALREADY_IN_CLUSTER_CONFIG = M("HOST_ALREADY_IN_CLUSTER_CONFIG") HOST_ALREADY_IN_CLUSTER_SERVICES = M("HOST_ALREADY_IN_CLUSTER_SERVICES") ID_ALREADY_EXISTS = M("ID_ALREADY_EXISTS") ID_BELONGS_TO_UNEXPECTED_TYPE = M("ID_BELONGS_TO_UNEXPECTED_TYPE") ID_NOT_FOUND = M("ID_NOT_FOUND") INVALID_CIB_CONTENT = M("INVALID_CIB_CONTENT") INVALID_ID_BAD_CHAR = M("INVALID_ID_BAD_CHAR") INVALID_ID_IS_EMPTY = M("INVALID_ID_IS_EMPTY") INVALID_OPTIONS = M("INVALID_OPTIONS") INVALID_USERDEFINED_OPTIONS = M("INVALID_USERDEFINED_OPTIONS") INVALID_OPTION_TYPE = M("INVALID_OPTION_TYPE") INVALID_OPTION_VALUE = M("INVALID_OPTION_VALUE") INVALID_RESOURCE_AGENT_NAME = M("INVALID_RESOURCE_AGENT_NAME") INVALID_RESPONSE_FORMAT = M("INVALID_RESPONSE_FORMAT") INVALID_SCORE = M("INVALID_SCORE") INVALID_STONITH_AGENT_NAME = M("INVALID_STONITH_AGENT_NAME") INVALID_TIMEOUT_VALUE = M("INVALID_TIMEOUT_VALUE") MULTIPLE_SCORE_OPTIONS = M("MULTIPLE_SCORE_OPTIONS") MULTIPLE_RESULTS_FOUND = M("MULTIPLE_RESULTS_FOUND") MUTUALLY_EXCLUSIVE_OPTIONS = M("MUTUALLY_EXCLUSIVE_OPTIONS") NODE_ADDRESSES_ALREADY_EXIST = M("NODE_ADDRESSES_ALREADY_EXIST") NODE_ADDRESSES_CANNOT_BE_EMPTY = M("NODE_ADDRESSES_CANNOT_BE_EMPTY") NODE_ADDRESSES_DUPLICATION = M("NODE_ADDRESSES_DUPLICATION") NODE_ADDRESSES_UNRESOLVABLE = M("NODE_ADDRESSES_UNRESOLVABLE") NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = M( "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL" ) NODE_COMMUNICATION_DEBUG_INFO = M("NODE_COMMUNICATION_DEBUG_INFO") NODE_COMMUNICATION_ERROR = M("NODE_COMMUNICATION_ERROR") NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = M( "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED" ) NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = M( "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED" ) NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = M( "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT" ) NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = M( "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND" ) NODE_COMMUNICATION_ERROR_TIMED_OUT = M("NODE_COMMUNICATION_ERROR_TIMED_OUT") NODE_COMMUNICATION_FINISHED = M("NODE_COMMUNICATION_FINISHED") NODE_COMMUNICATION_NOT_CONNECTED = M("NODE_COMMUNICATION_NOT_CONNECTED") NODE_COMMUNICATION_NO_MORE_ADDRESSES = M("NODE_COMMUNICATION_NO_MORE_ADDRESSES") NODE_COMMUNICATION_PROXY_IS_SET = M("NODE_COMMUNICATION_PROXY_IS_SET") NODE_COMMUNICATION_RETRYING = M("NODE_COMMUNICATION_RETRYING") NODE_COMMUNICATION_STARTED = M("NODE_COMMUNICATION_STARTED") NODE_NAMES_ALREADY_EXIST = M("NODE_NAMES_ALREADY_EXIST") NODE_NAMES_DUPLICATION = M("NODE_NAMES_DUPLICATION") NODE_NOT_FOUND = M("NODE_NOT_FOUND") NODE_REMOVE_IN_PACEMAKER_FAILED = M("NODE_REMOVE_IN_PACEMAKER_FAILED") NONE_HOST_FOUND = M("NONE_HOST_FOUND") NODE_USED_AS_TIE_BREAKER = M("NODE_USED_AS_TIE_BREAKER") NODES_TO_REMOVE_UNREACHABLE = 
M("NODES_TO_REMOVE_UNREACHABLE") NODE_TO_CLEAR_IS_STILL_IN_CLUSTER = M("NODE_TO_CLEAR_IS_STILL_IN_CLUSTER") NODE_IN_LOCAL_CLUSTER = M("NODE_IN_LOCAL_CLUSTER") OMITTING_NODE = M("OMITTING_NODE") OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT = M("OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT") PACEMAKER_SIMULATION_RESULT = M("PACEMAKER_SIMULATION_RESULT") PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = M("PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND") PARSE_ERROR_COROSYNC_CONF = M("PARSE_ERROR_COROSYNC_CONF") PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_AFTER_OPENING_BRACE = M( "PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_AFTER_OPENING_BRACE" ) PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_BEFORE_OR_AFTER_CLOSING_BRACE = M( "PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_BEFORE_OR_AFTER_CLOSING_BRACE" ) PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE = M( "PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE" ) PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = M( "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE" ) PARSE_ERROR_COROSYNC_CONF_MISSING_SECTION_NAME_BEFORE_OPENING_BRACE = M( "PARSE_ERROR_COROSYNC_CONF_MISSING_SECTION_NAME_BEFORE_OPENING_BRACE" ) PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = M( "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE" ) PARSE_ERROR_JSON_FILE = M("PARSE_ERROR_JSON_FILE") PCSD_VERSION_TOO_OLD = M("PCSD_VERSION_TOO_OLD") PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED = M( "PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED" ) PCSD_SSL_CERT_AND_KEY_SET_SUCCESS = M("PCSD_SSL_CERT_AND_KEY_SET_SUCCESS") PREREQUISITE_OPTION_MUST_BE_ENABLED_AS_WELL = M( "PREREQUISITE_OPTION_MUST_BE_ENABLED_AS_WELL" ) PREREQUISITE_OPTION_MUST_BE_DISABLED = M("PREREQUISITE_OPTION_MUST_BE_DISABLED") PREREQUISITE_OPTION_MUST_NOT_BE_SET = M("PREREQUISITE_OPTION_MUST_NOT_BE_SET") PREREQUISITE_OPTION_IS_MISSING = M("PREREQUISITE_OPTION_IS_MISSING") QDEVICE_ALREADY_DEFINED = M("QDEVICE_ALREADY_DEFINED") QDEVICE_ALREADY_INITIALIZED = M("QDEVICE_ALREADY_INITIALIZED") QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = M("QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE") QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = M( "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED" ) QDEVICE_CERTIFICATE_REMOVAL_STARTED = M("QDEVICE_CERTIFICATE_REMOVAL_STARTED") QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = M( "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE" ) QDEVICE_CERTIFICATE_IMPORT_ERROR = M("QDEVICE_CERTIFICATE_IMPORT_ERROR") QDEVICE_CERTIFICATE_SIGN_ERROR = M("QDEVICE_CERTIFICATE_SIGN_ERROR") QDEVICE_DESTROY_ERROR = M("QDEVICE_DESTROY_ERROR") QDEVICE_DESTROY_SUCCESS = M("QDEVICE_DESTROY_SUCCESS") QDEVICE_GET_STATUS_ERROR = M("QDEVICE_GET_STATUS_ERROR") QDEVICE_INITIALIZATION_ERROR = M("QDEVICE_INITIALIZATION_ERROR") QDEVICE_INITIALIZATION_SUCCESS = M("QDEVICE_INITIALIZATION_SUCCESS") QDEVICE_NOT_DEFINED = M("QDEVICE_NOT_DEFINED") QDEVICE_NOT_INITIALIZED = M("QDEVICE_NOT_INITIALIZED") QDEVICE_NOT_RUNNING = M("QDEVICE_NOT_RUNNING") QDEVICE_CLIENT_RELOAD_STARTED = M("QDEVICE_CLIENT_RELOAD_STARTED") QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = M( "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" ) QDEVICE_USED_BY_CLUSTERS = M("QDEVICE_USED_BY_CLUSTERS") REQUIRED_OPTIONS_ARE_MISSING = M("REQUIRED_OPTIONS_ARE_MISSING") REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING = M( "REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING" ) RESOURCE_BAN_PCMK_ERROR = M("RESOURCE_BAN_PCMK_ERROR") RESOURCE_BAN_PCMK_SUCCESS = M("RESOURCE_BAN_PCMK_SUCCESS") RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE = M( "RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE" ) RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE = M( 
"RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE" ) RESOURCE_CLEANUP_ERROR = M("RESOURCE_CLEANUP_ERROR") RESOURCE_DOES_NOT_RUN = M("RESOURCE_DOES_NOT_RUN") RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES = M( "RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES" ) RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE = M( "RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE" ) RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE = M("RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE") RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE = M( "RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE" ) RESOURCE_IS_GUEST_NODE_ALREADY = M("RESOURCE_IS_GUEST_NODE_ALREADY") RESOURCE_IS_UNMANAGED = M("RESOURCE_IS_UNMANAGED") RESOURCE_MANAGED_NO_MONITOR_ENABLED = M("RESOURCE_MANAGED_NO_MONITOR_ENABLED") RESOURCE_MOVE_PCMK_ERROR = M("RESOURCE_MOVE_PCMK_ERROR") RESOURCE_MOVE_PCMK_SUCCESS = M("RESOURCE_MOVE_PCMK_SUCCESS") RESOURCE_OPERATION_INTERVAL_ADAPTED = M("RESOURCE_OPERATION_INTERVAL_ADAPTED") RESOURCE_OPERATION_INTERVAL_DUPLICATION = M( "RESOURCE_OPERATION_INTERVAL_DUPLICATION" ) RESOURCE_REFRESH_ERROR = M("RESOURCE_REFRESH_ERROR") RESOURCE_REFRESH_TOO_TIME_CONSUMING = M("RESOURCE_REFRESH_TOO_TIME_CONSUMING") RESOURCE_RUNNING_ON_NODES = M("RESOURCE_RUNNING_ON_NODES") RESOURCE_UNMOVE_UNBAN_PCMK_ERROR = M("RESOURCE_UNMOVE_UNBAN_PCMK_ERROR") RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS = M("RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS") RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED = M( "RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED" ) RESOURCE_MOVE_CONSTRAINT_CREATED = M("RESOURCE_MOVE_CONSTRAINT_CREATED") RESOURCE_MOVE_CONSTRAINT_REMOVED = M("RESOURCE_MOVE_CONSTRAINT_REMOVED") RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES = M( "RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES" ) RESOURCE_MOVE_AUTOCLEAN_SIMULATION_FAILURE = M( "RESOURCE_MOVE_AUTOCLEAN_SIMULATION_FAILURE" ) RULE_IN_EFFECT_STATUS_DETECTION_NOT_SUPPORTED = M( "RULE_IN_EFFECT_STATUS_DETECTION_NOT_SUPPORTED" ) RULE_EXPRESSION_NOT_ALLOWED = M("RULE_EXPRESSION_NOT_ALLOWED") RULE_EXPRESSION_OPTIONS_DUPLICATION = M("RULE_EXPRESSION_OPTIONS_DUPLICATION") RULE_EXPRESSION_PARSE_ERROR = M("RULE_EXPRESSION_PARSE_ERROR") RULE_EXPRESSION_SINCE_GREATER_THAN_UNTIL = M( "RULE_EXPRESSION_SINCE_GREATER_THAN_UNTIL" ) RUN_EXTERNAL_PROCESS_ERROR = M("RUN_EXTERNAL_PROCESS_ERROR") RUN_EXTERNAL_PROCESS_FINISHED = M("RUN_EXTERNAL_PROCESS_FINISHED") RUN_EXTERNAL_PROCESS_STARTED = M("RUN_EXTERNAL_PROCESS_STARTED") SBD_CHECK_STARTED = M("SBD_CHECK_STARTED") SBD_CHECK_SUCCESS = M("SBD_CHECK_SUCCESS") SBD_CONFIG_ACCEPTED_BY_NODE = M("SBD_CONFIG_ACCEPTED_BY_NODE") SBD_CONFIG_DISTRIBUTION_STARTED = M("SBD_CONFIG_DISTRIBUTION_STARTED") SBD_DEVICE_DOES_NOT_EXIST = M("SBD_DEVICE_DOES_NOT_EXIST") SBD_DEVICE_DUMP_ERROR = M("SBD_DEVICE_DUMP_ERROR") SBD_DEVICE_INITIALIZATION_ERROR = M("SBD_DEVICE_INITIALIZATION_ERROR") SBD_DEVICE_INITIALIZATION_STARTED = M("SBD_DEVICE_INITIALIZATION_STARTED") SBD_DEVICE_INITIALIZATION_SUCCESS = M("SBD_DEVICE_INITIALIZATION_SUCCESS") SBD_DEVICE_IS_NOT_BLOCK_DEVICE = M("SBD_DEVICE_IS_NOT_BLOCK_DEVICE") SBD_DEVICE_LIST_ERROR = M("SBD_DEVICE_LIST_ERROR") SBD_DEVICE_MESSAGE_ERROR = M("SBD_DEVICE_MESSAGE_ERROR") SBD_DEVICE_PATH_NOT_ABSOLUTE = M("SBD_DEVICE_PATH_NOT_ABSOLUTE") SBD_LIST_WATCHDOG_ERROR = M("SBD_LIST_WATCHDOG_ERROR") SBD_NO_DEVICE_FOR_NODE = M("SBD_NO_DEVICE_FOR_NODE") SBD_NOT_INSTALLED = M("SBD_NOT_INSTALLED") SBD_NOT_USED_CANNOT_SET_SBD_OPTIONS = M("SBD_NOT_USED_CANNOT_SET_SBD_OPTIONS") SBD_TOO_MANY_DEVICES_FOR_NODE = M("SBD_TOO_MANY_DEVICES_FOR_NODE") SBD_WITH_DEVICES_NOT_USED_CANNOT_SET_DEVICE = M( "SBD_WITH_DEVICES_NOT_USED_CANNOT_SET_DEVICE" ) 
SBD_WATCHDOG_NOT_SUPPORTED = M("SBD_WATCHDOG_NOT_SUPPORTED") SBD_WATCHDOG_VALIDATION_INACTIVE = M("SBD_WATCHDOG_VALIDATION_INACTIVE") SBD_WATCHDOG_TEST_ERROR = M("SBD_WATCHDOG_TEST_ERROR") SBD_WATCHDOG_TEST_MULTIPLE_DEVICES = M("SBD_WATCHDOG_TEST_MULTIPLE_DEVICES") SBD_WATCHDOG_TEST_FAILED = M("SBD_WATCHDOG_TEST_FAILED") SERVICE_ACTION_STARTED = M("SERVICE_ACTION_STARTED") SERVICE_ACTION_FAILED = M("SERVICE_ACTION_FAILED") SERVICE_ACTION_SUCCEEDED = M("SERVICE_ACTION_SUCCEEDED") SERVICE_ACTION_SKIPPED = M("SERVICE_ACTION_SKIPPED") SERVICE_NOT_INSTALLED = M("SERVICE_NOT_INSTALLED") SERVICE_VERSION_MISMATCH = M("SERVICE_VERSION_MISMATCH") UNABLE_TO_GET_RESOURCE_OPERATION_DIGESTS = M( "UNABLE_TO_GET_RESOURCE_OPERATION_DIGESTS" ) STONITH_RESOURCES_DO_NOT_EXIST = M("STONITH_RESOURCES_DO_NOT_EXIST") STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED = M( "STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED" ) STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT = M( "STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT" ) STONITH_UNFENCING_FAILED = M("STONITH_UNFENCING_FAILED") STONITH_UNFENCING_DEVICE_STATUS_FAILED = M( "STONITH_UNFENCING_DEVICE_STATUS_FAILED" ) STONITH_UNFENCING_SKIPPED_DEVICES_FENCED = M( "STONITH_UNFENCING_SKIPPED_DEVICES_FENCED" ) STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM = M( "STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM" ) SERVICE_COMMANDS_ON_NODES_STARTED = M("SERVICE_COMMANDS_ON_NODES_STARTED") SERVICE_COMMANDS_ON_NODES_SKIPPED = M("SERVICE_COMMANDS_ON_NODES_SKIPPED") SERVICE_COMMAND_ON_NODE_ERROR = M("SERVICE_COMMAND_ON_NODE_ERROR") SERVICE_COMMAND_ON_NODE_SUCCESS = M("SERVICE_COMMAND_ON_NODE_SUCCESS") SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM = M("SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM") SYSTEM_WILL_RESET = M("SYSTEM_WILL_RESET") # TODO: remove, use ADD_REMOVE reports TAG_ADD_REMOVE_IDS_DUPLICATION = M("TAG_ADD_REMOVE_IDS_DUPLICATION") # TODO: remove, use ADD_REMOVE reports TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG = M( "TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG" ) # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME = M( "TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME" ) # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG = M( "TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG" ) TAG_CANNOT_CONTAIN_ITSELF = M("TAG_CANNOT_CONTAIN_ITSELF") TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED = M( "TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED" ) # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF = M("TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF") # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_REMOVE_ADJACENT_ID = M("TAG_CANNOT_REMOVE_ADJACENT_ID") # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG = M( "TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG" ) TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS = M( "TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS" ) TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED = M( "TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED" ) # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD = M( "TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD" ) # TODO: remove, use ADD_REMOVE reports TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED = M( "TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED" ) # TODO: remove, use ADD_REMOVE reports TAG_IDS_NOT_IN_THE_TAG = M("TAG_IDS_NOT_IN_THE_TAG") TMP_FILE_WRITE = M("TMP_FILE_WRITE") UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE = M( "UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE" ) UNABLE_TO_CONNECT_TO_ALL_REMAINING_NODE = M( 
"UNABLE_TO_CONNECT_TO_ALL_REMAINING_NODE" ) UNABLE_TO_DETERMINE_USER_UID = M("UNABLE_TO_DETERMINE_USER_UID") UNABLE_TO_DETERMINE_GROUP_GID = M("UNABLE_TO_DETERMINE_GROUP_GID") UNABLE_TO_GET_AGENT_METADATA = M("UNABLE_TO_GET_AGENT_METADATA") UNABLE_TO_GET_SBD_CONFIG = M("UNABLE_TO_GET_SBD_CONFIG") UNABLE_TO_GET_SBD_STATUS = M("UNABLE_TO_GET_SBD_STATUS") UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE = M( "UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE" ) WATCHDOG_INVALID = M("WATCHDOG_INVALID") UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = M( "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS" ) USE_COMMAND_NODE_ADD_REMOTE = M("USE_COMMAND_NODE_ADD_REMOTE") USE_COMMAND_NODE_ADD_GUEST = M("USE_COMMAND_NODE_ADD_GUEST") USE_COMMAND_NODE_REMOVE_GUEST = M("USE_COMMAND_NODE_REMOVE_GUEST") USING_DEFAULT_ADDRESS_FOR_HOST = M("USING_DEFAULT_ADDRESS_FOR_HOST") USING_DEFAULT_WATCHDOG = M("USING_DEFAULT_WATCHDOG") WAIT_FOR_IDLE_STARTED = M("WAIT_FOR_IDLE_STARTED") WAIT_FOR_IDLE_ERROR = M("WAIT_FOR_IDLE_ERROR") WAIT_FOR_IDLE_NOT_LIVE_CLUSTER = M("WAIT_FOR_IDLE_NOT_LIVE_CLUSTER") WAIT_FOR_IDLE_TIMED_OUT = M("WAIT_FOR_IDLE_TIMED_OUT") WAIT_FOR_NODE_STARTUP_ERROR = M("WAIT_FOR_NODE_STARTUP_ERROR") WAIT_FOR_NODE_STARTUP_STARTED = M("WAIT_FOR_NODE_STARTUP_STARTED") WAIT_FOR_NODE_STARTUP_TIMED_OUT = M("WAIT_FOR_NODE_STARTUP_TIMED_OUT") WAIT_FOR_NODE_STARTUP_WITHOUT_START = M("WAIT_FOR_NODE_STARTUP_WITHOUT_START") WATCHDOG_NOT_FOUND = M("WATCHDOG_NOT_FOUND") pcs-0.10.11/pcs/common/reports/const.py000066400000000000000000000033351412706364600177270ustar00rootroot00000000000000from .types import ( AddRemoveContainerType, AddRemoveItemType, BoothConfigUsedWhere, DefaultAddressSource, FenceHistoryCommandType, PcsCommand, ReasonType, ServiceAction, StonithRestartlessUpdateUnableToPerformReason, ) ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE = AddRemoveContainerType("stonith") ADD_REMOVE_ITEM_TYPE_DEVICE = AddRemoveItemType("device") BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE = BoothConfigUsedWhere( "in a cluster resource" ) BOOTH_CONFIG_USED_ENABLED_IN_SYSTEMD = BoothConfigUsedWhere( "enabled in systemd" ) BOOTH_CONFIG_USED_RUNNING_IN_SYSTEMD = BoothConfigUsedWhere( "running in systemd" ) FENCE_HISTORY_COMMAND_CLEANUP = FenceHistoryCommandType("cleanup") FENCE_HISTORY_COMMAND_SHOW = FenceHistoryCommandType("show") FENCE_HISTORY_COMMAND_UPDATE = FenceHistoryCommandType("update") PCS_COMMAND_OPERATION_DEFAULTS_UPDATE = PcsCommand( "resource op defaults update" ) PCS_COMMAND_RESOURCE_DEFAULTS_UPDATE = PcsCommand("resource defaults update") SERVICE_ACTION_START = ServiceAction("START") SERVICE_ACTION_STOP = ServiceAction("STOP") SERVICE_ACTION_ENABLE = ServiceAction("ENABLE") SERVICE_ACTION_DISABLE = ServiceAction("DISABLE") SERVICE_ACTION_KILL = ServiceAction("KILL") REASON_UNREACHABLE = ReasonType("unreachable") REASON_NOT_LIVE_CIB = ReasonType("not_live_cib") DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS = DefaultAddressSource("known_hosts") DEFAULT_ADDRESS_SOURCE_HOST_NAME = DefaultAddressSource("host_name") STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_NOT_RUNNING = ( StonithRestartlessUpdateUnableToPerformReason("not_running") ) STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER = ( StonithRestartlessUpdateUnableToPerformReason("other") ) pcs-0.10.11/pcs/common/reports/constraints/000077500000000000000000000000001412706364600205725ustar00rootroot00000000000000pcs-0.10.11/pcs/common/reports/constraints/__init__.py000066400000000000000000000021611412706364600227030ustar00rootroot00000000000000from .common import ( constraint_plain as 
    constraint_plain_default,
    constraint_with_sets,
)
from .colocation import constraint_plain as colocation_plain
from .order import constraint_plain as order_plain
from .ticket import constraint_plain as ticket_plain


def constraint_to_str(constraint_type, constraint_info, with_id=True):
    """
    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id -- whether to show the id with options_dict
    """
    if "resource_sets" in constraint_info:
        return constraint_with_sets(constraint_info, with_id)
    return constraint_plain(constraint_type, constraint_info, with_id)


def constraint_plain(constraint_type, options_dict, with_id=False):
    """return console shape for any constraint_type of plain constraint"""
    type_report_map = {
        "rsc_colocation": colocation_plain,
        "rsc_order": order_plain,
        "rsc_ticket": ticket_plain,
    }
    if constraint_type not in type_report_map:
        return constraint_plain_default(constraint_type, options_dict, with_id)
    return type_report_map[constraint_type](options_dict, with_id)
pcs-0.10.11/pcs/common/reports/constraints/colocation.py000066400000000000000000000015001412706364600232720ustar00rootroot00000000000000def constraint_plain(constraint_info, with_id=False):
    """
    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id -- whether to show the id with options_dict
    """
    options_dict = constraint_info["options"]
    co_resource1 = options_dict.get("rsc", "")
    co_resource2 = options_dict.get("with-rsc", "")
    co_id = options_dict.get("id", "")
    co_score = options_dict.get("score", "")
    score_text = "(score:" + co_score + ")"
    console_option_list = [
        "(%s:%s)" % (option[0], option[1])
        for option in sorted(options_dict.items())
        if option[0] not in ("rsc", "with-rsc", "id", "score")
    ]
    if with_id:
        console_option_list.append("(id:%s)" % co_id)
    return " ".join(
        [co_resource1, "with", co_resource2, score_text] + console_option_list
    )
pcs-0.10.11/pcs/common/reports/constraints/common.py000066400000000000000000000027301412706364600224360ustar00rootroot00000000000000def constraint_plain(constraint_type, constraint_info, with_id=False):
    return "{0} {1}".format(
        constraint_type,
        " ".join(prepare_options(constraint_info["options"], with_id)),
    )


def resource_sets(set_list, with_id=True):
    """
    list of dict set_list see resource set in pcs/lib/exchange_formats.md
    """
    report = []
    for resource_set in set_list:
        report.extend(
            ["set"] + resource_set["ids"] + options(resource_set["options"])
        )
        if with_id:
            report.append(id_from_options(resource_set["options"]))
    return report


def options(options_dict):
    return [
        key + "=" + value
        for key, value in sorted(options_dict.items())
        if key != "id"
    ]


def id_from_options(options_dict):
    return "(id:" + options_dict.get("id", "") + ")"


def constraint_with_sets(constraint_info, with_id=True):
    """
    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id -- whether to show the id with options_dict
    """
    options_dict = options(constraint_info["options"])
    return " ".join(
        resource_sets(constraint_info["resource_sets"], with_id)
        + (["setoptions"] + options_dict if options_dict else [])
        + ([id_from_options(constraint_info["options"])] if with_id else [])
    )


def prepare_options(options_dict, with_id=True):
    return options(options_dict) + (
        [id_from_options(options_dict)] if with_id else []
    )
pcs-0.10.11/pcs/common/reports/constraints/order.py000066400000000000000000000037461412706364600222670ustar00rootroot00000000000000# Reimplementing the function pcs.lib.pacemaker.values.is_true to avoid
# cyclic imports. This is a temporary solution.
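
# A self-contained illustration of the order formatter defined below; the
# constraint_info values are made up, following the exchange format the
# docstrings refer to.
def _example_order_constraint_to_str():
    constraint_info = {
        "options": {
            "id": "order-A-B-mandatory",
            "first": "resourceA",
            "first-action": "start",
            "then": "resourceB",
            "then-action": "start",
            "kind": "Mandatory",
        }
    }
    # constraint_plain is defined later in this module; the name is resolved
    # when this function is called, so the forward reference is fine
    return constraint_plain(constraint_info, with_id=True)
    # -> "start resourceA then start resourceB (kind:Mandatory)
    #    (id:order-A-B-mandatory)"
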
def _is_true(val) -> bool: return val.lower() in {"true", "on", "yes", "y", "1"} def constraint_plain(constraint_info, with_id=False): """ dict constraint_info see constraint in pcs/lib/exchange_formats.md bool with_id have to show id with options_dict """ options = constraint_info["options"] oc_resource1 = options.get("first", "") oc_resource2 = options.get("then", "") first_action = options.get("first-action", "") then_action = options.get("then-action", "") oc_id = options.get("id", "") oc_score = options.get("score", "") oc_kind = options.get("kind", "") oc_sym = "" oc_id_out = "" oc_options = "" if "symmetrical" in options and not _is_true( options.get("symmetrical", "false") ): oc_sym = "(non-symmetrical)" if oc_kind != "": score_text = "(kind:" + oc_kind + ")" elif oc_kind == "" and oc_score == "": score_text = "(kind:Mandatory)" else: score_text = "(score:" + oc_score + ")" if with_id: oc_id_out = "(id:" + oc_id + ")" already_processed_options = ( "first", "then", "first-action", "then-action", "id", "score", "kind", "symmetrical", ) oc_options = " ".join( [ "{0}={1}".format(name, value) for name, value in options.items() if name not in already_processed_options ] ) if oc_options: oc_options = "(Options: " + oc_options + ")" return " ".join( [ arg for arg in [ first_action, oc_resource1, "then", then_action, oc_resource2, score_text, oc_sym, oc_options, oc_id_out, ] if arg ] ) pcs-0.10.11/pcs/common/reports/constraints/ticket.py000066400000000000000000000012651412706364600224330ustar00rootroot00000000000000from pcs.common.reports.constraints.common import prepare_options def constraint_plain(constraint_info, with_id=False): """ dict constraint_info see constraint in pcs/lib/exchange_formats.md bool with_id have to show id with options_dict """ options = constraint_info["options"] role = options.get("rsc-role", "") role_prefix = "{0} ".format(role) if role else "" return role_prefix + " ".join( [options.get("rsc", "")] + prepare_options( dict( (name, value) for name, value in options.items() if name not in ["rsc-role", "rsc"] ), with_id, ) ) pcs-0.10.11/pcs/common/reports/conversions.py000066400000000000000000000025541412706364600211530ustar00rootroot00000000000000from typing import ( Dict, Optional, ) from pcs.common.tools import get_all_subclasses from . 
import messages from .dto import ReportItemDto, ReportItemMessageDto from .item import ReportItem, ReportItemContext, ReportItemSeverity def report_dto_to_item( dto_obj: ReportItemDto, context: Optional[ReportItemContext] = None, ) -> ReportItem: return ReportItem( severity=ReportItemSeverity.from_dto(dto_obj.severity), message=report_item_msg_from_dto(dto_obj.message), context=( context if context else ReportItemContext.from_dto(dto_obj.context) if dto_obj.context else None ), ) def _create_report_msg_map() -> Dict[str, type]: result: Dict[str, type] = {} for report_msg_cls in get_all_subclasses(messages.ReportItemMessage): # pylint: disable=protected-access code = report_msg_cls._code if code: if code in result: raise AssertionError() result[code] = report_msg_cls return result REPORT_MSG_MAP = _create_report_msg_map() def report_item_msg_from_dto( obj: ReportItemMessageDto, ) -> messages.ReportItemMessage: try: return REPORT_MSG_MAP[obj.code](**obj.payload) except KeyError: return messages.LegacyCommonMessage(obj.code, obj.payload, obj.message) pcs-0.10.11/pcs/common/reports/dto.py000066400000000000000000000014061412706364600173640ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( Any, Mapping, Optional, ) from pcs.common.interface.dto import DataTransferObject from .types import ( ForceCode, MessageCode, SeverityLevel, ) @dataclass(frozen=True) class ReportItemSeverityDto(DataTransferObject): level: SeverityLevel force_code: Optional[ForceCode] @dataclass(frozen=True) class ReportItemMessageDto(DataTransferObject): code: MessageCode message: str payload: Mapping[str, Any] @dataclass(frozen=True) class ReportItemContextDto(DataTransferObject): node: str @dataclass(frozen=True) class ReportItemDto(DataTransferObject): severity: ReportItemSeverityDto message: ReportItemMessageDto context: Optional[ReportItemContextDto] pcs-0.10.11/pcs/common/reports/item.py000066400000000000000000000114411412706364600175340ustar00rootroot00000000000000from dataclasses import dataclass from typing import ( Any, Dict, List, Optional, ) from pcs.common.interface.dto import ( ImplementsFromDto, ImplementsToDto, ) from .types import ( ForceCode, MessageCode, SeverityLevel, ) from .dto import ( ReportItemContextDto, ReportItemDto, ReportItemMessageDto, ReportItemSeverityDto, ) @dataclass(frozen=True) class ReportItemSeverity(ImplementsToDto, ImplementsFromDto): # pylint: disable=invalid-name ERROR = SeverityLevel("ERROR") WARNING = SeverityLevel("WARNING") INFO = SeverityLevel("INFO") DEBUG = SeverityLevel("DEBUG") level: SeverityLevel force_code: Optional[ForceCode] = None def to_dto(self) -> ReportItemSeverityDto: return ReportItemSeverityDto( level=self.level, force_code=self.force_code, ) @classmethod def from_dto(cls, dto_obj: ReportItemSeverityDto) -> "ReportItemSeverity": return cls( level=dto_obj.level, force_code=dto_obj.force_code, ) @classmethod def error( cls, force_code: Optional[ForceCode] = None ) -> "ReportItemSeverity": return cls(level=cls.ERROR, force_code=force_code) @classmethod def warning(cls) -> "ReportItemSeverity": return cls(level=cls.WARNING) @classmethod def info(cls) -> "ReportItemSeverity": return cls(level=cls.INFO) @classmethod def debug(cls) -> "ReportItemSeverity": return cls(level=cls.DEBUG) def get_severity( force_code: Optional[ForceCode], is_forced: bool ) -> ReportItemSeverity: if is_forced: return ReportItemSeverity(ReportItemSeverity.WARNING) return ReportItemSeverity(ReportItemSeverity.ERROR, force_code) @dataclass(frozen=True, 
init=False) class ReportItemMessage(ImplementsToDto): _code = MessageCode("") @property def message(self) -> str: raise NotImplementedError() @property def code(self) -> MessageCode: return self._code def to_dto(self) -> ReportItemMessageDto: payload: Dict[str, Any] = {} if hasattr(self.__class__, "__annotations__"): try: annotations = self.__class__.__annotations__ except AttributeError as e: raise AssertionError() from e for attr_name in annotations.keys(): if attr_name.startswith("_") or attr_name in ("message",): continue attr_val = getattr(self, attr_name) if hasattr(attr_val, "to_dto"): payload[attr_name] = attr_val.to_dto() else: payload[attr_name] = attr_val return ReportItemMessageDto( code=self.code, message=self.message, payload=payload, ) @dataclass(frozen=True) class ReportItemContext(ImplementsToDto, ImplementsFromDto): node: str @classmethod def from_dto(cls, dto_obj: ReportItemContextDto) -> "ReportItemContext": return cls(node=dto_obj.node) def to_dto(self) -> ReportItemContextDto: return ReportItemContextDto(node=self.node) @dataclass class ReportItem(ImplementsToDto): severity: ReportItemSeverity message: ReportItemMessage context: Optional[ReportItemContext] = None @classmethod def error( cls, message: ReportItemMessage, force_code: Optional[ForceCode] = None, context: Optional[ReportItemContext] = None, ) -> "ReportItem": return cls( severity=ReportItemSeverity.error(force_code), message=message, context=context, ) @classmethod def warning( cls, message: ReportItemMessage, context: Optional[ReportItemContext] = None, ) -> "ReportItem": return cls( severity=ReportItemSeverity.warning(), message=message, context=context, ) @classmethod def info( cls, message: ReportItemMessage, context: Optional[ReportItemContext] = None, ) -> "ReportItem": return cls( severity=ReportItemSeverity.info(), message=message, context=context, ) @classmethod def debug( cls, message: ReportItemMessage, context: Optional[ReportItemContext] = None, ) -> "ReportItem": return cls( severity=ReportItemSeverity.debug(), message=message, context=context, ) def to_dto(self) -> ReportItemDto: return ReportItemDto( severity=self.severity.to_dto(), context=self.context.to_dto() if self.context else None, message=self.message.to_dto(), ) ReportItemList = List[ReportItem] pcs-0.10.11/pcs/common/reports/messages.py000066400000000000000000006045651412706364600204240ustar00rootroot00000000000000# pylint: disable=too-many-lines from collections import defaultdict from dataclasses import ( dataclass, field, ) from typing import ( cast, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union, ) from pcs.common import file_type_codes from pcs.common.fencing_topology import TARGET_TYPE_ATTRIBUTE from pcs.common.file import RawFileError from pcs.common.str_tools import ( format_list, format_list_custom_last_separator, format_optional, format_plural, get_plural, indent, is_iterable_not_str, ) from pcs.common.types import CibRuleExpressionType from . 
import ( codes, const, types, ) from .dto import ReportItemMessageDto from .item import ReportItemMessage from .constraints import constraint_to_str INSTANCE_SUFFIX = "@{0}" NODE_PREFIX = "{0}: " def _stdout_stderr_to_string(stdout: str, stderr: str, prefix: str = "") -> str: new_lines = [prefix] if prefix else [] for line in stdout.splitlines() + stderr.splitlines(): if line.strip(): new_lines.append(line) return "\n".join(new_lines) def _resource_move_ban_clear_master_resource_not_promotable( promotable_id: str, ) -> str: return ( "when specifying master you must use the promotable clone id{_id}" ).format( _id=format_optional(promotable_id, " ({})"), ) def _resource_move_ban_pcmk_success(stdout: str, stderr: str) -> str: new_lines = [] for line in stdout.splitlines() + stderr.splitlines(): if not line.strip(): continue line = line.replace( "WARNING: Creating rsc_location constraint", "Warning: Creating location constraint", ) line = line.replace( " using the clear option or by editing the CIB with an " "appropriate tool", "", ) new_lines.append(line) return "\n".join(new_lines) def _format_fencing_level_target( target_type: Optional[str], target_value: Any ) -> str: if target_type == TARGET_TYPE_ATTRIBUTE: return "{0}={1}".format(target_value[0], target_value[1]) return target_value def _format_booth_default(value: Optional[str], template: str) -> str: return "" if value in ("booth", "", None) else template.format(value) def _key_numeric(item: str) -> Tuple[int, str]: return (int(item), item) if item.isdigit() else (-1, item) _add_remove_container_translation = { const.ADD_REMOVE_CONTAINER_TYPE_STONITH_RESOURCE: "stonith resource", } _add_remove_item_translation = { const.ADD_REMOVE_ITEM_TYPE_DEVICE: "device", } _file_role_translation = { file_type_codes.BOOTH_CONFIG: "Booth configuration", file_type_codes.BOOTH_KEY: "Booth key", file_type_codes.COROSYNC_AUTHKEY: "Corosync authkey", file_type_codes.COROSYNC_CONF: "Corosync configuration", file_type_codes.PCS_DR_CONFIG: "disaster-recovery configuration", file_type_codes.PACEMAKER_AUTHKEY: "Pacemaker authkey", file_type_codes.PCSD_ENVIRONMENT_CONFIG: "pcsd configuration", file_type_codes.PCSD_SSL_CERT: "pcsd SSL certificate", file_type_codes.PCSD_SSL_KEY: "pcsd SSL key", file_type_codes.PCS_KNOWN_HOSTS: "known-hosts", file_type_codes.PCS_SETTINGS_CONF: "pcs configuration", } _type_translation = { "acl_group": "ACL group", "acl_permission": "ACL permission", "acl_role": "ACL role", "acl_target": "ACL user", # Pacemaker-2.0 deprecated masters. Masters are now called promotable # clones. We treat masters as clones. Do not report we were doing something # with a master, say we were doing it with a clone instead. 
"master": "clone", "primitive": "resource", } _type_articles = { "ACL group": "an", "ACL user": "an", "ACL role": "an", "ACL permission": "an", "options set": "an", } def _add_remove_container_str( container: types.AddRemoveContainerType, ) -> str: return _add_remove_container_translation.get(container, container) def _add_remove_item_str(item: types.AddRemoveItemType) -> str: return _add_remove_item_translation.get(item, item) def _format_file_role(role: file_type_codes.FileTypeCode) -> str: return _file_role_translation.get(role, role) def _format_file_action(action: str) -> str: return _file_operation_translation.get(action, action) _file_operation_translation = { RawFileError.ACTION_CHMOD: "change permissions of", RawFileError.ACTION_CHOWN: "change ownership of", RawFileError.ACTION_READ: "read", RawFileError.ACTION_REMOVE: "remove", RawFileError.ACTION_WRITE: "write", } def _service_action_str(action: types.ServiceAction, suffix: str = "") -> str: base = action.lower() if not suffix: return base base = { const.SERVICE_ACTION_STOP: "stopp", const.SERVICE_ACTION_ENABLE: "enabl", const.SERVICE_ACTION_DISABLE: "disabl", }.get(action, base) return "{}{}".format(base, suffix) def _skip_reason_to_string(reason: types.ReasonType) -> str: return { const.REASON_NOT_LIVE_CIB: "the command does not run on a live cluster", const.REASON_UNREACHABLE: "pcs is unable to connect to the node(s)", }.get(reason, reason) def _typelist_to_string(type_list: Sequence[str], article: bool = False) -> str: if not type_list: return "" # use set to drop duplicate items: # * master is translated to clone # * i.e. "clone, master" is translated to "clone, clone" # * so we want to drop the second clone new_list = sorted( { # get a translation or make a type_name a string _type_translation.get(type_name, "{0}".format(type_name)) for type_name in type_list } ) res_types = "/".join(new_list) if not article: return res_types return "{article} {types}".format( article=_type_articles.get(new_list[0], "a"), types=res_types ) def _type_to_string(type_name: str, article: bool = False) -> str: if not type_name: return "" # get a translation or make a type_name a string translated = _type_translation.get(type_name, "{0}".format(type_name)) if not article: return translated return "{article} {type}".format( article=_type_articles.get(translated, "a"), type=translated ) def _build_node_description(node_types: List[str]) -> str: if not node_types: return "Node" label = "{0} node".format if len(node_types) == 1: return label(node_types[0]) return "nor " + " or ".join([label(ntype) for ntype in node_types]) @dataclass(frozen=True, init=False) class LegacyCommonMessage(ReportItemMessage): """ This class is used for legacy report transport protocol from 'pcs_internal.py' and is used in 'pcs.cluster.RemoteAddNodes'. This method should be replaced with transporting DTOs of reports in the future. """ def __init__( self, code: types.MessageCode, info: Mapping[str, Any], message: str ) -> None: self.__code = code self.info = info self._message = message @property def message(self) -> str: return self._message def to_dto(self) -> ReportItemMessageDto: return ReportItemMessageDto( code=self.__code, message=self.message, payload=dict(self.info), ) @dataclass(frozen=True) class ResourceForConstraintIsMultiinstance(ReportItemMessage): """ When setting up a constraint a resource in a type of a clone was specified resource_id -- specified resource parent_type -- type of a clone (clone, bundle...) 
parent_id -- clone resource id """ resource_id: str parent_type: str parent_id: str _code = codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE @property def message(self) -> str: return ( f"{self.resource_id} is a {self.parent_type} resource, you should " f"use the {self.parent_type} id: {self.parent_id} when adding " "constraints" ) @dataclass(frozen=True) class DuplicateConstraintsList(ReportItemMessage): """ List duplicate constraints NOTE: This is a temporary solution constraint_type -- "rsc_colocation", "rsc_order", "rsc_ticket" constraint_info_list -- structured constraint data according to type """ constraint_type: str constraint_info_list: List[Mapping[str, Any]] _code = codes.DUPLICATE_CONSTRAINTS_LIST @property def message(self) -> str: return "Duplicate constraints:\n" + "\n".join( [ " " + constraint_to_str(self.constraint_type, constraint_info) for constraint_info in self.constraint_info_list ] ) @dataclass(frozen=True) class DuplicateConstraintsExist(ReportItemMessage): """ When creating a constraint pcs detected a similar constraint already exists constraint_ids -- ids of similar constraints """ constraint_ids: List[str] _code = codes.DUPLICATE_CONSTRAINTS_EXIST @property def message(self) -> str: return "duplicate constraint already exists" @dataclass(frozen=True) class EmptyResourceSetList(ReportItemMessage): """ An empty resource set has been specified, which is not allowed by cib schema """ _code = codes.EMPTY_RESOURCE_SET_LIST @property def message(self) -> str: return "Resource set list is empty" @dataclass(frozen=True) class RequiredOptionsAreMissing(ReportItemMessage): """ Required option has not been specified, command cannot continue option_names -- are required but was not entered option_type -- describes the option """ option_names: List[str] option_type: Optional[str] = None _code = codes.REQUIRED_OPTIONS_ARE_MISSING @property def message(self) -> str: return ( "required {desc}{_option} {option_names_list} {_is} missing" ).format( desc=format_optional(self.option_type), option_names_list=format_list(self.option_names), _option=format_plural(self.option_names, "option"), _is=format_plural(self.option_names, "is"), ) @dataclass(frozen=True) class PrerequisiteOptionIsMissing(ReportItemMessage): """ If the option_name is specified, the prerequisite_option must be specified option_name -- an option which depends on the prerequisite_option prerequisite_name -- the prerequisite option option_type -- describes the option prerequisite_type -- describes the prerequisite_option """ option_name: str prerequisite_name: str option_type: Optional[str] = None prerequisite_type: Optional[str] = None _code = codes.PREREQUISITE_OPTION_IS_MISSING @property def message(self) -> str: return ( "If {opt_desc}option '{option_name}' is specified, " "{pre_desc}option '{prerequisite_name}' must be specified as well" ).format( opt_desc=format_optional(self.option_type), pre_desc=format_optional(self.prerequisite_type), option_name=self.option_name, prerequisite_name=self.prerequisite_name, ) @dataclass(frozen=True) class PrerequisiteOptionMustBeEnabledAsWell(ReportItemMessage): """ If the option_name is enabled, the prerequisite_option must be also enabled option_name -- an option which depends on the prerequisite_option prerequisite_name -- the prerequisite option option_type -- describes the option prerequisite_type -- describes the prerequisite_option """ option_name: str prerequisite_name: str option_type: str = "" prerequisite_type: str = "" _code = 
codes.PREREQUISITE_OPTION_MUST_BE_ENABLED_AS_WELL @property def message(self) -> str: return ( "If {opt_desc}option '{option_name}' is enabled, " "{pre_desc}option '{prerequisite_name}' must be enabled as well" ).format( opt_desc=format_optional(self.option_type), pre_desc=format_optional(self.prerequisite_type), option_name=self.option_name, prerequisite_name=self.prerequisite_name, ) @dataclass(frozen=True) class PrerequisiteOptionMustBeDisabled(ReportItemMessage): """ If the option_name is enabled, the prerequisite_option must be disabled option_name -- an option which depends on the prerequisite_option prerequisite_name -- the prerequisite option option_type -- describes the option prerequisite_type -- describes the prerequisite_option """ option_name: str prerequisite_name: str option_type: str = "" prerequisite_type: str = "" _code = codes.PREREQUISITE_OPTION_MUST_BE_DISABLED @property def message(self) -> str: return ( "If {opt_desc}option '{option_name}' is enabled, " "{pre_desc}option '{prerequisite_name}' must be disabled" ).format( opt_desc=format_optional(self.option_type), pre_desc=format_optional(self.prerequisite_type), option_name=self.option_name, prerequisite_name=self.prerequisite_name, ) @dataclass(frozen=True) class PrerequisiteOptionMustNotBeSet(ReportItemMessage): """ The option_name cannot be set because the prerequisite_name is already set option_name -- an option which depends on the prerequisite_option prerequisite_name -- the prerequisite option option_type -- describes the option prerequisite_type -- describes the prerequisite_option """ option_name: str prerequisite_name: str option_type: str = "" prerequisite_type: str = "" _code = codes.PREREQUISITE_OPTION_MUST_NOT_BE_SET @property def message(self) -> str: return ( "Cannot set {opt_desc}option '{option_name}' because " "{pre_desc}option '{prerequisite_name}' is already set" ).format( opt_desc=format_optional(self.option_type), pre_desc=format_optional(self.prerequisite_type), option_name=self.option_name, prerequisite_name=self.prerequisite_name, ) @dataclass(frozen=True) class RequiredOptionOfAlternativesIsMissing(ReportItemMessage): """ At least one option has to be specified option_names -- options from which at least one has to be specified option_type -- describes the option """ option_names: List[str] option_type: Optional[str] = None _code = codes.REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING @property def message(self) -> str: return "{desc}option {option_names_list} has to be specified".format( desc=format_optional(self.option_type), option_names_list=format_list(self.option_names, separator=" or "), ) @dataclass(frozen=True) class InvalidOptions(ReportItemMessage): """ Specified option names are not valid, usually an error or a warning option_names -- specified invalid option names allowed -- possible allowed option names option_type -- describes the option allowed_patterns -- allowed user-defined option patterns """ option_names: List[str] allowed: List[str] option_type: Optional[str] = None allowed_patterns: List[str] = field(default_factory=list) _code = codes.INVALID_OPTIONS @property def message(self) -> str: template = "invalid {desc}option{plural_options} {option_names_list}," if not self.allowed and not self.allowed_patterns: template += " there are no options allowed" elif not self.allowed_patterns: template += " allowed option{plural_allowed} {allowed_values}" elif not self.allowed: template += ( " allowed are options matching patterns: " "{allowed_patterns_values}" ) else: template += ( "
allowed option{plural_allowed} {allowed_values}" " and" " options matching patterns: {allowed_patterns_values}" ) return template.format( desc=format_optional(self.option_type), allowed_values=format_list(self.allowed), allowed_patterns_values=format_list(self.allowed_patterns), option_names_list=format_list(self.option_names), plural_options=format_plural(self.option_names, "", "s:"), plural_allowed=format_plural(self.allowed, " is", "s are:"), ) @dataclass(frozen=True) class InvalidUserdefinedOptions(ReportItemMessage): """ Specified option names defined by a user are not valid This is different from invalid_options. In this case, the options are supposed to be defined by a user. This report carries information that the option names do not meet requirements, i.e. they contain disallowed characters. Invalid_options is used when the options are predefined by pcs (or underlying tools). option_names -- specified invalid option names allowed_characters -- which characters are allowed in the names option_type -- describes the option """ option_names: List[str] allowed_characters: str option_type: Optional[str] = None _code = codes.INVALID_USERDEFINED_OPTIONS @property def message(self) -> str: return ( "invalid {desc}option{plural_options} {option_names_list}, " "{desc}options may contain {allowed_characters} characters only" ).format( desc=format_optional(self.option_type), option_names_list=format_list(self.option_names), plural_options=format_plural(self.option_names, "", "s:"), allowed_characters=self.allowed_characters, ) @dataclass(frozen=True) class InvalidOptionType(ReportItemMessage): """ Specified value is not of a valid type for the option option_name -- option name whose value is not of a valid type allowed_types -- list of allowed types or string description """ option_name: str allowed_types: Union[List[str], str] _code = codes.INVALID_OPTION_TYPE @property def message(self) -> str: return "specified {option_name} is not valid, use {hint}".format( hint=( format_list(cast(List[str], self.allowed_types)) if is_iterable_not_str(self.allowed_types) else self.allowed_types ), option_name=self.option_name, ) @dataclass(frozen=True) class InvalidOptionValue(ReportItemMessage): """ Specified value is not valid for the option, usually an error or a warning option_name -- specified option name whose value is not valid option_value -- specified value which is not valid allowed_values -- a list or description of allowed values, may be undefined cannot_be_empty -- the value is empty and that is not allowed forbidden_characters -- characters the value cannot contain """ option_name: str option_value: str allowed_values: Union[List[str], str, None] cannot_be_empty: bool = False forbidden_characters: Optional[str] = None _code = codes.INVALID_OPTION_VALUE @property def message(self) -> str: if self.cannot_be_empty: template = "{option_name} cannot be empty" elif self.forbidden_characters: template = ( "{option_name} cannot contain {forbidden_characters} characters" ) else: template = "'{option_value}' is not a valid {option_name} value" if self.allowed_values: template += ", use {hint}" return template.format( hint=( format_list(cast(List[str], self.allowed_values)) if is_iterable_not_str(self.allowed_values) else self.allowed_values ), option_name=self.option_name, option_value=self.option_value, forbidden_characters=self.forbidden_characters, ) @dataclass(frozen=True) class DeprecatedOption(ReportItemMessage): """ Specified option name is deprecated and has been replaced by other option(s)
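Illustrative rendering (hypothetical values, not part of the original docstring): DeprecatedOption(option_name="master", replaced_by=["promotable"], option_type="").message renders as "option 'master' is deprecated and should not be used, use 'promotable' instead"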
option_name -- the deprecated option replaced_by -- new option(s) to be used instead option_type -- option description """ option_name: str replaced_by: List[str] option_type: str _code = codes.DEPRECATED_OPTION @property def message(self) -> str: return ( "{desc}option '{option_name}' is deprecated and should not be " "used, use {hint} instead" ).format( option_name=self.option_name, desc=format_optional(self.option_type), hint=format_list(self.replaced_by), ) @dataclass(frozen=True) class MutuallyExclusiveOptions(ReportItemMessage): """ Entered options cannot coexist option_names -- the mutually exclusive options entered option_type -- describes the option """ option_names: List[str] option_type: Optional[str] = None _code = codes.MUTUALLY_EXCLUSIVE_OPTIONS @property def message(self) -> str: return "Only one of {desc}options {option_names} can be used".format( desc=format_optional(self.option_type), option_names=format_list_custom_last_separator( self.option_names, " and " ), ) @dataclass(frozen=True) class InvalidCibContent(ReportItemMessage): """ Given cib content is not valid report -- human readable explanation of cib invalidity (stderr of `crm_verify`) can_be_more_verbose -- can the user ask for a more verbose report """ report: str can_be_more_verbose: bool _code = codes.INVALID_CIB_CONTENT @property def message(self) -> str: return "invalid cib:\n{report}".format(report=self.report) @dataclass(frozen=True) class InvalidIdIsEmpty(ReportItemMessage): """ Empty string was specified as an id, which is not valid id_description -- describes the id's role """ id_description: str _code = codes.INVALID_ID_IS_EMPTY @property def message(self) -> str: return f"{self.id_description} cannot be empty" @dataclass(frozen=True) class InvalidIdBadChar(ReportItemMessage): """ Specified id is not valid as it contains a forbidden character id -- specified id id_description -- describes the id's role invalid_character -- the forbidden character is_first_char -- is it the first character which is forbidden? """ # pylint: disable=invalid-name, redefined-builtin id: str id_description: str invalid_character: str is_first_char: bool _code = codes.INVALID_ID_BAD_CHAR @property def message(self) -> str: desc = "first " if self.is_first_char else "" return ( f"invalid {self.id_description} '{self.id}', " f"'{self.invalid_character}' is not a valid {desc}character for a " f"{self.id_description}" ) @dataclass(frozen=True) class InvalidTimeoutValue(ReportItemMessage): """ Specified timeout is not valid (number or other format e.g.
2min) timeout -- specified invalid timeout """ timeout: str _code = codes.INVALID_TIMEOUT_VALUE @property def message(self) -> str: return f"'{self.timeout}' is not a valid number of seconds to wait" @dataclass(frozen=True) class InvalidScore(ReportItemMessage): """ Specified score value is not valid score -- specified score value """ score: str _code = codes.INVALID_SCORE @property def message(self) -> str: return ( f"invalid score '{self.score}', use integer or INFINITY or " "-INFINITY" ) @dataclass(frozen=True) class MultipleScoreOptions(ReportItemMessage): """ More than one of mutually exclusive score options has been set (score, score-attribute, score-attribute-mangle in rules or colocation sets) """ _code = codes.MULTIPLE_SCORE_OPTIONS @property def message(self) -> str: return "multiple score options cannot be specified" @dataclass(frozen=True) class RunExternalProcessStarted(ReportItemMessage): """ Information about running an external process command -- the external process command stdin -- passed to the external process via its stdin environment -- environment variables for the command """ command: str stdin: Optional[str] environment: Mapping[str, str] _code = codes.RUN_EXTERNAL_PROCESS_STARTED @property def message(self) -> str: return ( "Running: {command}\nEnvironment:{env_part}\n{stdin_part}" ).format( command=self.command, stdin_part=format_optional( self.stdin, "--Debug Input Start--\n{}\n--Debug Input End--\n" ), env_part=( "" if not self.environment else "\n" + "\n".join( [ " {}={}".format(key, val) for key, val in sorted(self.environment.items()) ] ) ), ) @dataclass(frozen=True) class RunExternalProcessFinished(ReportItemMessage): """ Information about the result of running an external process command -- the external process command return_value -- external process's return (exit) code stdout -- external process's stdout stderr -- external process's stderr """ command: str return_value: int stdout: str stderr: str _code = codes.RUN_EXTERNAL_PROCESS_FINISHED @property def message(self) -> str: return ( f"Finished running: {self.command}\n" f"Return value: {self.return_value}\n" "--Debug Stdout Start--\n" f"{self.stdout}\n" "--Debug Stdout End--\n" "--Debug Stderr Start--\n" f"{self.stderr}\n" "--Debug Stderr End--\n" ) @dataclass(frozen=True) class RunExternalProcessError(ReportItemMessage): """ Attempt to run an external process failed command -- the external process command reason -- error description """ command: str reason: str _code = codes.RUN_EXTERNAL_PROCESS_ERROR @property def message(self) -> str: return f"unable to run command {self.command}: {self.reason}" @dataclass(frozen=True) class NodeCommunicationStarted(ReportItemMessage): """ Request is about to be sent to a remote node, debug info target -- where the request is about to be sent to data -- request's data """ target: str data: str _code = codes.NODE_COMMUNICATION_STARTED @property def message(self) -> str: data = format_optional( self.data, "--Debug Input Start--\n{}\n--Debug Input End--\n" ) return f"Sending HTTP Request to: {self.target}\n{data}" @dataclass(frozen=True) class NodeCommunicationFinished(ReportItemMessage): """ Remote node request has been finished, debug info target -- where the request was sent to response_code -- response return code response_data -- response data """ target: str response_code: int response_data: str _code = codes.NODE_COMMUNICATION_FINISHED @property def message(self) -> str: return ( f"Finished calling: {self.target}\n" f"Response Code: {self.response_code}\n" "--Debug
Response Start--\n" f"{self.response_data}\n" "--Debug Response End--\n" ) @dataclass(frozen=True) class NodeCommunicationDebugInfo(ReportItemMessage): """ Node communication debug info from pycurl target -- request target data -- pycurl communication data """ target: str data: str _code = codes.NODE_COMMUNICATION_DEBUG_INFO @property def message(self) -> str: return ( f"Communication debug info for calling: {self.target}\n" "--Debug Communication Info Start--\n" f"{self.data}\n" "--Debug Communication Info End--\n" ) @dataclass(frozen=True) class NodeCommunicationNotConnected(ReportItemMessage): """ An error occurred when connecting to a remote node, debug info node -- node address / name reason -- description of the error """ node: str reason: str _code = codes.NODE_COMMUNICATION_NOT_CONNECTED @property def message(self) -> str: return f"Unable to connect to {self.node} ({self.reason})" @dataclass(frozen=True) class NodeCommunicationNoMoreAddresses(ReportItemMessage): """ Request failed and there are no more addresses to try again """ node: str request: str _code = codes.NODE_COMMUNICATION_NO_MORE_ADDRESSES @property def message(self) -> str: return f"Unable to connect to '{self.node}' via any of its addresses" @dataclass(frozen=True) class NodeCommunicationErrorNotAuthorized(ReportItemMessage): """ Node rejected a request as we are not authorized node -- node address / name command -- executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED @property def message(self) -> str: return f"Unable to authenticate to {self.node} ({self.reason})" @dataclass(frozen=True) class NodeCommunicationErrorPermissionDenied(ReportItemMessage): """ Node rejected a request as we do not have permissions to run the request node -- node address / name command -- executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED @property def message(self) -> str: return f"{self.node}: Permission denied ({self.reason})" @dataclass(frozen=True) class NodeCommunicationErrorUnsupportedCommand(ReportItemMessage): """ Node rejected a request as it does not support the request node -- node address / name command -- executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND @property def message(self) -> str: return ( f"{self.node}: Unsupported command ({self.reason}), try upgrading " "pcsd" ) @dataclass(frozen=True) class NodeCommunicationCommandUnsuccessful(ReportItemMessage): """ Node rejected a request for another reason with a plain text explanation node -- node address / name command -- executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL @property def message(self) -> str: return f"{self.node}: {self.reason}" @dataclass(frozen=True) class NodeCommunicationError(ReportItemMessage): """ Node rejected a request for another reason (may be a faulty node) node -- node address / name command -- executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_ERROR @property def message(self) -> str: return f"Error connecting to {self.node} ({self.reason})" @dataclass(frozen=True) class NodeCommunicationErrorUnableToConnect(ReportItemMessage): """ We were unable to connect to a node node -- node address / name command --
executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT @property def message(self) -> str: return f"Unable to connect to {self.node} ({self.reason})" @dataclass(frozen=True) class NodeCommunicationErrorTimedOut(ReportItemMessage): """ Communication with node timed out. node -- node address / name command -- executed command reason -- description of the error """ node: str command: str reason: str _code = codes.NODE_COMMUNICATION_ERROR_TIMED_OUT @property def message(self) -> str: return f"{self.node}: Connection timeout ({self.reason})" @dataclass(frozen=True) class NodeCommunicationProxyIsSet(ReportItemMessage): """ Warning when connection failed and there is a proxy set in environment variables """ node: str = "" address: str = "" _code = codes.NODE_COMMUNICATION_PROXY_IS_SET @property def message(self) -> str: return "Proxy is set in environment variables, try disabling it" @dataclass(frozen=True) class NodeCommunicationRetrying(ReportItemMessage): """ Request failed due to a communication error when connecting via the specified address, therefore trying another address if there is any. """ node: str failed_address: str failed_port: str next_address: str next_port: str request: str _code = codes.NODE_COMMUNICATION_RETRYING @property def message(self) -> str: return ( f"Unable to connect to '{self.node}' via address " f"'{self.failed_address}' and port '{self.failed_port}'. Retrying " f"request '{self.request}' via address '{self.next_address}' and " f"port '{self.next_port}'" ) @dataclass(frozen=True) class DefaultsCanBeOverriden(ReportItemMessage): """ Warning when setting defaults (op_defaults, rsc_defaults...) """ _code = codes.DEFAULTS_CAN_BE_OVERRIDEN @property def message(self) -> str: return ( "Defaults do not apply to resources which override them with their " "own defined values" ) @dataclass(frozen=True) class CorosyncAuthkeyWrongLength(ReportItemMessage): """ Wrong corosync authkey length. """ _code = codes.COROSYNC_AUTHKEY_WRONG_LENGTH actual_length: int min_length: int max_length: int @property def message(self) -> str: if self.min_length == self.max_length: template = ( "{max_length} {bytes_allowed} key must be provided for a " "corosync authkey, {actual_length} {bytes_provided} key " "provided" ) else: template = ( "At least {min_length} and at most {max_length} " "{bytes_allowed} key must be provided for a corosync " "authkey, {actual_length} {bytes_provided} key provided" ) return template.format( min_length=self.min_length, max_length=self.max_length, actual_length=self.actual_length, bytes_allowed=format_plural(self.max_length, "byte"), bytes_provided=format_plural(self.actual_length, "byte"), ) @dataclass(frozen=True) class CorosyncConfigDistributionStarted(ReportItemMessage): """ Corosync configuration is about to be sent to nodes """ _code = codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED @property def message(self) -> str: return "Sending updated corosync.conf to nodes..."
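# Illustrative usage sketch (editorial addition, not part of the module API;
# the values below are hypothetical): each report message defined here is a
# frozen dataclass whose human-readable text is built by its `message`
# property, so rendering one is plain instantiation plus attribute access:
#
#     report = NodeCommunicationErrorTimedOut(
#         node="node1",
#         command="remote/status",
#         reason="Connection timed out after 60000 milliseconds",
#     )
#     # report.message == "node1: Connection timeout "
#     #     "(Connection timed out after 60000 milliseconds)"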
@dataclass(frozen=True) class CorosyncConfigAcceptedByNode(ReportItemMessage): """ Corosync configuration has been accepted by a node node -- node address / name """ node: str _code = codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE @property def message(self) -> str: return f"{self.node}: Succeeded" @dataclass(frozen=True) class CorosyncConfigDistributionNodeError(ReportItemMessage): """ Communication error occurred when saving corosync configuration to a node node -- faulty node address / name """ node: str _code = codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR @property def message(self) -> str: return f"{self.node}: Unable to set corosync config" @dataclass(frozen=True) class CorosyncNotRunningCheckStarted(ReportItemMessage): """ We are about to make sure corosync is not running on nodes """ _code = codes.COROSYNC_NOT_RUNNING_CHECK_STARTED @property def message(self) -> str: return "Checking corosync is not running on nodes..." @dataclass(frozen=True) class CorosyncNotRunningCheckNodeError(ReportItemMessage): """ Communication error occurred when checking corosync is not running on a node node -- faulty node address / name """ node: str _code = codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR @property def message(self) -> str: return f"{self.node}: Unable to check if corosync is not running" @dataclass(frozen=True) class CorosyncNotRunningOnNode(ReportItemMessage): """ Corosync is not running on a node node -- node address / name """ node: str _code = codes.COROSYNC_NOT_RUNNING_ON_NODE @property def message(self) -> str: return f"{self.node}: corosync is not running" @dataclass(frozen=True) class CorosyncRunningOnNode(ReportItemMessage): """ Corosync is running on a node, which is not ok node -- node address / name """ node: str _code = codes.COROSYNC_RUNNING_ON_NODE @property def message(self) -> str: return f"{self.node}: corosync is running" @dataclass(frozen=True) class CorosyncQuorumGetStatusError(ReportItemMessage): """ Unable to get runtime status of quorum reason -- an error message node -- a node where the error occurred, local node if not specified """ reason: str node: str = "" _code = codes.COROSYNC_QUORUM_GET_STATUS_ERROR @property def message(self) -> str: node = format_optional(self.node, "{}: ") return f"{node}Unable to get quorum status: {self.reason}" @dataclass(frozen=True) class CorosyncQuorumHeuristicsEnabledWithNoExec(ReportItemMessage): """ No exec_ is specified, therefore heuristics are effectively disabled """ _code = codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC @property def message(self) -> str: return ( "No exec_NAME options are specified, so heuristics are effectively " "disabled" ) @dataclass(frozen=True) class CorosyncQuorumSetExpectedVotesError(ReportItemMessage): """ Unable to set expected votes in a live cluster reason -- an error message """ reason: str _code = codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR @property def message(self) -> str: return f"Unable to set expected votes: {self.reason}" @dataclass(frozen=True) class CorosyncConfigReloaded(ReportItemMessage): """ Corosync configuration has been reloaded node -- node label on which operation has been executed """ node: str = "" _code = codes.COROSYNC_CONFIG_RELOADED @property def message(self) -> str: return "{node}Corosync configuration reloaded".format( node=format_optional(self.node, "{}: "), ) @dataclass(frozen=True) class CorosyncConfigReloadError(ReportItemMessage): """ An error occurred when reloading corosync configuration reason -- an error message node -- node label """ reason: str node: str =
"" _code = codes.COROSYNC_CONFIG_RELOAD_ERROR @property def message(self) -> str: node = format_optional(self.node, "{}: ") return f"{node}Unable to reload corosync configuration: {self.reason}" @dataclass(frozen=True) class CorosyncConfigReloadNotPossible(ReportItemMessage): """ Corosync configuration cannot be reloaded because corosync is not running on the specified node node -- node label on which confi """ node: str _code = codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE @property def message(self) -> str: return ( f"{self.node}: Corosync is not running, therefore reload of the " "corosync configuration is not possible" ) @dataclass(frozen=True) class CorosyncConfigUnsupportedTransport(ReportItemMessage): """ Transport type defined in corosync.conf is unknown. """ actual_transport: str supported_transport_types: List[str] _code = codes.COROSYNC_CONFIG_UNSUPPORTED_TRANSPORT @property def message(self) -> str: return ( "Transport '{actual_transport}' currently configured in " "corosync.conf is unsupported. Supported transport types are: " "{supported_transport_types}" ).format( actual_transport=self.actual_transport, supported_transport_types=format_list( self.supported_transport_types ), ) @dataclass(frozen=True) class ParseErrorCorosyncConfMissingClosingBrace(ReportItemMessage): """ Corosync config cannot be parsed due to missing closing brace """ _code = codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE @property def message(self) -> str: return "Unable to parse corosync config: missing closing brace" @dataclass(frozen=True) class ParseErrorCorosyncConfUnexpectedClosingBrace(ReportItemMessage): """ Corosync config cannot be parsed due to unexpected closing brace """ _code = codes.PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE @property def message(self) -> str: return "Unable to parse corosync config: unexpected closing brace" @dataclass(frozen=True) class ParseErrorCorosyncConfMissingSectionNameBeforeOpeningBrace( ReportItemMessage ): """ Corosync config cannot be parsed due to a section name missing before { """ # pylint: disable=line-too-long _code = ( codes.PARSE_ERROR_COROSYNC_CONF_MISSING_SECTION_NAME_BEFORE_OPENING_BRACE ) @property def message(self) -> str: return ( "Unable to parse corosync config: missing a section name before {" ) @dataclass(frozen=True) class ParseErrorCorosyncConfExtraCharactersAfterOpeningBrace(ReportItemMessage): """ Corosync config cannot be parsed due to extra characters after { """ _code = codes.PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_AFTER_OPENING_BRACE @property def message(self) -> str: return "Unable to parse corosync config: extra characters after {" @dataclass(frozen=True) class ParseErrorCorosyncConfExtraCharactersBeforeOrAfterClosingBrace( ReportItemMessage ): """ Corosync config cannot be parsed due to extra characters before or after } """ # pylint: disable=line-too-long _code = ( codes.PARSE_ERROR_COROSYNC_CONF_EXTRA_CHARACTERS_BEFORE_OR_AFTER_CLOSING_BRACE ) @property def message(self) -> str: return "Unable to parse corosync config: extra characters before or after }" @dataclass(frozen=True) class ParseErrorCorosyncConfLineIsNotSectionNorKeyValue(ReportItemMessage): """ Corosync config cannot be parsed due to a line is not a section nor key:val """ _code = codes.PARSE_ERROR_COROSYNC_CONF_LINE_IS_NOT_SECTION_NOR_KEY_VALUE @property def message(self) -> str: return ( "Unable to parse corosync config: a line is not opening or closing " "a section or key: value" ) @dataclass(frozen=True) class ParseErrorCorosyncConf(ReportItemMessage): 
""" Corosync config cannot be parsed, the cause is not specified. It is better to use more specific error if possible. """ _code = codes.PARSE_ERROR_COROSYNC_CONF @property def message(self) -> str: return "Unable to parse corosync config" @dataclass(frozen=True) class CorosyncConfigCannotSaveInvalidNamesValues(ReportItemMessage): """ cannot save corosync.conf - it contains forbidden characters which break it section_name_list -- bad names of sections attribute_name_list -- bad names of attributes attribute_value_pairs -- tuples (attribute_name, its_bad_value) """ section_name_list: List[str] attribute_name_list: List[str] attribute_value_pairs: List[Tuple[str, str]] _code = codes.COROSYNC_CONFIG_CANNOT_SAVE_INVALID_NAMES_VALUES @property def message(self) -> str: prefix = "Cannot save corosync.conf containing " if ( not self.section_name_list and not self.attribute_name_list and not self.attribute_value_pairs ): return ( f"{prefix}invalid section names, option names or option values" ) parts = [] if self.section_name_list: parts.append( "invalid section name(s): {}".format( format_list(self.section_name_list) ) ) if self.attribute_name_list: parts.append( "invalid option name(s): {}".format( format_list(self.attribute_name_list) ) ) if self.attribute_value_pairs: pairs = ", ".join( [ f"'{value}' (option '{name}')" for name, value in self.attribute_value_pairs ] ) parts.append(f"invalid option value(s): {pairs}") return "{}{}".format(prefix, "; ".join(parts)) @dataclass(frozen=True) class CorosyncConfigMissingNamesOfNodes(ReportItemMessage): """ Some nodes in corosync.conf do not have their name set, they will be omitted fatal -- if True, pcs cannot continue """ fatal: bool = False _code = codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES @property def message(self) -> str: note = ( "unable to continue" if self.fatal else "those nodes were omitted" ) return f"Some nodes are missing names in corosync.conf, {note}" @dataclass(frozen=True) class CorosyncConfigNoNodesDefined(ReportItemMessage): """ No nodes found in corosync.conf """ _code = codes.COROSYNC_CONFIG_NO_NODES_DEFINED @property def message(self) -> str: return "No nodes found in corosync.conf" @dataclass(frozen=True) class CorosyncOptionsIncompatibleWithQdevice(ReportItemMessage): """ Cannot set specified corosync options when qdevice is in use options -- incompatible options names """ options: List[str] _code = codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE @property def message(self) -> str: return ( "These options cannot be set when the cluster uses a quorum " "device: {}" ).format(format_list(self.options)) @dataclass(frozen=True) class CorosyncClusterNameInvalidForGfs2(ReportItemMessage): """ Chosen cluster name will prevent using GFS2 volumes in the cluster cluster_name -- the entered cluster name max_length -- maximal cluster name length supported by GFS2 allowed_characters -- allowed cluster name characters supported by GFS2 """ cluster_name: str max_length: int allowed_characters: str _code = codes.COROSYNC_CLUSTER_NAME_INVALID_FOR_GFS2 @property def message(self) -> str: return ( f"Chosen cluster name '{self.cluster_name}' will prevent mounting " f"GFS2 volumes in the cluster, use at most {self.max_length} " f"of {self.allowed_characters} characters; you may safely " f"override this if you do not intend to use GFS2" ) @dataclass(frozen=True) class CorosyncBadNodeAddressesCount(ReportItemMessage): """ Wrong number of addresses set for a corosync node. 
actual_count -- how many addresses set for a node min_count -- minimal allowed addresses count max_count -- maximal allowed addresses count node_name -- optionally specify node name node_index -- optionally specify node index (helps to identify a node if a name is missing) """ actual_count: int min_count: int max_count: int node_name: str = "" node_index: Optional[int] = None _code = codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT @property def message(self) -> str: if self.min_count == self.max_count: template = ( "{max_count} {addr_allowed} must be specified for a node, " "{actual_count} {addr_specified} specified{node_desc}" ) else: template = ( "At least {min_count} and at most {max_count} {addr_allowed} " "must be specified for a node, {actual_count} " "{addr_specified} specified{node_desc}" ) node_template = " for node '{}'" return template.format( node_desc=( format_optional(self.node_name, node_template) or format_optional(self.node_index, node_template) ), min_count=self.min_count, max_count=self.max_count, actual_count=self.actual_count, addr_allowed=format_plural(self.max_count, "address"), addr_specified=format_plural(self.actual_count, "address"), ) @dataclass(frozen=True) class CorosyncIpVersionMismatchInLinks(ReportItemMessage): """ Mixing IPv4 and IPv6 in one or more links, which is not allowed link_numbers -- numbers of links with mismatched IP versions """ link_numbers: List[str] = field(default_factory=list) _code = codes.COROSYNC_IP_VERSION_MISMATCH_IN_LINKS @property def message(self) -> str: links = format_optional( (format_list(self.link_numbers) if self.link_numbers else ""), " on link(s): {}", ) return ( "Using both IPv4 and IPv6 on one link is not allowed; please, use " f"either IPv4 or IPv6{links}" ) @dataclass(frozen=True) class CorosyncAddressIpVersionWrongForLink(ReportItemMessage): """ Cannot use an address in a link as it does not match the link's IP version. address -- a provided address expected_address_type -- an address type used in a link link_number -- number of the link """ address: str expected_address_type: str link_number: Optional[int] = None _code = codes.COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK @property def message(self) -> str: link = format_optional(self.link_number, "link '{}'", "the link") return ( f"Address '{self.address}' cannot be used in {link} " f"because the link uses {self.expected_address_type} addresses" ) @dataclass(frozen=True) class CorosyncLinkNumberDuplication(ReportItemMessage): """ Trying to set one link_number for multiple links, link numbers must be unique link_number_list -- list of nonunique link numbers """ link_number_list: List[str] _code = codes.COROSYNC_LINK_NUMBER_DUPLICATION @property def message(self) -> str: nums = format_list(sorted(self.link_number_list, key=_key_numeric)) return f"Link numbers must be unique, duplicate link numbers: {nums}" @dataclass(frozen=True) class CorosyncNodeAddressCountMismatch(ReportItemMessage): """ Nodes do not have the same number of addresses node_addr_count -- key: node name, value: number of addresses """ node_addr_count: Mapping[str, int] _code = codes.COROSYNC_NODE_ADDRESS_COUNT_MISMATCH @property def message(self) -> str: count_node: Dict[int, List[str]] = defaultdict(list) for node_name, count in self.node_addr_count.items(): count_node[count].append(node_name) parts = ["All nodes must have the same number of addresses"] # List most common number of addresses first.
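        # (illustrative editorial note, hypothetical values) e.g.
        # node_addr_count={"n1": 2, "n2": 2, "n3": 1} groups into
        # count_node={2: ["n1", "n2"], 1: ["n3"]}, so the two-address
        # majority is reported before the single-address node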
for count, nodes in sorted( count_node.items(), key=lambda pair: len(pair[1]), reverse=True ): parts.append( "{node} {nodes} {has} {count} {address}".format( node=format_plural(nodes, "node"), nodes=format_list(nodes), has=format_plural(nodes, "has"), count=count, address=format_plural(count, "address"), ) ) return "; ".join(parts) @dataclass(frozen=True) class NodeAddressesAlreadyExist(ReportItemMessage): """ Trying to add node(s) with addresses already used by other nodes address_list -- list of specified already existing addresses """ address_list: List[str] _code = codes.NODE_ADDRESSES_ALREADY_EXIST @property def message(self) -> str: pluralize = lambda word: format_plural(self.address_list, word) return ( "Node {address} {addr_list} {_is} already used by existing nodes; " "please, use other {address}" ).format( address=pluralize("address"), addr_list=format_list(self.address_list), _is=pluralize("is"), ) @dataclass(frozen=True) class NodeAddressesCannotBeEmpty(ReportItemMessage): """ Trying to set an empty node address or remove a node address in an update node_name_list -- list of node names with empty addresses """ node_name_list: List[str] _code = codes.NODE_ADDRESSES_CANNOT_BE_EMPTY @property def message(self) -> str: return ( "Empty address set for {node} {node_list}, an address cannot be " "empty" ).format( node=format_plural(self.node_name_list, "node"), node_list=format_list(self.node_name_list), ) @dataclass(frozen=True) class NodeAddressesDuplication(ReportItemMessage): """ Trying to set one address for multiple nodes or links, addresses must be unique address_list -- list of nonunique addresses """ address_list: List[str] _code = codes.NODE_ADDRESSES_DUPLICATION @property def message(self) -> str: addrs = format_list(self.address_list) return f"Node addresses must be unique, duplicate addresses: {addrs}" @dataclass(frozen=True) class NodeNamesAlreadyExist(ReportItemMessage): """ Trying to add node(s) with name(s) already used by other nodes name_list -- list of specified already used node names """ name_list: List[str] _code = codes.NODE_NAMES_ALREADY_EXIST @property def message(self) -> str: pluralize = lambda word: format_plural(self.name_list, word) return ( "Node {name} {name_list} {_is} already used by existing nodes; " "please, use other {name}" ).format( name=pluralize("name"), name_list=format_list(self.name_list), _is=pluralize("is"), ) @dataclass(frozen=True) class NodeNamesDuplication(ReportItemMessage): """ Trying to set one node name for multiple nodes, node names must be unique name_list -- list of nonunique node names """ name_list: List[str] _code = codes.NODE_NAMES_DUPLICATION @property def message(self) -> str: names = format_list(self.name_list) return f"Node names must be unique, duplicate names: {names}" @dataclass(frozen=True) class CorosyncNodesMissing(ReportItemMessage): """ No nodes have been specified """ _code = codes.COROSYNC_NODES_MISSING @property def message(self) -> str: return "No nodes have been specified" @dataclass(frozen=True) class CorosyncTooManyLinksOptions(ReportItemMessage): """ Options for more links than defined by nodes' addresses have been specified links_options_count -- for how many links options have been specified links_count -- how many links are defined """ links_options_count: int links_count: int _code = codes.COROSYNC_TOO_MANY_LINKS_OPTIONS @property def message(self) -> str: return ( "Cannot specify options for more links " f"({self.links_options_count}) than how many is defined by " f"number of addresses per node
({self.links_count})" ) @dataclass(frozen=True) class CorosyncCannotAddRemoveLinksBadTransport(ReportItemMessage): """ Cannot add or remove corosync links, used transport does not allow that actual_transport -- transport used in the cluster required_transports -- transports allowing links to be added / removed add_or_not_remove -- True for add, False for remove """ actual_transport: str required_transports: List[str] add_or_not_remove: bool _code = codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_BAD_TRANSPORT @property def message(self) -> str: action = "adding" if self.add_or_not_remove else "removing" return ( f"Cluster is using {self.actual_transport} transport which does " f"not support {action} links" ) # TODO: add_or_note_move should be changed to an action @dataclass(frozen=True) class CorosyncCannotAddRemoveLinksNoLinksSpecified(ReportItemMessage): """ Cannot add or remove links, no links were specified add_or_not_remove -- True for add, False for remove """ add_or_not_remove: bool _code = codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_NO_LINKS_SPECIFIED @property def message(self) -> str: return "Cannot {action} links, no links to {action} specified".format( action=("add" if self.add_or_not_remove else "remove"), ) @dataclass(frozen=True) class CorosyncCannotAddRemoveLinksTooManyFewLinks(ReportItemMessage): """ Cannot add or remove links, link count would exceed allowed limits links_change_count -- how many links to add / remove links_new_count -- how many links would be defined after the action links_limit_count -- maximal / minimal number of links allowed add_or_not_remove -- True for add, False for remove """ links_change_count: int links_new_count: int links_limit_count: int add_or_not_remove: bool _code = codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS @property def message(self) -> str: return ( "Cannot {action} {links_change_count} {link_change}, there " "would be {links_new_count} {link_new} defined which is " "{more_less} than allowed number of {links_limit_count} " "{link_limit}" ).format( links_change_count=self.links_change_count, links_new_count=self.links_new_count, links_limit_count=self.links_limit_count, action=("add" if self.add_or_not_remove else "remove"), more_less=("more" if self.add_or_not_remove else "less"), link_change=format_plural(self.links_change_count, "link"), link_new=format_plural(self.links_new_count, "link"), link_limit=format_plural(self.links_limit_count, "link"), ) @dataclass(frozen=True) class CorosyncLinkAlreadyExistsCannotAdd(ReportItemMessage): """ Cannot add a link with specified linknumber as it already exists """ link_number: str _code = codes.COROSYNC_LINK_ALREADY_EXISTS_CANNOT_ADD @property def message(self) -> str: return f"Cannot add link '{self.link_number}', it already exists" @dataclass(frozen=True) class CorosyncLinkDoesNotExistCannotRemove(ReportItemMessage): """ Cannot remove links which don't exist link_list -- links to remove which don't exist existing_link_list -- linknumbers of existing links """ link_list: List[str] existing_link_list: List[str] _code = codes.COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE @property def message(self) -> str: return ( "Cannot remove non-existent {link} {to_remove}, existing links: " "{existing}" ).format( link=format_plural(self.link_list, "link"), to_remove=format_list(self.link_list), existing=format_list(self.existing_link_list), ) @dataclass(frozen=True) class CorosyncLinkDoesNotExistCannotUpdate(ReportItemMessage): """ Cannot set options for the defined link because the link does not exist 
link_number -- number of the link to be updated existing_link_list -- linknumbers of existing links """ link_number: int existing_link_list: List[str] _code = codes.COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_UPDATE @property def message(self) -> str: link_list = format_list(self.existing_link_list) return ( f"Cannot set options for non-existent link '{self.link_number}', " f"existing links: {link_list}" ) @dataclass(frozen=True) class CorosyncTransportUnsupportedOptions(ReportItemMessage): """ A type of options is not supported with the given transport """ option_type: str actual_transport: str required_transports: List[str] _code = codes.COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS @property def message(self) -> str: required_transports = format_list(self.required_transports) return ( f"The {self.actual_transport} transport does not support " f"'{self.option_type}' options, use {required_transports} transport" ) @dataclass(frozen=True) class QdeviceAlreadyDefined(ReportItemMessage): """ Qdevice is already set up in a cluster, when it was expected not to be """ _code = codes.QDEVICE_ALREADY_DEFINED @property def message(self) -> str: return "quorum device is already defined" @dataclass(frozen=True) class QdeviceNotDefined(ReportItemMessage): """ Qdevice is not set up in a cluster, when it was expected to be """ _code = codes.QDEVICE_NOT_DEFINED @property def message(self) -> str: return "no quorum device is defined in this cluster" @dataclass(frozen=True) class QdeviceClientReloadStarted(ReportItemMessage): """ Qdevice client configuration is about to be reloaded on nodes """ _code = codes.QDEVICE_CLIENT_RELOAD_STARTED @property def message(self) -> str: return "Reloading qdevice configuration on nodes..." @dataclass(frozen=True) class QdeviceAlreadyInitialized(ReportItemMessage): """ Cannot create qdevice on local host, it has already been created model -- qdevice model """ model: str _code = codes.QDEVICE_ALREADY_INITIALIZED @property def message(self) -> str: return f"Quorum device '{self.model}' has been already initialized" @dataclass(frozen=True) class QdeviceNotInitialized(ReportItemMessage): """ Cannot work with qdevice on local host, it has not been created yet model -- qdevice model """ model: str _code = codes.QDEVICE_NOT_INITIALIZED @property def message(self) -> str: return f"Quorum device '{self.model}' has not been initialized yet" @dataclass(frozen=True) class QdeviceInitializationSuccess(ReportItemMessage): """ qdevice was successfully initialized on local host model -- qdevice model """ model: str _code = codes.QDEVICE_INITIALIZATION_SUCCESS @property def message(self) -> str: return f"Quorum device '{self.model}' initialized" @dataclass(frozen=True) class QdeviceInitializationError(ReportItemMessage): """ An error occurred when creating qdevice on local host model -- qdevice model reason -- an error message """ model: str reason: str _code = codes.QDEVICE_INITIALIZATION_ERROR @property def message(self) -> str: return ( f"Unable to initialize quorum device '{self.model}': {self.reason}" ) @dataclass(frozen=True) class QdeviceCertificateDistributionStarted(ReportItemMessage): """ Qdevice certificates are about to be set up on nodes """ _code = codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED @property def message(self) -> str: return "Setting up qdevice certificates on nodes..."
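# Illustrative sketch (editorial addition, hypothetical values): optional
# fields combine with the format_optional helper, so the same message class
# renders with or without a node prefix:
#
#     CorosyncQuorumGetStatusError(reason="timeout").message
#     # -> "Unable to get quorum status: timeout"
#     CorosyncQuorumGetStatusError(reason="timeout", node="node2").message
#     # -> "node2: Unable to get quorum status: timeout"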
@dataclass(frozen=True) class QdeviceCertificateAcceptedByNode(ReportItemMessage): """ Qdevice certificates have been saved to a node node -- node on which certificates have been saved """ node: str _code = codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE @property def message(self) -> str: return f"{self.node}: Succeeded" @dataclass(frozen=True) class QdeviceCertificateRemovalStarted(ReportItemMessage): """ Qdevice certificates are about to be removed from nodes """ _code = codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED @property def message(self) -> str: return "Removing qdevice certificates from nodes..." @dataclass(frozen=True) class QdeviceCertificateRemovedFromNode(ReportItemMessage): """ Qdevice certificates have been removed from a node node -- node on which certificates have been deleted """ node: str _code = codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE @property def message(self) -> str: return f"{self.node}: Succeeded" @dataclass(frozen=True) class QdeviceCertificateImportError(ReportItemMessage): """ An error occurred when importing qdevice certificate to a node reason -- an error message """ reason: str _code = codes.QDEVICE_CERTIFICATE_IMPORT_ERROR @property def message(self) -> str: return f"Unable to import quorum device certificate: {self.reason}" @dataclass(frozen=True) class QdeviceCertificateSignError(ReportItemMessage): """ An error occurred when signing qdevice certificate reason -- an error message """ reason: str _code = codes.QDEVICE_CERTIFICATE_SIGN_ERROR @property def message(self) -> str: return f"Unable to sign quorum device certificate: {self.reason}" @dataclass(frozen=True) class QdeviceDestroySuccess(ReportItemMessage): """ Qdevice configuration successfully removed from local host model -- qdevice model """ model: str _code = codes.QDEVICE_DESTROY_SUCCESS @property def message(self) -> str: return f"Quorum device '{self.model}' configuration files removed" @dataclass(frozen=True) class QdeviceDestroyError(ReportItemMessage): """ An error occurred when removing qdevice configuration from local host model -- qdevice model reason -- an error message """ model: str reason: str _code = codes.QDEVICE_DESTROY_ERROR @property def message(self) -> str: return f"Unable to destroy quorum device '{self.model}': {self.reason}" @dataclass(frozen=True) class QdeviceNotRunning(ReportItemMessage): """ Qdevice is expected to be running but is not running model -- qdevice model """ model: str _code = codes.QDEVICE_NOT_RUNNING @property def message(self) -> str: return f"Quorum device '{self.model}' is not running" @dataclass(frozen=True) class QdeviceGetStatusError(ReportItemMessage): """ Unable to get runtime status of qdevice model -- qdevice model reason -- an error message """ model: str reason: str _code = codes.QDEVICE_GET_STATUS_ERROR @property def message(self) -> str: return ( f"Unable to get status of quorum device '{self.model}': " f"{self.reason}" ) @dataclass(frozen=True) class QdeviceUsedByClusters(ReportItemMessage): """ Qdevice is currently being used by clusters, cannot stop it unless forced """ clusters: List[str] _code = codes.QDEVICE_USED_BY_CLUSTERS @property def message(self) -> str: cluster_list = format_list(self.clusters) return ( "Quorum device is currently being used by cluster(s): " f"{cluster_list}" ) @dataclass(frozen=True) class IdAlreadyExists(ReportItemMessage): """ Specified id already exists in CIB and cannot be used for a new CIB object id -- existing id """ # pylint: disable=invalid-name, redefined-builtin id: str _code = codes.ID_ALREADY_EXISTS
@property def message(self) -> str: return f"'{self.id}' already exists" @dataclass(frozen=True) class IdBelongsToUnexpectedType(ReportItemMessage): """ Specified id exists but belongs to another element than expected. For example, the user wants to create a resource in a group specified by id, but the id does not belong to a group. """ # pylint: disable=invalid-name, redefined-builtin id: str expected_types: List[str] current_type: str _code = codes.ID_BELONGS_TO_UNEXPECTED_TYPE @property def message(self) -> str: expected_type = _typelist_to_string(self.expected_types, article=True) return f"'{self.id}' is not {expected_type}" @dataclass(frozen=True) class ObjectWithIdInUnexpectedContext(ReportItemMessage): """ Object specified by object_type (tag) and object_id exists but not inside given context (expected_context_type, expected_context_id). """ object_type: str object_id: str expected_context_type: str expected_context_id: str _code = codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT @property def message(self) -> str: context_type = _type_to_string(self.expected_context_type) if self.expected_context_id: context = f"{context_type} '{self.expected_context_id}'" else: context = f"'{context_type}'" object_type = _type_to_string(self.object_type) return ( f"{object_type} '{self.object_id}' exists but does not belong to " f"{context}" ) @dataclass(frozen=True) class IdNotFound(ReportItemMessage): """ Specified id does not exist in CIB, user referenced a nonexisting id id -- specified id expected_types -- list of the id's expected types (roles) context_type -- context_id's role / type context_id -- specifies the search area """ # pylint: disable=invalid-name, redefined-builtin id: str expected_types: List[str] context_type: str = "" context_id: str = "" _code = codes.ID_NOT_FOUND @property def message(self) -> str: desc = format_optional(_typelist_to_string(self.expected_types)) if not self.context_type or not self.context_id: return f"{desc}'{self.id}' does not exist" return ( f"there is no {desc}'{self.id}' in the {self.context_type} " f"'{self.context_id}'" ) @dataclass(frozen=True) class ResourceBundleAlreadyContainsAResource(ReportItemMessage): """ The bundle already contains a resource, another one cannot be added bundle_id -- id of the bundle resource_id -- id of the resource already contained in the bundle """ bundle_id: str resource_id: str _code = codes.RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE @property def message(self) -> str: return ( f"bundle '{self.bundle_id}' already contains resource " f"'{self.resource_id}', a bundle may contain at most one resource" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class CannotGroupResourceAdjacentResourceForNewGroup(ReportItemMessage): """ Cannot put resources next to an adjacent resource in a group, because the group does not exist yet and therefore cannot contain the adjacent resource adjacent_resource_id -- id of an adjacent resource group_id -- id of the group resources cannot be put into """ adjacent_resource_id: str group_id: str _code = codes.CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_FOR_NEW_GROUP @property def message(self) -> str: return ( f"Group '{self.group_id}' does not exist and therefore does not " f"contain '{self.adjacent_resource_id}' resource to put resources " "next to" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class CannotGroupResourceAdjacentResourceNotInGroup(ReportItemMessage): """ Cannot put resources next to an adjacent resource in a group, because the adjacent resource does not belong
to the group adjacent_resource_id -- id of an adjacent resource group_id -- id of the group resources cannot be put into """ adjacent_resource_id: str group_id: str _code = codes.CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP @property def message(self) -> str: return ( f"There is no resource '{self.adjacent_resource_id}' in the group " f"'{self.group_id}', cannot put resources next to it in the group" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class CannotGroupResourceAlreadyInTheGroup(ReportItemMessage): """ Cannot put resources into a group, they are already there resource_list -- ids of resources which cannot be put into a group group_id -- id of the group the resource cannot be put into """ resource_list: List[str] group_id: str _code = codes.CANNOT_GROUP_RESOURCE_ALREADY_IN_THE_GROUP @property def message(self) -> str: resources = format_list(self.resource_list) exist = format_plural(self.resource_list, "exists", "exist") return f"{resources} already {exist} in '{self.group_id}'" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class CannotGroupResourceMoreThanOnce(ReportItemMessage): """ Cannot put the same resources into a group more than once resource_list -- ids of resources specified more than once """ resource_list: List[str] _code = codes.CANNOT_GROUP_RESOURCE_MORE_THAN_ONCE @property def message(self) -> str: resources = format_list(self.resource_list) return f"Resources specified more than once: {resources}" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class CannotGroupResourceNoResources(ReportItemMessage): """ Cannot put resources into a group, no resources were specified """ _code = codes.CANNOT_GROUP_RESOURCE_NO_RESOURCES @property def message(self) -> str: return "No resources to add" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class CannotGroupResourceNextToItself(ReportItemMessage): """ Cannot put a resource into a group next to itself resource_id -- id of the resource which cannot be put into a group """ resource_id: str _code = codes.CANNOT_GROUP_RESOURCE_NEXT_TO_ITSELF @property def message(self) -> str: return f"Cannot put resource '{self.resource_id}' next to itself" @dataclass(frozen=True) class CannotGroupResourceWrongType(ReportItemMessage): """ Cannot put a resource into a group as the resource is not a primitive resource_id -- id of the element which cannot be put into a group resource_type -- tag of the element which cannot be put into a group """ resource_id: str resource_type: str _code = codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE @property def message(self) -> str: return ( "'{resource_id}' is {_type_article} resource, {_type} resources " "cannot be put into a group" ).format( resource_id=self.resource_id, _type_article=_type_to_string(self.resource_type, article=True), _type=_type_to_string(self.resource_type, article=False), ) @dataclass(frozen=True) class UnableToGetResourceOperationDigests(ReportItemMessage): """ Unable to get resource digests from the pacemaker crm_resource tool. output -- stdout and stderr from crm_resource """ output: str _code = codes.UNABLE_TO_GET_RESOURCE_OPERATION_DIGESTS @property def message(self) -> str: return f"unable to get resource operation digests:\n{self.output}" @dataclass(frozen=True) class StonithResourcesDoNotExist(ReportItemMessage): """ Specified stonith resource doesn't exist (e.g.
when creating in constraints) stonith_ids -- list of specified stonith ids """ stonith_ids: List[str] _code = codes.STONITH_RESOURCES_DO_NOT_EXIST @property def message(self) -> str: stoniths = format_list(self.stonith_ids) return f"Stonith resource(s) {stoniths} do not exist" @dataclass(frozen=True) class StonithRestartlessUpdateOfScsiDevicesNotSupported(ReportItemMessage): """ Pacemaker does not support the digests option for calculation of digests needed for restartless update of scsi devices. """ _code = codes.STONITH_RESTARTLESS_UPDATE_OF_SCSI_DEVICES_NOT_SUPPORTED @property def message(self) -> str: return ( "Restartless update of scsi devices is not supported, please " "upgrade pacemaker" ) @dataclass(frozen=True) class StonithRestartlessUpdateUnsupportedAgent(ReportItemMessage): """ Specified resource is not supported for scsi devices update. resource_id -- resource id resource_type -- resource type supported_stonith_types -- list of supported stonith types """ resource_id: str resource_type: str supported_stonith_types: List[str] _code = codes.STONITH_RESTARTLESS_UPDATE_UNSUPPORTED_AGENT @property def message(self) -> str: return ( "Resource '{resource_id}' is not a stonith resource or its type " "'{resource_type}' is not supported for devices update. Supported " "{_type}: {supported_types}" ).format( resource_id=self.resource_id, resource_type=self.resource_type, _type=format_plural(self.supported_stonith_types, "type"), supported_types=format_list(self.supported_stonith_types), ) @dataclass(frozen=True) class StonithUnfencingFailed(ReportItemMessage): """ Unfencing failed on a cluster node. """ reason: str _code = codes.STONITH_UNFENCING_FAILED @property def message(self) -> str: return f"Unfencing failed:\n{self.reason}" @dataclass(frozen=True) class StonithUnfencingDeviceStatusFailed(ReportItemMessage): """ Unfencing failed on a cluster node, unable to check status of a device. """ device: str reason: str _code = codes.STONITH_UNFENCING_DEVICE_STATUS_FAILED @property def message(self) -> str: return ( "Unfencing failed, unable to check status of device " f"'{self.device}': {self.reason}" ) @dataclass(frozen=True) class StonithUnfencingSkippedDevicesFenced(ReportItemMessage): """ Unfencing skipped on a cluster node, because fenced devices were found on the node. """ devices: List[str] _code = codes.STONITH_UNFENCING_SKIPPED_DEVICES_FENCED @property def message(self) -> str: return ( "Unfencing skipped, {device_pl} {devices} {is_pl} fenced" ).format( device_pl=format_plural(self.devices, "device"), devices=format_list(self.devices), is_pl=format_plural(self.devices, "is", "are"), ) @dataclass(frozen=True) class StonithRestartlessUpdateUnableToPerform(ReportItemMessage): """ Unable to update scsi devices without restart for various reasons reason -- the reason reason_type -- type for reason differentiation """ reason: str reason_type: types.StonithRestartlessUpdateUnableToPerformReason = ( const.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_OTHER ) _code = codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM @property def message(self) -> str: return ( "Unable to perform restartless update of scsi devices: " f"{self.reason}" ) @dataclass(frozen=True) class ResourceRunningOnNodes(ReportItemMessage): """ Resource is running on some nodes. Taken from cluster state.
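Illustrative rendering (hypothetical values, editorial addition): resource_id="R1", roles_with_nodes={"Started": ["node1"]} renders as "resource 'R1' is running on node 'node1'"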
resource_id -- id of the resource """ resource_id: str roles_with_nodes: Dict[str, List[str]] _code = codes.RESOURCE_RUNNING_ON_NODES @property def message(self) -> str: role_label_map = { "Started": "running", } state_info: Dict[str, List[str]] = {} for state, node_list in self.roles_with_nodes.items(): state_info.setdefault( role_label_map.get(state, state.lower()), [] ).extend(node_list) return "resource '{resource_id}' is {detail_list}".format( resource_id=self.resource_id, detail_list="; ".join( sorted( [ "{run_type} on {node} {node_list}".format( run_type=run_type, node=format_plural(node_list, "node"), node_list=format_list(node_list), ) for run_type, node_list in state_info.items() ] ) ), ) @dataclass(frozen=True) class ResourceDoesNotRun(ReportItemMessage): """ Resource is not running on any node. Taken from cluster state. resource_id -- id of the resource """ resource_id: str _code = codes.RESOURCE_DOES_NOT_RUN @property def message(self) -> str: return f"resource '{self.resource_id}' is not running on any node" @dataclass(frozen=True) class ResourceIsGuestNodeAlready(ReportItemMessage): """ The resource is already used as guest node (i.e. has meta attribute remote-node). resource_id -- id of the resource that is guest node """ resource_id: str _code = codes.RESOURCE_IS_GUEST_NODE_ALREADY @property def message(self) -> str: return f"the resource '{self.resource_id}' is already a guest node" @dataclass(frozen=True) class ResourceIsUnmanaged(ReportItemMessage): """ The resource the user works with is unmanaged (e.g. in enable/disable) resource_id -- id of the unmanaged resource """ resource_id: str _code = codes.RESOURCE_IS_UNMANAGED @property def message(self) -> str: return f"'{self.resource_id}' is unmanaged" @dataclass(frozen=True) class ResourceManagedNoMonitorEnabled(ReportItemMessage): """ The resource which was set to managed mode has no monitor operations enabled resource_id -- id of the resource """ resource_id: str _code = codes.RESOURCE_MANAGED_NO_MONITOR_ENABLED @property def message(self) -> str: return ( f"Resource '{self.resource_id}' has no enabled monitor operations" ) @dataclass(frozen=True) class CibLoadError(ReportItemMessage): """ Cannot load cib from cibadmin, cibadmin exited with non-zero code reason -- error description """ reason: str _code = codes.CIB_LOAD_ERROR @property def message(self) -> str: return "unable to get cib" @dataclass(frozen=True) class CibLoadErrorGetNodesForValidation(ReportItemMessage): """ Unable to load CIB, unable to get remote and guest nodes for validation """ _code = codes.CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION @property def message(self) -> str: return ( "Unable to load CIB to get guest and remote nodes from it, " "those nodes cannot be considered in configuration validation" ) @dataclass(frozen=True) class CibLoadErrorScopeMissing(ReportItemMessage): """ Cannot load cib from cibadmin, specified scope is missing in the cib scope -- requested cib scope reason -- error description """ scope: str reason: str _code = codes.CIB_LOAD_ERROR_SCOPE_MISSING @property def message(self) -> str: return f"unable to get cib, scope '{self.scope}' not present in cib" @dataclass(frozen=True) class CibLoadErrorBadFormat(ReportItemMessage): """ Cib does not conform to the schema """ reason: str _code = codes.CIB_LOAD_ERROR_BAD_FORMAT @property def message(self) -> str: return f"unable to get cib, {self.reason}" @dataclass(frozen=True) class CibCannotFindMandatorySection(ReportItemMessage): """ CIB is missing a section which is required to
be present section -- name of the missing section (element name or path) """ section: str _code = codes.CIB_CANNOT_FIND_MANDATORY_SECTION @property def message(self) -> str: return f"Unable to get '{self.section}' section of cib" @dataclass(frozen=True) class CibPushError(ReportItemMessage): """ Cannot push cib to cibadmin, cibadmin exited with non-zero code reason -- error description pushed_cib -- cib which failed to be pushed """ reason: str pushed_cib: str _code = codes.CIB_PUSH_ERROR @property def message(self) -> str: return f"Unable to update cib\n{self.reason}\n{self.pushed_cib}" @dataclass(frozen=True) class CibSaveTmpError(ReportItemMessage): """ Cannot save CIB into a temporary file reason -- error description """ reason: str _code = codes.CIB_SAVE_TMP_ERROR @property def message(self) -> str: return f"Unable to save CIB to a temporary file: {self.reason}" @dataclass(frozen=True) class CibDiffError(ReportItemMessage): """ Cannot obtain a diff of CIBs reason -- error description cib_old -- the CIB to be diffed against cib_new -- the CIB diffed against the old cib """ reason: str cib_old: str cib_new: str _code = codes.CIB_DIFF_ERROR @property def message(self) -> str: return f"Unable to diff CIB: {self.reason}\n{self.cib_new}" @dataclass(frozen=True) class CibSimulateError(ReportItemMessage): """ Cannot simulate effects a CIB would have on a live cluster reason -- error description """ reason: str _code = codes.CIB_SIMULATE_ERROR @property def message(self) -> str: return "Unable to simulate changes in CIB{_reason}".format( _reason=format_optional(self.reason, ": {0}"), ) @dataclass(frozen=True) class CrmMonError(ReportItemMessage): """ Cannot load cluster status from crm_mon, crm_mon exited with non-zero code reason -- description of the error """ reason: str _code = codes.CRM_MON_ERROR @property def message(self) -> str: return "error running crm_mon, is pacemaker running?{reason}".format( reason=( ("\n" + "\n".join(indent(self.reason.strip().splitlines()))) if self.reason.strip() else "" ), ) @dataclass(frozen=True) class BadClusterStateFormat(ReportItemMessage): """ crm_mon xml output does not conform to the schema """ _code = codes.BAD_CLUSTER_STATE_FORMAT @property def message(self) -> str: return "cannot load cluster status, xml does not conform to the schema" @dataclass(frozen=True) class WaitForIdleStarted(ReportItemMessage): """ Waiting for cluster to apply updated configuration and to settle down timeout -- wait timeout in seconds """ timeout: int _code = codes.WAIT_FOR_IDLE_STARTED @property def message(self) -> str: timeout_str = ( " (timeout: {timeout} {second_pl})".format( timeout=self.timeout, second_pl=format_plural(self.timeout, "second"), ) if self.timeout > 0 else "" ) return ( "Waiting for the cluster to apply configuration changes" f"{timeout_str}..." 
) @dataclass(frozen=True) class WaitForIdleTimedOut(ReportItemMessage): """ Waiting for resources (crm_resource --wait) failed, timeout expired reason -- error description """ reason: str _code = codes.WAIT_FOR_IDLE_TIMED_OUT @property def message(self) -> str: return f"waiting timeout\n\n{self.reason}" @dataclass(frozen=True) class WaitForIdleError(ReportItemMessage): """ Waiting for resources (crm_resource --wait) failed reason -- error description """ reason: str _code = codes.WAIT_FOR_IDLE_ERROR @property def message(self) -> str: return self.reason @dataclass(frozen=True) class WaitForIdleNotLiveCluster(ReportItemMessage): """ Cannot wait for the cluster if not running with a live cluster """ _code = codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER @property def message(self) -> str: return "Cannot use 'mocked CIB' together with 'wait'" @dataclass(frozen=True) class ResourceCleanupError(ReportItemMessage): """ An error occurred when deleting resource failed operations in pacemaker reason -- error description resource -- resource which has been cleaned up node -- node which has been cleaned up """ reason: str resource: Optional[str] = None node: Optional[str] = None _code = codes.RESOURCE_CLEANUP_ERROR @property def message(self) -> str: if self.resource: return ( "Unable to forget failed operations of resource: " f"{self.resource}\n{self.reason}" ) return f"Unable to forget failed operations of resources\n{self.reason}" @dataclass(frozen=True) class ResourceRefreshError(ReportItemMessage): """ An error occurred when deleting resource history in pacemaker reason -- error description resource -- resource which has been cleaned up node -- node which has been cleaned up """ reason: str resource: Optional[str] = None node: Optional[str] = None _code = codes.RESOURCE_REFRESH_ERROR @property def message(self) -> str: if self.resource: return ( "Unable to delete history of resource: " f"{self.resource}\n{self.reason}" ) return f"Unable to delete history of resources\n{self.reason}" @dataclass(frozen=True) class ResourceRefreshTooTimeConsuming(ReportItemMessage): """ Resource refresh would execute more than threshold operations in a cluster threshold -- current threshold for triggering this error """ threshold: int _code = codes.RESOURCE_REFRESH_TOO_TIME_CONSUMING @property def message(self) -> str: return ( "Deleting history of all resources on all nodes will execute more " f"than {self.threshold} operations in the cluster, which may " "negatively impact the responsiveness of the cluster. " "Consider specifying resource and/or node" ) @dataclass(frozen=True) class ResourceOperationIntervalDuplication(ReportItemMessage): """ Multiple operations with the same name and the same interval appeared. Each operation with the same name (e.g. monitoring) needs to have a unique interval. dict duplications -- see resource operation interval duplication in pcs/lib/exchange_formats.md """ duplications: Mapping[str, List[List[str]]] _code = codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION @property def message(self) -> str: return ( "multiple specification of the same operation with the same " "interval:\n" + "\n".join( [ "{0} with intervals {1}".format(name, ", ".join(intervals)) for name, intervals_list in self.duplications.items() for intervals in intervals_list ] ) ) @dataclass(frozen=True) class ResourceOperationIntervalAdapted(ReportItemMessage): """ Interval of a resource operation was adapted so that intervals of operations with the same name are unique. Each operation with the same name (e.g. monitoring) needs to have a unique interval.
""" operation_name: str original_interval: str adapted_interval: str _code = codes.RESOURCE_OPERATION_INTERVAL_ADAPTED @property def message(self) -> str: return ( f"changing a {self.operation_name} operation interval from " f"{self.original_interval} to {self.adapted_interval} to make the " "operation unique" ) @dataclass(frozen=True) class NodeNotFound(ReportItemMessage): """ Specified node does not exist node -- specified node searched_types -- types of nodes that were searched """ node: str searched_types: List[str] = field(default_factory=list) _code = codes.NODE_NOT_FOUND @property def message(self) -> str: desc = _build_node_description(self.searched_types) return f"{desc} '{self.node}' does not appear to exist in configuration" @dataclass(frozen=True) class NodeToClearIsStillInCluster(ReportItemMessage): """ Specified node is still in the cluster and `crm_node --remove` should not be used node -- specified node """ node: str _code = codes.NODE_TO_CLEAR_IS_STILL_IN_CLUSTER @property def message(self) -> str: return ( f"node '{self.node}' seems to be still in the cluster; this " "command should be used only with nodes that have been removed " "from the cluster" ) @dataclass(frozen=True) class NodeRemoveInPacemakerFailed(ReportItemMessage): """ Removing nodes from pacemaker failed. node_list_to_remove -- nodes which should be removed node -- node on which operation was performed reason -- reason of failure """ node_list_to_remove: List[str] node: str = "" reason: str = "" _code = codes.NODE_REMOVE_IN_PACEMAKER_FAILED @property def message(self) -> str: return ( "{node}Unable to remove node(s) {node_list} from pacemaker{reason}" ).format( node=format_optional(self.node, "{}: "), reason=format_optional(self.reason, ": {}"), node_list=format_list(self.node_list_to_remove), ) @dataclass(frozen=True) class MultipleResultsFound(ReportItemMessage): """ Multiple results were found when something was looked for, e.g. a resource for a remote node. result_type -- specifies what was looked for, e.g. "resource" result_identifier_list -- contains identifiers of results e.g. resource ids search_description -- e.g. name of remote_node """ result_type: str result_identifier_list: List[str] search_description: str = "" _code = codes.MULTIPLE_RESULTS_FOUND @property def message(self) -> str: return "more than one {result_type}{desc} found: {what_found}".format( what_found=format_list(self.result_identifier_list), desc=format_optional(self.search_description, " for '{}'"), result_type=self.result_type, ) @dataclass(frozen=True) class PacemakerSimulationResult(ReportItemMessage): """ This report contains crm_simulate output.
str plaintext_output -- plaintext output from crm_simulate """ plaintext_output: str _code = codes.PACEMAKER_SIMULATION_RESULT @property def message(self) -> str: return f"\nSimulation result:\n{self.plaintext_output}" @dataclass(frozen=True) class PacemakerLocalNodeNameNotFound(ReportItemMessage): """ We are unable to figure out pacemaker's local node's name reason -- error message """ reason: str _code = codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND @property def message(self) -> str: return f"unable to get local node name from pacemaker: {self.reason}" @dataclass(frozen=True) class ServiceActionStarted(ReportItemMessage): """ System service action started action -- started service action service -- service name or description instance -- instance of service """ action: types.ServiceAction service: str instance: str = "" _code = codes.SERVICE_ACTION_STARTED @property def message(self) -> str: action_str = _service_action_str(self.action, "ing").capitalize() instance_suffix = format_optional(self.instance, INSTANCE_SUFFIX) return f"{action_str} {self.service}{instance_suffix}..." @dataclass(frozen=True) class ServiceActionFailed(ReportItemMessage): """ System service action failed action -- failed service action service -- service name or description reason -- error message node -- node on which service has been requested to start instance -- instance of service """ action: types.ServiceAction service: str reason: str node: str = "" instance: str = "" _code = codes.SERVICE_ACTION_FAILED @property def message(self) -> str: return ( "{node_prefix}Unable to {action} {service}{instance_suffix}:" " {reason}" ).format( action=_service_action_str(self.action), service=self.service, reason=self.reason, instance_suffix=format_optional(self.instance, INSTANCE_SUFFIX), node_prefix=format_optional(self.node, NODE_PREFIX), ) @dataclass(frozen=True) class ServiceActionSucceeded(ReportItemMessage): """ System service action was successful action -- successful service action service -- service name or description node -- node on which service has been requested to start instance -- instance of service """ action: types.ServiceAction service: str node: str = "" instance: str = "" _code = codes.SERVICE_ACTION_SUCCEEDED @property def message(self) -> str: return "{node_prefix}{service}{instance_suffix} {action}".format( action=_service_action_str(self.action, "ed"), service=self.service, instance_suffix=format_optional(self.instance, INSTANCE_SUFFIX), node_prefix=format_optional(self.node, NODE_PREFIX), ) @dataclass(frozen=True) class ServiceActionSkipped(ReportItemMessage): """ System service action was skipped, no error occurred action -- skipped service action service -- service name or description reason -- why the start has been skipped node -- node on which service has been requested to start instance -- instance of service """ action: types.ServiceAction service: str reason: str node: str = "" instance: str = "" _code = codes.SERVICE_ACTION_SKIPPED @property def message(self) -> str: return ( "{node_prefix}not {action} {service}{instance_suffix}: {reason}" ).format( action=_service_action_str(self.action, "ing"), service=self.service, reason=self.reason, instance_suffix=format_optional(self.instance, INSTANCE_SUFFIX), node_prefix=format_optional(self.node, NODE_PREFIX), ) @dataclass(frozen=True) class ServiceUnableToDetectInitSystem(ReportItemMessage): """ Autodetection of currently used init system was not successful, therefore system service management is not available.
""" _code = codes.SERVICE_UNABLE_TO_DETECT_INIT_SYSTEM @property def message(self) -> str: return ( "Unable to detect init system. All actions related to system " "services will be skipped." ) @dataclass(frozen=True) class UnableToGetAgentMetadata(ReportItemMessage): """ There were some issues trying to get metadata of an agent agent -- agent whose metadata could not be obtained reason -- reason of failure """ agent: str reason: str _code = codes.UNABLE_TO_GET_AGENT_METADATA @property def message(self) -> str: return ( f"Agent '{self.agent}' is not installed or does not provide valid" f" metadata: {self.reason}" ) @dataclass(frozen=True) class InvalidResourceAgentName(ReportItemMessage): """ The entered resource agent name is not valid. This name has an internal structure. The code needs to work with parts of this structure and fails if the parts cannot be obtained. name -- entered name """ name: str _code = codes.INVALID_RESOURCE_AGENT_NAME @property def message(self) -> str: return ( f"Invalid resource agent name '{self.name}'." " Use standard:provider:type when standard is 'ocf' or" " standard:type otherwise." " List of standards and providers can be obtained by using commands" " 'pcs resource standards' and 'pcs resource providers'" ) @dataclass(frozen=True) class InvalidStonithAgentName(ReportItemMessage): """ The entered stonith agent name is not valid. name -- entered stonith agent name """ name: str _code = codes.INVALID_STONITH_AGENT_NAME @property def message(self) -> str: return ( f"Invalid stonith agent name '{self.name}'." " List of agents can be obtained by using command" " 'pcs stonith list'. Do not use the 'stonith:' prefix. Agent name" " cannot contain the ':' character." ) @dataclass(frozen=True) class AgentNameGuessed(ReportItemMessage): """ Resource agent name was deduced from the entered name. Pcs supports using an abbreviated resource agent name (e.g. ocf:heartbeat:Delay => Delay) when it can be clearly deduced.
entered_name -- entered name guessed_name -- deduced name """ entered_name: str guessed_name: str _code = codes.AGENT_NAME_GUESSED @property def message(self) -> str: return ( f"Assumed agent name '{self.guessed_name}' (deduced from " f"'{self.entered_name}')" ) @dataclass(frozen=True) class AgentNameGuessFoundMoreThanOne(ReportItemMessage): """ More than one agent found based on the search string, specify one of them agent -- searched name of an agent possible_agents -- full names of agents matching the search """ agent: str possible_agents: List[str] _code = codes.AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE @property def message(self) -> str: possible = format_list(self.possible_agents) return ( f"Multiple agents match '{self.agent}', please specify full name: " f"{possible}" ) @dataclass(frozen=True) class AgentNameGuessFoundNone(ReportItemMessage): """ Specified agent doesn't exist agent -- name of the agent which doesn't exist """ agent: str _code = codes.AGENT_NAME_GUESS_FOUND_NONE @property def message(self) -> str: return ( f"Unable to find agent '{self.agent}', try specifying its full name" ) @dataclass(frozen=True) class OmittingNode(ReportItemMessage): """ Warning that specified node will be omitted in following actions node -- node name """ node: str _code = codes.OMITTING_NODE @property def message(self) -> str: return f"Omitting node '{self.node}'" @dataclass(frozen=True) class SbdCheckStarted(ReportItemMessage): """ Info that SBD pre-enabling checks started """ _code = codes.SBD_CHECK_STARTED @property def message(self) -> str: return "Running SBD pre-enabling checks..." @dataclass(frozen=True) class SbdCheckSuccess(ReportItemMessage): """ Info that SBD pre-enabling checks finished without issues on the specified node node -- node name """ node: str _code = codes.SBD_CHECK_SUCCESS @property def message(self) -> str: return f"{self.node}: SBD pre-enabling checks done" @dataclass(frozen=True) class SbdConfigDistributionStarted(ReportItemMessage): """ Distribution of SBD configuration started """ _code = codes.SBD_CONFIG_DISTRIBUTION_STARTED @property def message(self) -> str: return "Distributing SBD config..."
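# A hedged usage sketch (illustrative, not upstream documentation): report
# messages in this module are rendered by instantiating a dataclass and
# reading its `message` property, e.g.
#   SbdCheckSuccess(node="node1").message
#   # -> "node1: SBD pre-enabling checks done"
# where "node1" is a hypothetical node name.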
@dataclass(frozen=True) class SbdConfigAcceptedByNode(ReportItemMessage): """ Info that SBD configuration has been saved successfully on the specified node node -- node name """ node: str _code = codes.SBD_CONFIG_ACCEPTED_BY_NODE @property def message(self) -> str: return f"{self.node}: SBD config saved" @dataclass(frozen=True) class UnableToGetSbdConfig(ReportItemMessage): """ Unable to get SBD config from the specified node (communication or parsing error) node -- node name reason -- reason of failure """ node: str reason: str _code = codes.UNABLE_TO_GET_SBD_CONFIG @property def message(self) -> str: return ( "Unable to get SBD configuration from node '{node}'{reason}" ).format( node=self.node, reason=format_optional(self.reason, ": {}"), ) @dataclass(frozen=True) class SbdDeviceInitializationStarted(ReportItemMessage): """ Initialization of SBD device(s) started """ device_list: List[str] _code = codes.SBD_DEVICE_INITIALIZATION_STARTED @property def message(self) -> str: return "Initializing {device} {device_list}...".format( device=format_plural(self.device_list, "device"), device_list=format_list(self.device_list), ) @dataclass(frozen=True) class SbdDeviceInitializationSuccess(ReportItemMessage): """ Initialization of SBD device(s) succeeded """ device_list: List[str] _code = codes.SBD_DEVICE_INITIALIZATION_SUCCESS @property def message(self) -> str: device = format_plural(self.device_list, "Device") return f"{device} initialized successfully" @dataclass(frozen=True) class SbdDeviceInitializationError(ReportItemMessage): """ Initialization of SBD device failed """ device_list: List[str] reason: str _code = codes.SBD_DEVICE_INITIALIZATION_ERROR @property def message(self) -> str: return ( "Initialization of {device} {device_list} failed: {reason}" ).format( device=format_plural(self.device_list, "device"), device_list=format_list(self.device_list), reason=self.reason, ) @dataclass(frozen=True) class SbdDeviceListError(ReportItemMessage): """ Command 'sbd list' failed """ device: str reason: str _code = codes.SBD_DEVICE_LIST_ERROR @property def message(self) -> str: return ( f"Unable to get list of messages from device '{self.device}': " f"{self.reason}" ) @dataclass(frozen=True) class SbdDeviceMessageError(ReportItemMessage): """ Unable to set message 'message' on shared block device 'device' for node 'node'.
""" device: str node: str sbd_message: str reason: str _code = codes.SBD_DEVICE_MESSAGE_ERROR @property def message(self) -> str: return ( f"Unable to set message '{self.sbd_message}' for node " f"'{self.node}' on device '{self.device}': {self.reason}" ) @dataclass(frozen=True) class SbdDeviceDumpError(ReportItemMessage): """ Command 'sbd dump' failed """ device: str reason: str _code = codes.SBD_DEVICE_DUMP_ERROR @property def message(self) -> str: return ( f"Unable to get SBD headers from device '{self.device}': " f"{self.reason}" ) @dataclass(frozen=True) class FilesDistributionStarted(ReportItemMessage): """ Files are about to be sent to nodes file_list -- files to be sent node_list -- node names where the files are being sent """ file_list: List[str] = field(default_factory=list) node_list: List[str] = field(default_factory=list) _code = codes.FILES_DISTRIBUTION_STARTED @property def message(self) -> str: return "Sending {description}{where}".format( where=format_optional(format_list(self.node_list), " to {}"), description=format_list(self.file_list), ) @dataclass(frozen=True) class FilesDistributionSkipped(ReportItemMessage): """ Files distribution skipped due to unreachable nodes or not live cluster reason_type -- why was the action skipped (unreachable, not_live_cib) file_list -- contains description of files node_list -- where the files should have been sent to """ reason_type: types.ReasonType file_list: List[str] node_list: List[str] _code = codes.FILES_DISTRIBUTION_SKIPPED @property def message(self) -> str: return ( "Distribution of {files} to {nodes} was skipped because " "{reason}. Please, distribute the file(s) manually." ).format( files=format_list(self.file_list), nodes=format_list(self.node_list), reason=_skip_reason_to_string(self.reason_type), ) @dataclass(frozen=True) class FileDistributionSuccess(ReportItemMessage): """ A file has been successfully distributed to a node node -- name of a destination node file_description -- name (code) of a successfully put file """ node: str file_description: str _code = codes.FILE_DISTRIBUTION_SUCCESS @property def message(self) -> str: return ( f"{self.node}: successful distribution of the file " f"'{self.file_description}'" ) @dataclass(frozen=True) class FileDistributionError(ReportItemMessage): """ Cannot put a file to a specific node node -- name of a destination node file_description -- code of a file reason -- an error message """ node: str file_description: str reason: str _code = codes.FILE_DISTRIBUTION_ERROR @property def message(self) -> str: return ( f"{self.node}: unable to distribute file " f"'{self.file_description}': {self.reason}" ) @dataclass(frozen=True) class FilesRemoveFromNodesStarted(ReportItemMessage): """ Files are about to be removed from nodes file_list -- files to be removed node_list -- node names the files are being removed from """ file_list: List[str] = field(default_factory=list) node_list: List[str] = field(default_factory=list) _code = codes.FILES_REMOVE_FROM_NODES_STARTED @property def message(self) -> str: return "Requesting remove {description}{where}".format( where=format_optional(format_list(self.node_list), " from {}"), description=format_list(self.file_list), ) @dataclass(frozen=True) class FilesRemoveFromNodesSkipped(ReportItemMessage): """ Files removal skipped due to unreachable nodes or not live cluster reason_type -- why was the action skipped (unreachable, not_live_cib) file_list -- contains description of files node_list -- node names the files are being removed from """ reason_type:
types.ReasonType file_list: List[str] node_list: List[str] _code = codes.FILES_REMOVE_FROM_NODES_SKIPPED @property def message(self) -> str: return ( "Removing {files} from {nodes} was skipped because {reason}. " "Please, remove the file(s) manually." ).format( files=format_list(self.file_list), nodes=format_list(self.node_list), reason=_skip_reason_to_string(self.reason_type), ) @dataclass(frozen=True) class FileRemoveFromNodeSuccess(ReportItemMessage): """ A file was successfully removed from a node node -- name of the destination node file_description -- name (code) of the successfully removed file """ node: str file_description: str _code = codes.FILE_REMOVE_FROM_NODE_SUCCESS @property def message(self) -> str: return ( f"{self.node}: successful removal of the file " f"'{self.file_description}'" ) @dataclass(frozen=True) class FileRemoveFromNodeError(ReportItemMessage): """ Cannot remove a file from a specific node node -- name of the destination node file_description -- file code reason -- error message """ node: str file_description: str reason: str _code = codes.FILE_REMOVE_FROM_NODE_ERROR @property def message(self) -> str: return ( f"{self.node}: unable to remove file '{self.file_description}': " f"{self.reason}" ) @dataclass(frozen=True) class ServiceCommandsOnNodesStarted(ReportItemMessage): """ Service actions were requested on nodes """ action_list: List[str] = field(default_factory=list) node_list: List[str] = field(default_factory=list) _code = codes.SERVICE_COMMANDS_ON_NODES_STARTED @property def message(self) -> str: return "Requesting {description}{where}".format( where=format_optional(format_list(self.node_list), " on {}"), description=format_list(self.action_list), ) @dataclass(frozen=True) class ServiceCommandsOnNodesSkipped(ReportItemMessage): """ Service actions skipped due to unreachable nodes or not live cluster reason_type -- why was the action skipped (unreachable, not_live_cib) action_list -- contains description of service actions node_list -- destinations where the action should have been executed """ reason_type: types.ReasonType action_list: List[str] node_list: List[str] _code = codes.SERVICE_COMMANDS_ON_NODES_SKIPPED @property def message(self) -> str: return ( "Running action(s) {actions} on {nodes} was skipped because " "{reason}. Please, run the action(s) manually."
).format( actions=format_list(self.action_list), nodes=format_list(self.node_list), reason=_skip_reason_to_string(self.reason_type), ) @dataclass(frozen=True) class ServiceCommandOnNodeSuccess(ReportItemMessage): """ A service command was run successfully on a node service_command_description -- name (code) of the successfully run service command """ node: str service_command_description: str _code = codes.SERVICE_COMMAND_ON_NODE_SUCCESS @property def message(self) -> str: return ( f"{self.node}: successful run of " f"'{self.service_command_description}'" ) @dataclass(frozen=True) class ServiceCommandOnNodeError(ReportItemMessage): """ A service command failed on a node service_command_description -- name (code) of the failed service command reason -- error message """ node: str service_command_description: str reason: str _code = codes.SERVICE_COMMAND_ON_NODE_ERROR @property def message(self) -> str: return ( f"{self.node}: service command failed: " f"{self.service_command_description}: {self.reason}" ) @dataclass(frozen=True) class InvalidResponseFormat(ReportItemMessage): """ Error message that a response in an invalid format has been received from the specified node node -- node name """ node: str _code = codes.INVALID_RESPONSE_FORMAT @property def message(self) -> str: return f"{self.node}: Invalid format of response" @dataclass(frozen=True) class SbdNotUsedCannotSetSbdOptions(ReportItemMessage): """ The cluster is not using SBD, cannot specify SBD options options -- list of specified not allowed SBD options node -- node name """ options: List[str] node: str _code = codes.SBD_NOT_USED_CANNOT_SET_SBD_OPTIONS @property def message(self) -> str: return ( "Cluster is not configured to use SBD, cannot specify SBD " "option(s) {options} for node '{node}'" ).format( options=format_list(self.options), node=self.node, ) @dataclass(frozen=True) class SbdWithDevicesNotUsedCannotSetDevice(ReportItemMessage): """ The cluster is not using SBD with devices, cannot specify a device.
node -- node name """ node: str _code = codes.SBD_WITH_DEVICES_NOT_USED_CANNOT_SET_DEVICE @property def message(self) -> str: return ( "Cluster is not configured to use SBD with shared storage, cannot " f"specify SBD devices for node '{self.node}'" ) @dataclass(frozen=True) class SbdNoDeviceForNode(ReportItemMessage): """ No SBD device defined for a node when it should be node -- node name sbd_enabled_in_cluster -- additional context for displaying the error """ node: str sbd_enabled_in_cluster: bool = False _code = codes.SBD_NO_DEVICE_FOR_NODE @property def message(self) -> str: if self.sbd_enabled_in_cluster: return ( "Cluster uses SBD with shared storage so SBD devices must be " "specified for all nodes, no device specified for node " f"'{self.node}'" ) return f"No SBD device specified for node '{self.node}'" @dataclass(frozen=True) class SbdTooManyDevicesForNode(ReportItemMessage): """ More than allowed number of SBD devices specified for a node node -- node name device_list -- list of SBD devices specified for the node max_devices -- maximum number of SBD devices """ node: str device_list: List[str] max_devices: int _code = codes.SBD_TOO_MANY_DEVICES_FOR_NODE @property def message(self) -> str: devices = format_list(self.device_list) return ( f"At most {self.max_devices} SBD devices can be specified for a " f"node, {devices} specified for node '{self.node}'" ) @dataclass(frozen=True) class SbdDevicePathNotAbsolute(ReportItemMessage): """ Path of SBD device is not absolute """ device: str node: str _code = codes.SBD_DEVICE_PATH_NOT_ABSOLUTE @property def message(self) -> str: return ( f"Device path '{self.device}' on node '{self.node}' is not absolute" ) @dataclass(frozen=True) class SbdDeviceDoesNotExist(ReportItemMessage): """ Specified device on node doesn't exist """ device: str node: str _code = codes.SBD_DEVICE_DOES_NOT_EXIST @property def message(self) -> str: return f"{self.node}: device '{self.device}' not found" @dataclass(frozen=True) class SbdDeviceIsNotBlockDevice(ReportItemMessage): """ Specified device on node is not a block device """ device: str node: str _code = codes.SBD_DEVICE_IS_NOT_BLOCK_DEVICE @property def message(self) -> str: return f"{self.node}: device '{self.device}' is not a block device" # TODO: generalize @dataclass(frozen=True) class SbdNotInstalled(ReportItemMessage): """ SBD is not installed on specified node node -- node name """ node: str _code = codes.SBD_NOT_INSTALLED @property def message(self) -> str: return f"SBD is not installed on node '{self.node}'" @dataclass(frozen=True) class WatchdogNotFound(ReportItemMessage): """ Watchdog doesn't exist on specified node node -- node name watchdog -- watchdog device path """ node: str watchdog: str _code = codes.WATCHDOG_NOT_FOUND @property def message(self) -> str: return ( f"Watchdog '{self.watchdog}' does not exist on node '{self.node}'" ) @dataclass(frozen=True) class WatchdogInvalid(ReportItemMessage): """ Watchdog path is not an absolute path watchdog -- watchdog device path """ watchdog: str _code = codes.WATCHDOG_INVALID @property def message(self) -> str: return f"Watchdog path '{self.watchdog}' is invalid."
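# A minimal sketch of how format_plural and format_list compose the SBD
# device messages above, assuming format_plural derives the plural from the
# list length and format_list renders a quoted, comma-separated list (the
# device paths are hypothetical):
#   SbdDeviceInitializationStarted(device_list=["/dev/sdb", "/dev/sdc"]).message
#   # -> "Initializing devices '/dev/sdb', '/dev/sdc'..."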
@dataclass(frozen=True) class UnableToGetSbdStatus(ReportItemMessage): """ There was a (communication or parsing) failure while obtaining the status of SBD from the specified node node -- node name reason -- reason of failure """ node: str reason: str _code = codes.UNABLE_TO_GET_SBD_STATUS @property def message(self) -> str: return "Unable to get status of SBD from node '{node}'{reason}".format( node=self.node, reason=format_optional(self.reason, ": {}"), ) @dataclass(frozen=True) class ClusterRestartRequiredToApplyChanges(ReportItemMessage): """ Warn user a cluster needs to be manually restarted to use new configuration """ _code = codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES @property def message(self) -> str: return "Cluster restart is required in order to apply these changes." @dataclass(frozen=True) class CibAlertRecipientAlreadyExists(ReportItemMessage): """ Recipient with the specified value already exists in the alert alert -- id of the alert to which the recipient belongs recipient -- value of the recipient """ alert: str recipient: str _code = codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS @property def message(self) -> str: return ( f"Recipient '{self.recipient}' in alert '{self.alert}' " "already exists" ) @dataclass(frozen=True) class CibAlertRecipientValueInvalid(ReportItemMessage): """ Invalid recipient value. recipient -- recipient value """ recipient: str _code = codes.CIB_ALERT_RECIPIENT_VALUE_INVALID @property def message(self) -> str: return f"Recipient value '{self.recipient}' is not valid." @dataclass(frozen=True) class CibUpgradeSuccessful(ReportItemMessage): """ Upgrade of CIB schema was successful. """ _code = codes.CIB_UPGRADE_SUCCESSFUL @property def message(self) -> str: return "CIB has been upgraded to the latest schema version." @dataclass(frozen=True) class CibUpgradeFailed(ReportItemMessage): """ Upgrade of CIB schema failed. reason -- reason of failure """ reason: str _code = codes.CIB_UPGRADE_FAILED @property def message(self) -> str: return f"Upgrading of CIB to the latest schema failed: {self.reason}" @dataclass(frozen=True) class CibUpgradeFailedToMinimalRequiredVersion(ReportItemMessage): """ Unable to upgrade CIB to minimal required schema version. current_version -- current version of CIB schema required_version -- required version of CIB schema """ current_version: str required_version: str _code = codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION @property def message(self) -> str: return ( "Unable to upgrade CIB to required schema version" f" {self.required_version} or higher. Current version is" f" {self.current_version}. Newer version of pacemaker is needed."
) @dataclass(frozen=True) class FileAlreadyExists(ReportItemMessage): file_type_code: file_type_codes.FileTypeCode file_path: str node: str = "" _code = codes.FILE_ALREADY_EXISTS @property def message(self) -> str: return "{node}{file_role} file '{file_path}' already exists".format( file_path=self.file_path, node=format_optional(self.node, NODE_PREFIX), file_role=_format_file_role(self.file_type_code), ) @dataclass(frozen=True) class FileIoError(ReportItemMessage): """ Unable to work with a file file_type_code -- file type, item of pcs.common.file_type_codes operation -- failed action, item of pcs.common.file.RawFileError reason -- an error message file_path -- file path, optional for cases when unknown (GhostFiles) """ file_type_code: file_type_codes.FileTypeCode operation: str reason: str file_path: str = "" _code = codes.FILE_IO_ERROR @property def message(self) -> str: return "Unable to {action} {file_role}{file_path}: {reason}".format( reason=self.reason, action=_format_file_action(self.operation), file_path=format_optional(self.file_path, " '{0}'"), file_role=_format_file_role(self.file_type_code), ) # TODO: not used? should be removed? @dataclass(frozen=True) class UnableToDetermineUserUid(ReportItemMessage): user: str _code = codes.UNABLE_TO_DETERMINE_USER_UID @property def message(self) -> str: return f"Unable to determine uid of user '{self.user}'" # TODO: not used? should be removed? @dataclass(frozen=True) class UnableToDetermineGroupGid(ReportItemMessage): group: str _code = codes.UNABLE_TO_DETERMINE_GROUP_GID @property def message(self) -> str: return f"Unable to determine gid of group '{self.group}'" @dataclass(frozen=True) class UnsupportedOperationOnNonSystemdSystems(ReportItemMessage): _code = codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS @property def message(self) -> str: return "unsupported operation on non systemd systems" @dataclass(frozen=True) class LiveEnvironmentRequired(ReportItemMessage): """ The command cannot operate in a non-live cluster (mocked / ghost files) forbidden_options -- list of items forbidden in the command """ forbidden_options: List[file_type_codes.FileTypeCode] _code = codes.LIVE_ENVIRONMENT_REQUIRED @property def message(self) -> str: return "This command does not support {forbidden_options}".format( forbidden_options=format_list( [str(item) for item in self.forbidden_options] ), ) @dataclass(frozen=True) class LiveEnvironmentRequiredForLocalNode(ReportItemMessage): """ The operation cannot be performed on CIB in file (not live cluster) if no node name is specified i.e. 
working with the local node """ _code = codes.LIVE_ENVIRONMENT_REQUIRED_FOR_LOCAL_NODE @property def message(self) -> str: return "Node(s) must be specified if mocked CIB is used" @dataclass(frozen=True) class LiveEnvironmentNotConsistent(ReportItemMessage): """ The command cannot operate with mixed live / non-live cluster configs mocked_files -- given mocked files (pcs.common.file_type_codes) required_files -- files that must be mocked as well """ mocked_files: List[file_type_codes.FileTypeCode] required_files: List[file_type_codes.FileTypeCode] _code = codes.LIVE_ENVIRONMENT_NOT_CONSISTENT @property def message(self) -> str: return ( "When {given} {_is} specified, {missing} must be specified as well" ).format( given=format_list([str(item) for item in self.mocked_files]), _is=format_plural(self.mocked_files, "is"), missing=format_list([str(item) for item in self.required_files]), ) @dataclass(frozen=True) class CorosyncNodeConflictCheckSkipped(ReportItemMessage): """ A command has been run with -f, can't check corosync.conf for node conflicts reason_type -- why was the action skipped (unreachable, not_live_cib) """ reason_type: types.ReasonType _code = codes.COROSYNC_NODE_CONFLICT_CHECK_SKIPPED @property def message(self) -> str: return ( "Unable to check if there is a conflict with nodes set in corosync " "because {reason}" ).format(reason=_skip_reason_to_string(self.reason_type)) @dataclass(frozen=True) class CorosyncQuorumAtbCannotBeDisabledDueToSbd(ReportItemMessage): """ Quorum option auto_tie_breaker cannot be disabled due to SBD. """ _code = codes.COROSYNC_QUORUM_ATB_CANNOT_BE_DISABLED_DUE_TO_SBD @property def message(self) -> str: return ( "Unable to disable auto_tie_breaker, SBD fencing would have no " "effect" ) @dataclass(frozen=True) class CorosyncQuorumAtbWillBeEnabledDueToSbd(ReportItemMessage): """ Quorum option auto_tie_breaker will be enabled due to a user action in order to make SBD fencing effective. The cluster has to be stopped to make this change. """ _code = codes.COROSYNC_QUORUM_ATB_WILL_BE_ENABLED_DUE_TO_SBD @property def message(self) -> str: return ( "auto_tie_breaker quorum option will be enabled to make SBD " "fencing effective. Cluster has to be offline to be able to make " "this change." ) @dataclass(frozen=True) class CibAclRoleIsAlreadyAssignedToTarget(ReportItemMessage): """ Error that the ACL target or group already has the role assigned. """ role_id: str target_id: str _code = codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET @property def message(self) -> str: return ( f"Role '{self.role_id}' is already assigned to '{self.target_id}'" ) @dataclass(frozen=True) class CibAclRoleIsNotAssignedToTarget(ReportItemMessage): """ Error that the ACL role is not assigned to the target or group """ role_id: str target_id: str _code = codes.CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET @property def message(self) -> str: return f"Role '{self.role_id}' is not assigned to '{self.target_id}'" @dataclass(frozen=True) class CibAclTargetAlreadyExists(ReportItemMessage): """ Error that a target with the specified id already exists in the configuration.
""" target_id: str _code = codes.CIB_ACL_TARGET_ALREADY_EXISTS @property def message(self) -> str: return f"'{self.target_id}' already exists" @dataclass(frozen=True) class CibFencingLevelAlreadyExists(ReportItemMessage): """ Fencing level already exists, it cannot be created """ level: str target_type: str target_value: Optional[Tuple[str, str]] devices: List[str] _code = codes.CIB_FENCING_LEVEL_ALREADY_EXISTS @property def message(self) -> str: return ( "Fencing level for '{target}' at level '{level}' " "with device(s) {device_list} already exists" ).format( level=self.level, device_list=format_list(self.devices), target=_format_fencing_level_target( self.target_type, self.target_value ), ) @dataclass(frozen=True) class CibFencingLevelDoesNotExist(ReportItemMessage): """ Fencing level does not exist, it cannot be updated or deleted """ level: str = "" target_type: Optional[str] = None target_value: Optional[Tuple[str, str]] = None devices: List[str] = field(default_factory=list) _code = codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST @property def message(self) -> str: return ( "Fencing level {part_target}{part_level}{part_devices}does not " "exist" ).format( part_target=( "for '{0}' ".format( _format_fencing_level_target( self.target_type, self.target_value ) ) if self.target_type and self.target_value else "" ), part_level=format_optional(self.level, "at level '{}' "), part_devices=format_optional( format_list(self.devices), "with device(s) {0} " ), ) @dataclass(frozen=True) class UseCommandNodeAddRemote(ReportItemMessage): """ Advise the user to use a more appropriate command. """ _code = codes.USE_COMMAND_NODE_ADD_REMOTE @property def message(self) -> str: return "this command is not sufficient for creating a remote connection" @dataclass(frozen=True) class UseCommandNodeAddGuest(ReportItemMessage): """ Advise the user to use a more appropriate command. """ _code = codes.USE_COMMAND_NODE_ADD_GUEST @property def message(self) -> str: return "this command is not sufficient for creating a guest node" @dataclass(frozen=True) class UseCommandNodeRemoveGuest(ReportItemMessage): """ Advise the user to use a more appropriate command. """ _code = codes.USE_COMMAND_NODE_REMOVE_GUEST @property def message(self) -> str: return "this command is not sufficient for removing a guest node" @dataclass(frozen=True) class TmpFileWrite(ReportItemMessage): """ Content has been written into a temporary file file_path -- the file path content -- content which has been written """ file_path: str content: str _code = codes.TMP_FILE_WRITE @property def message(self) -> str: return ( f"Writing to a temporary file {self.file_path}:\n" f"--Debug Content Start--\n{self.content}\n--Debug Content End--\n" ) @dataclass(frozen=True) class NodeAddressesUnresolvable(ReportItemMessage): """ Unable to resolve addresses of cluster nodes to be added address_list -- a list of unresolvable addresses """ address_list: List[str] _code = codes.NODE_ADDRESSES_UNRESOLVABLE @property def message(self) -> str: addrs = format_list(self.address_list) return f"Unable to resolve addresses: {addrs}" @dataclass(frozen=True) class UnableToPerformOperationOnAnyNode(ReportItemMessage): """ This report is raised whenever the pcs.lib.communication.tools.OneByOneStrategyMixin strategy mixin is used for network communication and the operation failed on all available hosts, making it impossible to continue.
""" _code = codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE @property def message(self) -> str: return ( "Unable to perform operation on any available node/host, therefore " "it is not possible to continue" ) @dataclass(frozen=True) class HostNotFound(ReportItemMessage): """ Hosts with names in host_list are not included in pcs known hosts, therefore it is not possible to communicate with them. """ host_list: List[str] _code = codes.HOST_NOT_FOUND @property def message(self) -> str: pluralize = lambda word: format_plural(self.host_list, word) return "{host} {hosts_comma} {_is} not known to pcs".format( host=pluralize("host"), hosts_comma=format_list(self.host_list), _is=pluralize("is"), ).capitalize() @dataclass(frozen=True) class NoneHostFound(ReportItemMessage): _code = codes.NONE_HOST_FOUND @property def message(self) -> str: return "None of the hosts are known to pcs." @dataclass(frozen=True) class HostAlreadyAuthorized(ReportItemMessage): host_name: str _code = codes.HOST_ALREADY_AUTHORIZED @property def message(self) -> str: return f"{self.host_name}: Already authorized" @dataclass(frozen=True) class ClusterDestroyStarted(ReportItemMessage): host_name_list: List[str] _code = codes.CLUSTER_DESTROY_STARTED @property def message(self) -> str: hosts = format_list(self.host_name_list) return f"Destroying cluster on hosts: {hosts}..." @dataclass(frozen=True) class ClusterDestroySuccess(ReportItemMessage): node: str _code = codes.CLUSTER_DESTROY_SUCCESS @property def message(self) -> str: return f"{self.node}: Successfully destroyed cluster" @dataclass(frozen=True) class ClusterEnableStarted(ReportItemMessage): host_name_list: List[str] _code = codes.CLUSTER_ENABLE_STARTED @property def message(self) -> str: hosts = format_list(self.host_name_list) return f"Enabling cluster on hosts: {hosts}..." @dataclass(frozen=True) class ClusterEnableSuccess(ReportItemMessage): node: str _code = codes.CLUSTER_ENABLE_SUCCESS @property def message(self) -> str: return f"{self.node}: Cluster enabled" @dataclass(frozen=True) class ClusterStartStarted(ReportItemMessage): host_name_list: List[str] _code = codes.CLUSTER_START_STARTED @property def message(self) -> str: hosts = format_list(self.host_name_list) return f"Starting cluster on hosts: {hosts}..."
@dataclass(frozen=True) class ClusterStartSuccess(ReportItemMessage): node: str _code = codes.CLUSTER_START_SUCCESS @property def message(self) -> str: return f"{self.node}: Cluster started" @dataclass(frozen=True) class ServiceNotInstalled(ReportItemMessage): node: str service_list: List[str] _code = codes.SERVICE_NOT_INSTALLED @property def message(self) -> str: services = format_list(self.service_list) return ( f"{self.node}: Required cluster services not installed: {services}" ) @dataclass(frozen=True) class HostAlreadyInClusterConfig(ReportItemMessage): """ A host, which is being added to a cluster, already has cluster configs host_name -- a name of the host which is in a cluster already """ host_name: str _code = codes.HOST_ALREADY_IN_CLUSTER_CONFIG @property def message(self) -> str: return ( f"{self.host_name}: The host seems to be in a cluster already as " "cluster configuration files have been found on the host" ) @dataclass(frozen=True) class HostAlreadyInClusterServices(ReportItemMessage): """ A host, which is being added to a cluster, already runs cluster daemons host_name -- a name of the host which is in a cluster already service_list -- list of cluster daemons running on the host """ host_name: str service_list: List[str] _code = codes.HOST_ALREADY_IN_CLUSTER_SERVICES @property def message(self) -> str: services = format_list(self.service_list) services_plural = format_plural(self.service_list, "service") are_plural = format_plural(self.service_list, "is") return ( f"{self.host_name}: The host seems to be in a cluster already as " f"the following {services_plural} {are_plural} found to be " f"running: {services}. If the host is not part of a cluster, stop " f"the {services_plural} and retry" ) @dataclass(frozen=True) class ServiceVersionMismatch(ReportItemMessage): service: str hosts_version: Mapping[str, str] _code = codes.SERVICE_VERSION_MISMATCH @property def message(self) -> str: version_host: Dict[str, List[str]] = defaultdict(list) for host_name, version in self.hosts_version.items(): version_host[version].append(host_name) parts = [ "Hosts do not have the same version of '{}'".format(self.service) ] # List most common versions first. for version, hosts in sorted( version_host.items(), key=lambda pair: len(pair[1]), reverse=True ): # pylint: disable=cell-var-from-loop pluralize = lambda word: format_plural(hosts, word) parts.append( "{host} {hosts} {has} version {version}".format( host=pluralize("host"), hosts=format_list(hosts), has=pluralize("has"), version=version, ) ) return "; ".join(parts) @dataclass(frozen=True) class WaitForNodeStartupStarted(ReportItemMessage): node_name_list: List[str] _code = codes.WAIT_FOR_NODE_STARTUP_STARTED @property def message(self) -> str: nodes = format_list(self.node_name_list) return f"Waiting for node(s) to start: {nodes}..." 
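# Hedged examples of the message rendering in HostNotFound and
# ServiceVersionMismatch above, assuming format_plural picks the singular
# form for a single-item list, format_list quotes the items, and the most
# common version is listed first (host names and versions are hypothetical):
#   HostNotFound(host_list=["node2"]).message
#   # -> "Host 'node2' is not known to pcs"
#   ServiceVersionMismatch(
#       service="pcsd",
#       hosts_version={"n1": "0.10.11", "n2": "0.10.11", "n3": "0.10.8"},
#   ).message
#   # -> "Hosts do not have the same version of 'pcsd'; hosts 'n1', 'n2'
#   #     have version 0.10.11; host 'n3' has version 0.10.8"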
@dataclass(frozen=True) class WaitForNodeStartupTimedOut(ReportItemMessage): _code = codes.WAIT_FOR_NODE_STARTUP_TIMED_OUT @property def message(self) -> str: return "Node(s) startup timed out" @dataclass(frozen=True) class WaitForNodeStartupError(ReportItemMessage): _code = codes.WAIT_FOR_NODE_STARTUP_ERROR @property def message(self) -> str: return "Unable to verify all nodes have started" @dataclass(frozen=True) class WaitForNodeStartupWithoutStart(ReportItemMessage): """ User requested waiting for nodes to start without instructing pcs to start the nodes """ _code = codes.WAIT_FOR_NODE_STARTUP_WITHOUT_START @property def message(self) -> str: return "Cannot specify 'wait' without specifying 'start'" @dataclass(frozen=True) class PcsdVersionTooOld(ReportItemMessage): node: str _code = codes.PCSD_VERSION_TOO_OLD @property def message(self) -> str: return ( f"{self.node}: Old version of pcsd is running on the node, " "therefore it is unable to perform the action" ) @dataclass(frozen=True) class PcsdSslCertAndKeyDistributionStarted(ReportItemMessage): """ We are about to distribute pcsd SSL certificate and key to nodes node_name_list -- node names to distribute to """ node_name_list: List[str] _code = codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED @property def message(self) -> str: nodes = format_list(self.node_name_list) return f"Synchronizing pcsd SSL certificates on node(s) {nodes}..." @dataclass(frozen=True) class PcsdSslCertAndKeySetSuccess(ReportItemMessage): """ Pcsd SSL certificate and key have been successfully saved on a node node -- node name """ node: str _code = codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS @property def message(self) -> str: return f"{self.node}: Success" @dataclass(frozen=True) class ClusterWillBeDestroyed(ReportItemMessage): """ If the user continues with force, the cluster will be destroyed on some hosts """ _code = codes.CLUSTER_WILL_BE_DESTROYED @property def message(self) -> str: return ( "Some nodes are already in a cluster. Enforcing this will destroy " "existing cluster on those nodes. You should remove the nodes from " "their clusters instead to keep the clusters working properly" ) @dataclass(frozen=True) class ClusterSetupSuccess(ReportItemMessage): _code = codes.CLUSTER_SETUP_SUCCESS @property def message(self) -> str: return "Cluster has been successfully set up." @dataclass(frozen=True) class UsingDefaultAddressForHost(ReportItemMessage): """ When no address was specified for a host, a default address was used for it """ host_name: str address: str address_source: types.DefaultAddressSource _code = codes.USING_DEFAULT_ADDRESS_FOR_HOST @property def message(self) -> str: return ( f"No addresses specified for host '{self.host_name}', using " f"'{self.address}'" ) @dataclass(frozen=True) class ResourceInBundleNotAccessible(ReportItemMessage): bundle_id: str inner_resource_id: str _code = codes.RESOURCE_IN_BUNDLE_NOT_ACCESSIBLE @property def message(self) -> str: return ( f"Resource '{self.inner_resource_id}' will not be accessible by " f"the cluster inside bundle '{self.bundle_id}', at least one of " "bundle options 'control-port' or 'ip-range-start' has to be " "specified" ) @dataclass(frozen=True) class UsingDefaultWatchdog(ReportItemMessage): """ No watchdog has been specified for the node, therefore pcs will use a default watchdog. """ watchdog: str node: str _code = codes.USING_DEFAULT_WATCHDOG @property def message(self) -> str: return ( f"No watchdog has been specified for node '{self.node}'.
Using " f"default watchdog '{self.watchdog}'" ) @dataclass(frozen=True) class CannotRemoveAllClusterNodes(ReportItemMessage): """ It is not possible to remove all cluster nodes using the 'pcs cluster node remove' command. 'pcs cluster destroy --all' should be used in such a case. """ _code = codes.CANNOT_REMOVE_ALL_CLUSTER_NODES @property def message(self) -> str: return "No nodes would be left in the cluster" @dataclass(frozen=True) class UnableToConnectToAnyRemainingNode(ReportItemMessage): _code = codes.UNABLE_TO_CONNECT_TO_ANY_REMAINING_NODE @property def message(self) -> str: return "Unable to connect to any remaining cluster node" @dataclass(frozen=True) class UnableToConnectToAllRemainingNodes(ReportItemMessage): """ Some of the remaining cluster nodes are unreachable. 'pcs cluster sync' should be executed on the online nodes once the offline nodes come back online. node_list -- names of nodes which are staying in the cluster and are currently unreachable """ node_list: List[str] _code = codes.UNABLE_TO_CONNECT_TO_ALL_REMAINING_NODE @property def message(self) -> str: return ("Remaining cluster {node} {nodes} could not be reached").format( node=format_plural(self.node_list, "node"), nodes=format_list(self.node_list), ) @dataclass(frozen=True) class NodesToRemoveUnreachable(ReportItemMessage): """ Nodes which should be removed are currently unreachable. 'pcs cluster destroy' should be executed on these nodes when they come back online. node_list -- names of nodes which are being removed from the cluster but they are currently unreachable """ node_list: List[str] _code = codes.NODES_TO_REMOVE_UNREACHABLE @property def message(self) -> str: return ( "Removed {node} {nodes} could not be reached and subsequently " "deconfigured" ).format( node=format_plural(self.node_list, "node"), nodes=format_list(self.node_list), ) @dataclass(frozen=True) class NodeUsedAsTieBreaker(ReportItemMessage): """ Node which should be removed is currently used as a tie breaker for a qdevice, therefore it is not possible to remove it from the cluster. node -- node name node_id -- node id """ node: str node_id: int _code = codes.NODE_USED_AS_TIE_BREAKER @property def message(self) -> str: return ( f"Node '{self.node}' with id '{self.node_id}' is used as a tie " "breaker for a qdevice" ) @dataclass(frozen=True) class CorosyncQuorumWillBeLost(ReportItemMessage): """ Ongoing action will cause loss of the quorum in the cluster. """ _code = codes.COROSYNC_QUORUM_WILL_BE_LOST @property def message(self) -> str: return "This action will cause a loss of the quorum" @dataclass(frozen=True) class CorosyncQuorumLossUnableToCheck(ReportItemMessage): """ It is not possible to check if ongoing action will cause loss of the quorum """ _code = codes.COROSYNC_QUORUM_LOSS_UNABLE_TO_CHECK @property def message(self) -> str: return ( "Unable to determine whether this action will cause a loss of the " "quorum" ) @dataclass(frozen=True) class SbdListWatchdogError(ReportItemMessage): """ Unable to get the list of available watchdogs from sbd. The sbd command returned a non-zero exit code. reason -- stderr of command """ reason: str _code = codes.SBD_LIST_WATCHDOG_ERROR @property def message(self) -> str: return f"Unable to query available watchdogs from sbd: {self.reason}" @dataclass(frozen=True) class SbdWatchdogNotSupported(ReportItemMessage): """ Specified watchdog is not supported in sbd (softdog?).
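For example (illustrative), the kernel softdog module provides only a software watchdog, which sbd typically reports as unsupported.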
node -- node name watchdog -- watchdog path """ node: str watchdog: str _code = codes.SBD_WATCHDOG_NOT_SUPPORTED @property def message(self) -> str: return ( f"{self.node}: Watchdog '{self.watchdog}' is not supported (it " "may be a software watchdog)" ) @dataclass(frozen=True) class SbdWatchdogValidationInactive(ReportItemMessage): """ Warning message about not validating watchdog. """ _code = codes.SBD_WATCHDOG_VALIDATION_INACTIVE @property def message(self) -> str: return "Not validating the watchdog" @dataclass(frozen=True) class SbdWatchdogTestError(ReportItemMessage): """ Sbd test watchdog exited with an error. """ reason: str _code = codes.SBD_WATCHDOG_TEST_ERROR @property def message(self) -> str: return f"Unable to initialize test of the watchdog: {self.reason}" @dataclass(frozen=True) class SbdWatchdogTestMultipleDevices(ReportItemMessage): """ No watchdog device has been specified for test. Because of multiple available watchdogs, watchdog device to test has to be specified. """ _code = codes.SBD_WATCHDOG_TEST_MULTIPLE_DEVICES @property def message(self) -> str: return ( "Multiple watchdog devices available, therefore, watchdog which " "should be tested has to be specified." ) @dataclass(frozen=True) class SbdWatchdogTestFailed(ReportItemMessage): """ System has not been reset. """ _code = codes.SBD_WATCHDOG_TEST_FAILED @property def message(self) -> str: return "System should have been reset already" @dataclass(frozen=True) class SystemWillReset(ReportItemMessage): _code = codes.SYSTEM_WILL_RESET @property def message(self) -> str: return "System will reset shortly" @dataclass(frozen=True) class ResourceBundleUnsupportedContainerType(ReportItemMessage): bundle_id: str supported_container_types: List[str] _code = codes.RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE @property def message(self) -> str: container_types = format_list(self.supported_container_types) return ( f"Bundle '{self.bundle_id}' uses unsupported container type, " "therefore it is not possible to set its container options. 
" f"Supported container types are: {container_types}" ) @dataclass(frozen=True) class FenceHistoryCommandError(ReportItemMessage): """ Pacemaker command for working with fence history returned an error reason -- output of the pacemaker command command -- the action of the command - what it should have achieved """ reason: str command: types.FenceHistoryCommandType _code = codes.FENCE_HISTORY_COMMAND_ERROR @property def message(self) -> str: command_label = { const.FENCE_HISTORY_COMMAND_CLEANUP: "cleanup", const.FENCE_HISTORY_COMMAND_SHOW: "show", const.FENCE_HISTORY_COMMAND_UPDATE: "update", }.get(self.command, self.command) return f"Unable to {command_label} fence history: {self.reason}" @dataclass(frozen=True) class FenceHistoryNotSupported(ReportItemMessage): """ Pacemaker does not support the fence history feature """ _code = codes.FENCE_HISTORY_NOT_SUPPORTED @property def message(self) -> str: return "Fence history is not supported, please upgrade pacemaker" @dataclass(frozen=True) class ResourceInstanceAttrValueNotUnique(ReportItemMessage): """ Value of a resource instance attribute is not unique in the configuration when creating/updating a resource instance_attr_name -- name of attr which should be unique instance_attr_value -- value which is already used by some resources agent_name -- resource agent name of resource resource_id_list -- resource ids which already have the instance_attr_name set to instance_attr_value """ instance_attr_name: str instance_attr_value: str agent_name: str resource_id_list: List[str] _code = codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE @property def message(self) -> str: return ( "Value '{val}' of option '{attr}' is not unique across " "'{agent}' resources. Following resources are configured " "with the same value of the instance attribute: {res_id_list}" ).format( val=self.instance_attr_value, attr=self.instance_attr_name, agent=self.agent_name, res_id_list=format_list(self.resource_id_list), ) @dataclass(frozen=True) class CannotLeaveGroupEmptyAfterMove(ReportItemMessage): """ User is trying to add resources to another group and their old group would be left empty and need to be deleted. Deletion is not yet migrated to lib. str group_id -- ID of original group that would be deleted list inner_resource_ids -- List of group members """ group_id: str inner_resource_ids: List[str] _code = codes.CANNOT_LEAVE_GROUP_EMPTY_AFTER_MOVE @property def message(self) -> str: return ( "Unable to move {resource_pl} {resource_list} as it would leave " "group '{group_id}' empty." 
).format( resource_pl=format_plural(self.inner_resource_ids, "resource"), resource_list=format_list(self.inner_resource_ids), group_id=self.group_id, ) @dataclass(frozen=True) class CannotMoveResourceBundle(ReportItemMessage): """ User is trying to move a bundle resource which is not possible resource_id -- id of the resource to be moved """ resource_id: str _code = codes.CANNOT_MOVE_RESOURCE_BUNDLE @property def message(self) -> str: return "cannot move bundle resources" @dataclass(frozen=True) class CannotMoveResourceClone(ReportItemMessage): """ User is trying to move a clone resource which is not possible resource_id -- id of the resource to be moved """ resource_id: str _code = codes.CANNOT_MOVE_RESOURCE_CLONE @property def message(self) -> str: return "cannot move cloned resources" @dataclass(frozen=True) class CannotMoveResourcePromotableInner(ReportItemMessage): """ User is trying to move a promotable clone's inner resource resource_id -- id of the resource to be moved promotable_id -- id of relevant parent promotable resource """ resource_id: str promotable_id: str _code = codes.CANNOT_MOVE_RESOURCE_PROMOTABLE_INNER @property def message(self) -> str: return ( "to move promotable clone resources you must use the " f"promotable clone id ({self.promotable_id})" ) @dataclass(frozen=True) class CannotMoveResourceMasterResourceNotPromotable(ReportItemMessage): """ User is trying to move a non-promotable resource and limit it to master role resource_id -- id of the resource to be moved promotable_id -- id of relevant parent promotable resource """ resource_id: str promotable_id: str = "" _code = codes.CANNOT_MOVE_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE @property def message(self) -> str: return _resource_move_ban_clear_master_resource_not_promotable( self.promotable_id ) @dataclass(frozen=True) class CannotMoveResourceStoppedNoNodeSpecified(ReportItemMessage): """ When moving a stopped resource, a node to move it to must be specified resource_id -- id of the resource to be moved """ resource_id: str _code = codes.CANNOT_MOVE_RESOURCE_STOPPED_NO_NODE_SPECIFIED @property def message(self) -> str: # Use both "moving" and "banning" to let user know using "ban" instead # of "move" will not help return "You must specify a node when moving/banning a stopped resource" @dataclass(frozen=True) class ResourceMovePcmkError(ReportItemMessage): """ crm_resource exited with an error when moving a resource resource_id -- id of the resource to be moved stdout -- stdout of crm_resource stderr -- stderr of crm_resource """ resource_id: str stdout: str stderr: str _code = codes.RESOURCE_MOVE_PCMK_ERROR @property def message(self) -> str: return _stdout_stderr_to_string( self.stdout, self.stderr, prefix=f"cannot move resource '{self.resource_id}'", ) @dataclass(frozen=True) class ResourceMovePcmkSuccess(ReportItemMessage): """ crm_resource exited successfully when moving a resource resource_id -- id of the resource to be moved stdout -- stdout of crm_resource stderr -- stderr of crm_resource """ resource_id: str stdout: str stderr: str _code = codes.RESOURCE_MOVE_PCMK_SUCCESS @property def message(self) -> str: return _resource_move_ban_pcmk_success(self.stdout, self.stderr) @dataclass(frozen=True) class CannotBanResourceMasterResourceNotPromotable(ReportItemMessage): """ User is trying to ban a non-promotable resource and limit it to master role resource_id -- id of the resource to be banned promotable_id -- id of relevant parent promotable resource """ resource_id: str promotable_id: str = "" _code = 
codes.CANNOT_BAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE @property def message(self) -> str: return _resource_move_ban_clear_master_resource_not_promotable( self.promotable_id ) @dataclass(frozen=True) class CannotBanResourceStoppedNoNodeSpecified(ReportItemMessage): """ When banning a stopped resource, a node to ban it on must be specified resource_id -- id of the resource to be banned """ resource_id: str _code = codes.CANNOT_BAN_RESOURCE_STOPPED_NO_NODE_SPECIFIED @property def message(self) -> str: # Use both "moving" and "banning" to let user know using "move" instead # of "ban" will not help return "You must specify a node when moving/banning a stopped resource" @dataclass(frozen=True) class ResourceBanPcmkError(ReportItemMessage): """ crm_resource exited with an error when banning a resource resource_id -- id of the resource to be banned stdout -- stdout of crm_resource stderr -- stderr of crm_resource """ resource_id: str stdout: str stderr: str _code = codes.RESOURCE_BAN_PCMK_ERROR @property def message(self) -> str: # Pacemaker no longer prints crm_resource specific options since commit # 8008a5f0c0aa728fbce25f60069d622d0bcbbc9f. There is no need to # translate them or anything else anymore. return _stdout_stderr_to_string( self.stdout, self.stderr, prefix=f"cannot ban resource '{self.resource_id}'", ) @dataclass(frozen=True) class ResourceBanPcmkSuccess(ReportItemMessage): """ crm_resource exited successfully when banning a resource resource_id -- id of the resource to be banned stdout -- stdout of crm_resource stderr -- stderr of crm_resource """ resource_id: str stdout: str stderr: str _code = codes.RESOURCE_BAN_PCMK_SUCCESS @property def message(self) -> str: return _resource_move_ban_pcmk_success(self.stdout, self.stderr) @dataclass(frozen=True) class CannotUnmoveUnbanResourceMasterResourceNotPromotable(ReportItemMessage): """ User is trying to unmove/unban master of a non-promotable resource resource_id -- id of the resource to be unmoved/unbanned promotable_id -- id of relevant parent promotable resource """ resource_id: str promotable_id: str = "" _code = codes.CANNOT_UNMOVE_UNBAN_RESOURCE_MASTER_RESOURCE_NOT_PROMOTABLE @property def message(self) -> str: return _resource_move_ban_clear_master_resource_not_promotable( self.promotable_id ) @dataclass(frozen=True) class ResourceUnmoveUnbanPcmkExpiredNotSupported(ReportItemMessage): """ crm_resource does not support --expired when unmoving/unbanning a resource """ _code = codes.RESOURCE_UNMOVE_UNBAN_PCMK_EXPIRED_NOT_SUPPORTED @property def message(self) -> str: return "expired is not supported, please upgrade pacemaker" @dataclass(frozen=True) class ResourceUnmoveUnbanPcmkError(ReportItemMessage): """ crm_resource exited with an error when unmoving/unbanning a resource resource_id -- id of the resource to be unmoved/unbanned stdout -- stdout of crm_resource stderr -- stderr of crm_resource """ resource_id: str stdout: str stderr: str _code = codes.RESOURCE_UNMOVE_UNBAN_PCMK_ERROR @property def message(self) -> str: return _stdout_stderr_to_string( self.stdout, self.stderr, prefix=f"cannot clear resource '{self.resource_id}'", ) @dataclass(frozen=True) class ResourceUnmoveUnbanPcmkSuccess(ReportItemMessage): """ crm_resource exited successfully when clearing unmoving/unbanning a resource resource_id -- id of the resource to be unmoved/unbanned stdout -- stdout of crm_resource stderr -- stderr of crm_resource """ resource_id: str stdout: str stderr: str _code = codes.RESOURCE_UNMOVE_UNBAN_PCMK_SUCCESS @property def 
message(self) -> str: return _stdout_stderr_to_string(self.stdout, self.stderr) @dataclass(frozen=True) class ResourceMoveConstraintCreated(ReportItemMessage): """ A constraint to move resource has been created. resource_id -- id of the resource to be moved """ resource_id: str _code = codes.RESOURCE_MOVE_CONSTRAINT_CREATED @property def message(self) -> str: return ( f"Location constraint to move resource '{self.resource_id}' has " "been created" ) @dataclass(frozen=True) class ResourceMoveConstraintRemoved(ReportItemMessage): """ A constraint to move resource has been removed. resource_id -- id of the resource to be moved """ resource_id: str _code = codes.RESOURCE_MOVE_CONSTRAINT_REMOVED @property def message(self) -> str: return ( f"Location constraint created to move resource " f"'{self.resource_id}' has been removed" ) @dataclass(frozen=True) class ResourceMoveAffectsOtherResources(ReportItemMessage): """ Moving a resource will also affect other resources. resource_id -- id of the resource to be moved affected_resources -- resources affected by the move operation """ resource_id: str affected_resources: List[str] _code = codes.RESOURCE_MOVE_AFFECTS_OTRHER_RESOURCES @property def message(self) -> str: return ( "Moving resource '{resource_id}' affects {resource_pl}: " "{affected_resources}" ).format( resource_id=self.resource_id, resource_pl=format_plural(self.affected_resources, "resource"), affected_resources=format_list(self.affected_resources), ) @dataclass(frozen=True) class ResourceMoveAutocleanSimulationFailure(ReportItemMessage): """ Autocleaning a constraint used for moving the resource would cause moving the resource itself or other resources. resource_id -- id of the resource to be moved others_affected -- True if also other resource would be affected, False otherwise """ resource_id: str others_affected: bool _code = codes.RESOURCE_MOVE_AUTOCLEAN_SIMULATION_FAILURE @property def message(self) -> str: return ( "Unable to ensure that moved resource '{resource_id}'{others} will " "stay on the same node after a constraint used for moving it is " "removed." 
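# For illustration only (id is made up): with resource_id="r1" and
# others_affected=True this renders as "Unable to ensure that moved
# resource 'r1' or other resources will stay on the same node after a
# constraint used for moving it is removed."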
).format( resource_id=self.resource_id, others=" or other resources" if self.others_affected else "", ) @dataclass(frozen=True) class ParseErrorJsonFile(ReportItemMessage): # pylint: disable=too-many-instance-attributes """ Unable to parse a file with JSON data file_type_code -- item from pcs.common.file_type_codes line_number -- the line where parsing failed column_number -- the column where parsing failed position -- the start index of the file where parsing failed reason -- the unformatted error message full_msg -- full error message including above int attributes file_path -- path to the parsed file if available """ file_type_code: file_type_codes.FileTypeCode line_number: int column_number: int position: int reason: str full_msg: str file_path: str _code = codes.PARSE_ERROR_JSON_FILE @property def message(self) -> str: return ( "Unable to parse {_file_type} file{_file_path}: {full_msg}" ).format( _file_path=format_optional(self.file_path, " '{}'"), _file_type=_format_file_role(self.file_type_code), full_msg=self.full_msg, ) @dataclass(frozen=True) class ResourceDisableAffectsOtherResources(ReportItemMessage): """ User requested disabling resources without affecting other resources but some resources would be affected disabled_resource_list -- list of resources to disable affected_resource_list -- other affected resources """ disabled_resource_list: List[str] affected_resource_list: List[str] _code = codes.RESOURCE_DISABLE_AFFECTS_OTHER_RESOURCES @property def message(self) -> str: return ( "Disabling specified {disabled_resource_pl} would have an effect " "on {this_pl} {affected_resource_pl}: " "{affected_resource_list}".format( disabled_resource_pl=format_plural( self.disabled_resource_list, "resource" ), this_pl=format_plural( self.affected_resource_list, "this", "these" ), affected_resource_pl=format_plural( self.affected_resource_list, "resource" ), affected_resource_list=format_list(self.affected_resource_list), ) ) @dataclass(frozen=True) class DrConfigAlreadyExist(ReportItemMessage): """ Disaster recovery config exists when the opposite was expected """ _code = codes.DR_CONFIG_ALREADY_EXIST @property def message(self) -> str: return "Disaster-recovery already configured" @dataclass(frozen=True) class DrConfigDoesNotExist(ReportItemMessage): """ Disaster recovery config does not exist when the opposite was expected """ _code = codes.DR_CONFIG_DOES_NOT_EXIST @property def message(self) -> str: return "Disaster-recovery is not configured" @dataclass(frozen=True) class NodeInLocalCluster(ReportItemMessage): """ Node is part of local cluster and it cannot be used for example to set up disaster-recovery site node -- node which is part of local cluster """ node: str _code = codes.NODE_IN_LOCAL_CLUSTER @property def message(self) -> str: return f"Node '{self.node}' is part of local cluster" @dataclass(frozen=True) class BoothLackOfSites(ReportItemMessage): """ Less than 2 booth sites entered. But it does not make sense. sites -- contains currently entered sites """ sites: List[str] _code = codes.BOOTH_LACK_OF_SITES @property def message(self) -> str: sites = format_list(self.sites) if self.sites else "missing" return ( "lack of sites for booth configuration (need 2 at least): sites " f"{sites}" ) @dataclass(frozen=True) class BoothEvenPeersNumber(ReportItemMessage): """ Booth requires odd number of peers. But even number of peers was entered. 
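For example (made-up numbers), entering 4 peers (2 sites and 2 arbitrators) is rejected with: "odd number of peers is required (entered 4 peers)". 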
number -- how many peers were entered """ number: int _code = codes.BOOTH_EVEN_PEERS_NUM @property def message(self) -> str: return f"odd number of peers is required (entered {self.number} peers)" @dataclass(frozen=True) class BoothAddressDuplication(ReportItemMessage): """ The address of each peer must be unique, but duplicate addresses were entered. duplicate_addresses -- addresses entered multiple times """ duplicate_addresses: List[str] _code = codes.BOOTH_ADDRESS_DUPLICATION @property def message(self) -> str: addresses = format_list(self.duplicate_addresses) return f"duplicate address for booth configuration: {addresses}" @dataclass(frozen=True) class BoothConfigUnexpectedLines(ReportItemMessage): """ Lines not conforming to the expected config structure were found in a booth config line_list -- the invalid lines file_path -- path to the conf file if available """ line_list: List[str] file_path: str = "" _code = codes.BOOTH_CONFIG_UNEXPECTED_LINES @property def message(self) -> str: return "unexpected {line_pl} in booth config{path}:\n{lines}".format( line_pl=format_plural(self.line_list, "line"), path=format_optional(self.file_path, " '{}'"), lines="\n".join(self.line_list), ) @dataclass(frozen=True) class BoothInvalidName(ReportItemMessage): """ Booth instance name is not valid name -- entered booth instance name forbidden_characters -- characters the name cannot contain """ name: str forbidden_characters: str _code = codes.BOOTH_INVALID_NAME @property def message(self) -> str: return ( f"booth name '{self.name}' is not valid, it cannot contain " f"{self.forbidden_characters} characters" ) @dataclass(frozen=True) class BoothTicketNameInvalid(ReportItemMessage): """ A booth ticket name may consist only of alphanumeric characters and dashes; the entered ticket name violates this rule. ticket_name -- entered booth ticket name """ ticket_name: str _code = codes.BOOTH_TICKET_NAME_INVALID @property def message(self) -> str: return ( f"booth ticket name '{self.ticket_name}' is not valid, use " "alphanumeric chars or dash" ) @dataclass(frozen=True) class BoothTicketDuplicate(ReportItemMessage): """ Each booth ticket name must be unique, but a duplicate booth ticket name was entered. ticket_name -- entered booth ticket name """ ticket_name: str _code = codes.BOOTH_TICKET_DUPLICATE @property def message(self) -> str: return ( f"booth ticket name '{self.ticket_name}' already exists in " "configuration" ) @dataclass(frozen=True) class BoothTicketDoesNotExist(ReportItemMessage): """ Some operations (like ticket remove) expect the ticket name to be present in the booth configuration, but the ticket name was not found there. ticket_name -- entered booth ticket name """ ticket_name: str _code = codes.BOOTH_TICKET_DOES_NOT_EXIST @property def message(self) -> str: return f"booth ticket name '{self.ticket_name}' does not exist" @dataclass(frozen=True) class BoothAlreadyInCib(ReportItemMessage): """ Each booth instance should be present in the cib at most once. An existing booth instance was detected in the cib while creating a new one. name -- booth instance name """ name: str _code = codes.BOOTH_ALREADY_IN_CIB @property def message(self) -> str: return ( f"booth instance '{self.name}' is already created as cluster " "resource" ) @dataclass(frozen=True) class BoothNotExistsInCib(ReportItemMessage): """ Removing a booth instance from the cib was requested, but no such instance was found in the cib. 
name -- booth instance name """ name: str _code = codes.BOOTH_NOT_EXISTS_IN_CIB @property def message(self) -> str: return f"booth instance '{self.name}' not found in cib" @dataclass(frozen=True) class BoothConfigIsUsed(ReportItemMessage): """ A booth config was detected to be in use when its destruction was requested. name -- booth instance name detail -- provides more details (for example the booth instance is used as a cluster resource or is started/enabled under systemd) resource_name -- which resource uses the booth instance, only valid if detail == BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE """ name: str detail: types.BoothConfigUsedWhere resource_name: Optional[str] = None _code = codes.BOOTH_CONFIG_IS_USED @property def message(self) -> str: detail_map = { const.BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE: "in a cluster resource", const.BOOTH_CONFIG_USED_ENABLED_IN_SYSTEMD: "- it is enabled in systemd", const.BOOTH_CONFIG_USED_RUNNING_IN_SYSTEMD: "- it is running under systemd", } detail = detail_map.get(self.detail, str(self.detail)) if ( self.detail == const.BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE and self.resource_name ): detail = f"in cluster resource '{self.resource_name}'" return f"booth instance '{self.name}' is used {detail}" @dataclass(frozen=True) class BoothMultipleTimesInCib(ReportItemMessage): """ Each booth instance should be present in the cib at most once, but multiple occurrences were detected, for example while removing a booth instance from the cib. The user needs to be notified about this fact; when the operation is forced, the user should still be warned about the multiple occurrences. name -- booth instance name """ name: str _code = codes.BOOTH_MULTIPLE_TIMES_IN_CIB @property def message(self) -> str: return f"found more than one booth instance '{self.name}' in cib" @dataclass(frozen=True) class BoothConfigDistributionStarted(ReportItemMessage): """ Booth configuration is about to be sent to nodes """ _code = codes.BOOTH_CONFIG_DISTRIBUTION_STARTED @property def message(self) -> str: return "Sending booth configuration to cluster nodes..." @dataclass(frozen=True) class BoothConfigAcceptedByNode(ReportItemMessage): """ Booth config has been saved on the specified node. node -- name of the node name_list -- list of booth instance names """ node: str = "" name_list: List[str] = field(default_factory=list) _code = codes.BOOTH_CONFIG_ACCEPTED_BY_NODE @property def message(self) -> str: desc = "" if self.name_list and self.name_list not in [["booth"]]: desc = "{_s} {_list}".format( _s="s" if len(self.name_list) > 1 else "", _list=format_list(self.name_list), ) return "{node}Booth config{desc} saved".format( node=format_optional(self.node, "{}: "), desc=desc, ) @dataclass(frozen=True) class BoothConfigDistributionNodeError(ReportItemMessage): """ Saving the booth config failed on the specified node. node -- node name reason -- reason of the failure name -- name of the booth instance """ node: str reason: str name: str = "" _code = codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR @property def message(self) -> str: desc = _format_booth_default(self.name, " '{}'") return ( f"Unable to save booth config{desc} on node '{self.node}': " f"{self.reason}" ) @dataclass(frozen=True) class BoothFetchingConfigFromNode(ReportItemMessage): """ Fetching of the booth config from the specified node started. node -- node from which the config is being fetched config -- config name """ node: str config: str = "" _code = codes.BOOTH_FETCHING_CONFIG_FROM_NODE @property def message(self) -> str: desc = _format_booth_default(self.config, " '{}'") return f"Fetching booth config{desc} from node '{self.node}'..." 
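# A minimal sketch (illustrative only, values are made up) of how one of the
# report messages above renders; it assumes the module-level helper
# _format_booth_default() expands the " '{}'" template for a non-default
# booth instance name:
#
#     msg = BoothConfigDistributionNodeError(
#         node="node1", reason="connection timeout", name="prod"
#     )
#     assert msg.message == (
#         "Unable to save booth config 'prod' on node 'node1': "
#         "connection timeout"
#     )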
@dataclass(frozen=True) class BoothUnsupportedFileLocation(ReportItemMessage): """ A booth file (config, authfile) is not in the expected directory, skipping it. file_path -- the actual path of the file expected_dir -- where the file is supposed to be file_type_code -- item from pcs.common.file_type_codes """ file_path: str expected_dir: str file_type_code: file_type_codes.FileTypeCode _code = codes.BOOTH_UNSUPPORTED_FILE_LOCATION @property def message(self) -> str: file_role = _format_file_role(self.file_type_code) return ( f"{file_role} '{self.file_path}' is outside of supported booth " f"config directory '{self.expected_dir}', ignoring the file" ) @dataclass(frozen=True) class BoothDaemonStatusError(ReportItemMessage): """ Unable to get the status of the booth daemon because of an error. reason -- reason """ reason: str _code = codes.BOOTH_DAEMON_STATUS_ERROR @property def message(self) -> str: return f"unable to get status of booth daemon: {self.reason}" @dataclass(frozen=True) class BoothTicketStatusError(ReportItemMessage): """ Unable to get the status of booth tickets because of an error. reason -- reason """ reason: str = "" _code = codes.BOOTH_TICKET_STATUS_ERROR @property def message(self) -> str: reason = format_optional(self.reason, ": {}") return f"unable to get status of booth tickets{reason}" @dataclass(frozen=True) class BoothPeersStatusError(ReportItemMessage): """ Unable to get the status of booth peers because of an error. reason -- reason """ reason: str = "" _code = codes.BOOTH_PEERS_STATUS_ERROR @property def message(self) -> str: reason = format_optional(self.reason, ": {}") return f"unable to get status of booth peers{reason}" @dataclass(frozen=True) class BoothCannotDetermineLocalSiteIp(ReportItemMessage): """ Some booth operations are performed on a specific site and require its ip to be specified. When the site specification is omitted, pcs tries to determine the local ip, but that determination failed. """ _code = codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP @property def message(self) -> str: return "cannot determine local site ip, please specify site parameter" @dataclass(frozen=True) class BoothTicketOperationFailed(ReportItemMessage): """ Pcs uses external booth tools for some ticket operations, for example grant and revoke, but the external command failed. operation -- what was intended to be performed with the ticket reason -- error description from the external booth command site_ip -- the site which was to run the command ticket_name -- the ticket the command was to be run with """ operation: str reason: str site_ip: str ticket_name: str _code = codes.BOOTH_TICKET_OPERATION_FAILED @property def message(self) -> str: return ( f"unable to {self.operation} booth ticket '{self.ticket_name}'" f" for site '{self.site_ip}', reason: {self.reason}" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagAddRemoveIdsDuplication(ReportItemMessage): """ Duplicate reference ids were found in a tag create or update add/remove specification. 
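For illustration only (ids are made up): duplicate_ids_list=["r1"] with add_or_not_remove=True renders as "Ids to add must be unique, duplicate ids: 'r1'". 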
""" duplicate_ids_list: List[str] add_or_not_remove: bool = True _code = codes.TAG_ADD_REMOVE_IDS_DUPLICATION @property def message(self) -> str: action = "add" if self.add_or_not_remove else "remove" duplicate_ids = format_list(self.duplicate_ids_list) return f"Ids to {action} must be unique, duplicate ids: {duplicate_ids}" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagAdjacentReferenceIdNotInTheTag(ReportItemMessage): """ Cannot put reference ids next to an adjacent reference id in a tag, because the adjacent reference id does not belong to the tag. adjacent_id -- adjacent reference id tag_id -- tag id """ adjacent_id: str tag_id: str _code = codes.TAG_ADJACENT_REFERENCE_ID_NOT_IN_THE_TAG @property def message(self) -> str: return ( f"There is no reference id '{self.adjacent_id}' in the tag " f"'{self.tag_id}', cannot put reference ids next to it in the tag" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotAddAndRemoveIdsAtTheSameTime(ReportItemMessage): """ Cannot add and remove ids at the same time. Avoid operation without an effect. idref_list -- common ids from add and remove lists """ idref_list: List[str] _code = codes.TAG_CANNOT_ADD_AND_REMOVE_IDS_AT_THE_SAME_TIME @property def message(self) -> str: idref_list = format_list(self.idref_list) return f"Ids cannot be added and removed at the same time: {idref_list}" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotAddReferenceIdsAlreadyInTheTag(ReportItemMessage): """ Cannot add reference ids already in the tag. tag_id -- tag id idref_list -- reference ids already in tag """ tag_id: str idref_list: List[str] _code = codes.TAG_CANNOT_ADD_REFERENCE_IDS_ALREADY_IN_THE_TAG @property def message(self) -> str: return ( "Cannot add reference {ids} already in the tag '{tag_id}': " "{idref_list}" ).format( ids=format_plural(self.idref_list, "id"), tag_id=self.tag_id, idref_list=format_list(self.idref_list), ) @dataclass(frozen=True) class TagCannotContainItself(ReportItemMessage): """ List of object reference ids contains the same id as specified tag_id. """ _code = codes.TAG_CANNOT_CONTAIN_ITSELF @property def message(self) -> str: return "Tag cannot contain itself" @dataclass(frozen=True) class TagCannotCreateEmptyTagNoIdsSpecified(ReportItemMessage): """ Cannot create empty tag, no reference ids were specified. """ _code = codes.TAG_CANNOT_CREATE_EMPTY_TAG_NO_IDS_SPECIFIED @property def message(self) -> str: return "Cannot create empty tag, no resource ids specified" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotPutIdNextToItself(ReportItemMessage): """ Cannot put id next to itself. Wrong adjacent id. adjacent_id -- adjacent reference id """ adjacent_id: str _code = codes.TAG_CANNOT_PUT_ID_NEXT_TO_ITSELF @property def message(self) -> str: return f"Cannot put id '{self.adjacent_id}' next to itself." # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotRemoveAdjacentId(ReportItemMessage): """ Cannot remove adjacent id. adjacent_id -- adjacent reference id """ adjacent_id: str _code = codes.TAG_CANNOT_REMOVE_ADJACENT_ID @property def message(self) -> str: return ( f"Cannot remove id '{self.adjacent_id}' next to which ids are being" " added" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotRemoveReferencesWithoutRemovingTag(ReportItemMessage): """ Cannot remove references without removing a tag. 
""" tag_id: str _code = codes.TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG @property def message(self) -> str: return f"There would be no references left in the tag '{self.tag_id}'" @dataclass(frozen=True) class TagCannotRemoveTagReferencedInConstraints(ReportItemMessage): """ Cannot remove tag which is referenced in constraints. tag_id -- tag id constraint_id_list -- list of constraint ids which are referencing tag """ tag_id: str constraint_id_list: List[str] _code = codes.TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS @property def message(self) -> str: return ( "Tag '{tag_id}' cannot be removed because it is referenced in " "{constraints} {constraint_id_list}" ).format( tag_id=self.tag_id, constraints=format_plural(self.constraint_id_list, "constraint"), constraint_id_list=format_list(self.constraint_id_list), ) @dataclass(frozen=True) class TagCannotRemoveTagsNoTagsSpecified(ReportItemMessage): """ Cannot remove tags, no tags were specified. """ _code = codes.TAG_CANNOT_REMOVE_TAGS_NO_TAGS_SPECIFIED @property def message(self) -> str: return "Cannot remove tags, no tags to remove specified" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotSpecifyAdjacentIdWithoutIdsToAdd(ReportItemMessage): """ Cannot specify adjacent id without ids to add. adjacent_id -- adjacent reference id """ adjacent_id: str _code = codes.TAG_CANNOT_SPECIFY_ADJACENT_ID_WITHOUT_IDS_TO_ADD @property def message(self) -> str: return ( f"Cannot specify adjacent id '{self.adjacent_id}' without ids to " "add" ) # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagCannotUpdateTagNoIdsSpecified(ReportItemMessage): """ Cannot update tag, no ids specified. """ _code = codes.TAG_CANNOT_UPDATE_TAG_NO_IDS_SPECIFIED @property def message(self) -> str: return "Cannot update tag, no ids to be added or removed specified" # TODO: remove, use ADD_REMOVE reports @dataclass(frozen=True) class TagIdsNotInTheTag(ReportItemMessage): """ Specified ids are not present in the specified tag. """ tag_id: str id_list: List[str] _code = codes.TAG_IDS_NOT_IN_THE_TAG @property def message(self) -> str: return "Tag '{tag_id}' does not contain {ids}: {id_list}".format( tag_id=self.tag_id, ids=format_plural(self.id_list, "id"), id_list=format_list(self.id_list), ) @dataclass(frozen=True) class RuleInEffectStatusDetectionNotSupported(ReportItemMessage): """ Pacemaker tool for detecting if a rule is expired or not is not available """ _code = codes.RULE_IN_EFFECT_STATUS_DETECTION_NOT_SUPPORTED @property def message(self) -> str: return ( "crm_rule is not available, therefore expired parts of " "configuration may not be detected. Consider upgrading pacemaker." 
) @dataclass(frozen=True) class RuleExpressionOptionsDuplication(ReportItemMessage): """ Keys are specified more than once in a single rule (sub)expression duplicate_option_list -- list of keys duplicated in a single (sub)expression """ duplicate_option_list: List[str] _code = codes.RULE_EXPRESSION_OPTIONS_DUPLICATION @property def message(self) -> str: options = format_list(self.duplicate_option_list) return f"Duplicate options in a single (sub)expression: {options}" @dataclass(frozen=True) class RuleExpressionParseError(ReportItemMessage): """ Unable to parse pacemaker cib rule expression string rule_string -- the whole rule expression string reason -- error message from rule parser rule_line -- part of rule_string - the line where the error occurred line_number -- the line where parsing failed column_number -- the column where parsing failed position -- the start index where parsing failed """ rule_string: str reason: str rule_line: str line_number: int column_number: int position: int _code = codes.RULE_EXPRESSION_PARSE_ERROR @property def message(self) -> str: # Messages coming from the parser are not very useful and readable, # they mostly contain one line grammar expression covering the whole # rule. No user would be able to parse that. Therefore we omit the # messages. return ( f"'{self.rule_string}' is not a valid rule expression, parse error " f"near or after line {self.line_number} column {self.column_number}" ) @dataclass(frozen=True) class RuleExpressionNotAllowed(ReportItemMessage): """ Used rule expression is not allowed in current context expression_type -- disallowed expression type """ expression_type: CibRuleExpressionType _code = codes.RULE_EXPRESSION_NOT_ALLOWED @property def message(self) -> str: type_map = { CibRuleExpressionType.EXPRESSION: ( "Keywords 'defined', 'not_defined', 'eq', 'ne', 'gte', 'gt', " "'lte' and 'lt'" ), CibRuleExpressionType.OP_EXPRESSION: "Keyword 'op'", CibRuleExpressionType.RSC_EXPRESSION: "Keyword 'resource'", } return ( f"{type_map[self.expression_type]} cannot be used " "in a rule in this command" ) @dataclass(frozen=True) class RuleExpressionSinceGreaterThanUntil(ReportItemMessage): """ In a date expression, 'until' predates 'since' """ since: str until: str _code = codes.RULE_EXPRESSION_SINCE_GREATER_THAN_UNTIL @property def message(self) -> str: return f"Since '{self.since}' is not sooner than until '{self.until}'" @dataclass(frozen=True) class CibNvsetAmbiguousProvideNvsetId(ReportItemMessage): """ An old command supporting only one nvset have been used when several nvsets exist. We require an nvset ID the command should work with to be specified. """ pcs_command: types.PcsCommand _code = codes.CIB_NVSET_AMBIGUOUS_PROVIDE_NVSET_ID @property def message(self) -> str: return "Several options sets exist, please specify an option set ID" @dataclass(frozen=True) class AddRemoveItemsNotSpecified(ReportItemMessage): """ Cannot modify container, no add or remove items specified. 
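For illustration, assuming _add_remove_container_str() yields "stonith resource" and _add_remove_item_str() yields "device" (made-up labels and id), the message renders as "Cannot modify stonith resource 'fence1', no devices to add or remove specified". 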
container_type -- type of item container item_type -- type of item in a container container_id -- id of a container """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str _code = codes.ADD_REMOVE_ITEMS_NOT_SPECIFIED @property def message(self) -> str: container = _add_remove_container_str(self.container_type) items = get_plural(_add_remove_item_str(self.item_type)) return ( f"Cannot modify {container} '{self.container_id}', no {items} to " "add or remove specified" ) @dataclass(frozen=True) class AddRemoveItemsDuplication(ReportItemMessage): """ Duplicate items were found in add/remove item lists. container_type -- type of item container item_type -- type of item in a container container_id -- id of a container duplicate_items_list -- list of duplicate items """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str duplicate_items_list: List[str] _code = codes.ADD_REMOVE_ITEMS_DUPLICATION @property def message(self) -> str: items = get_plural(_add_remove_item_str(self.item_type)) duplicate_items = format_list(self.duplicate_items_list) return ( f"{items.capitalize()} to add or remove must be unique, duplicate " f"{items}: {duplicate_items}" ) @dataclass(frozen=True) class AddRemoveCannotAddItemsAlreadyInTheContainer(ReportItemMessage): """ Cannot add items already existing in the container. container_type -- type of item container item_type -- type of item in a container container_id -- id of a container item_list -- list of items already in the container """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str item_list: List[str] _code = codes.ADD_REMOVE_CANNOT_ADD_ITEMS_ALREADY_IN_THE_CONTAINER @property def message(self) -> str: items = format_plural( self.item_list, _add_remove_item_str(self.item_type) ) item_list = format_list(self.item_list) they = format_plural(self.item_list, "it") are = format_plural(self.item_list, "is") container = _add_remove_container_str(self.container_type) return ( f"Cannot add {items} {item_list}, {they} {are} already present in " f"{container} '{self.container_id}'" ) @dataclass(frozen=True) class AddRemoveCannotRemoveItemsNotInTheContainer(ReportItemMessage): """ Cannot remove items not existing in the container. container_type -- type of item container item_type -- type of item in a container container_id -- id of a container item_list -- list of items not in the container """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str item_list: List[str] _code = codes.ADD_REMOVE_CANNOT_REMOVE_ITEMS_NOT_IN_THE_CONTAINER @property def message(self) -> str: items = format_plural( self.item_list, _add_remove_item_str(self.item_type) ) item_list = format_list(self.item_list) they = format_plural(self.item_list, "it") are = format_plural(self.item_list, "is") container = _add_remove_container_str(self.container_type) return ( f"Cannot remove {items} {item_list}, {they} {are} not present in " f"{container} '{self.container_id}'" ) @dataclass(frozen=True) class AddRemoveCannotAddAndRemoveItemsAtTheSameTime(ReportItemMessage): """ Cannot add and remove items at the same time; such an operation would have no effect. 
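For illustration (made-up labels and ids): with the item label "device" and item_list=["d1", "d2"] the message renders as "Devices cannot be added and removed at the same time: 'd1', 'd2'". 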
container_type -- type of item container item_type -- type of item in a container container_id -- id of a container item_list -- common items from add and remove item lists """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str item_list: List[str] _code = codes.ADD_REMOVE_CANNOT_ADD_AND_REMOVE_ITEMS_AT_THE_SAME_TIME @property def message(self) -> str: items = format_plural( self.item_list, _add_remove_item_str(self.item_type) ) item_list = format_list(self.item_list) return ( f"{items.capitalize()} cannot be added and removed at the same " f"time: {item_list}" ) @dataclass(frozen=True) class AddRemoveCannotRemoveAllItemsFromTheContainer(ReportItemMessage): """ Cannot remove all items from a container. container_type -- type of item container item_type -- type of item in a container container_id -- id of a container item_list -- common items from add and remove item lists """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str item_list: List[str] _code = codes.ADD_REMOVE_CANNOT_REMOVE_ALL_ITEMS_FROM_THE_CONTAINER @property def message(self) -> str: container = _add_remove_container_str(self.container_type) items = get_plural(_add_remove_item_str(self.item_type)) return ( f"Cannot remove all {items} from {container} '{self.container_id}'" ) @dataclass(frozen=True) class AddRemoveAdjacentItemNotInTheContainer(ReportItemMessage): """ Cannot put items next to an adjacent item in the container, because the adjacent item does not exist in the container. container_type -- type of item container item_type -- type of item in a container container_id -- id of a container adjacent_item_id -- id of an adjacent item """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str adjacent_item_id: str _code = codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER @property def message(self) -> str: container = _add_remove_container_str(self.container_type) item = _add_remove_item_str(self.item_type) items = get_plural(item) return ( f"There is no {item} '{self.adjacent_item_id}' in the " f"{container} '{self.container_id}', cannot add {items} next to it" ) @dataclass(frozen=True) class AddRemoveCannotPutItemNextToItself(ReportItemMessage): """ Cannot put an item into a container next to itself. container_type -- type of item container item_type -- type of item in a container container_id -- id of a container adjacent_item_id -- id of an adjacent item """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str adjacent_item_id: str _code = codes.ADD_REMOVE_CANNOT_PUT_ITEM_NEXT_TO_ITSELF @property def message(self) -> str: item = _add_remove_item_str(self.item_type) return f"Cannot put {item} '{self.adjacent_item_id}' next to itself" @dataclass(frozen=True) class AddRemoveCannotSpecifyAdjacentItemWithoutItemsToAdd(ReportItemMessage): """ Cannot specify adjacent item without items to add. 
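For illustration (made-up labels and ids): with the item label "device" and adjacent_item_id="d1" the message renders as "Cannot specify adjacent device 'd1' without devices to add". 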
container_type -- type of item container item_type -- type of item in a container container_id -- id of a container adjacent_item_id -- id of an adjacent item """ container_type: types.AddRemoveContainerType item_type: types.AddRemoveItemType container_id: str adjacent_item_id: str _code = codes.ADD_REMOVE_CANNOT_SPECIFY_ADJACENT_ITEM_WITHOUT_ITEMS_TO_ADD @property def message(self) -> str: item = _add_remove_item_str(self.item_type) items = get_plural(item) return ( f"Cannot specify adjacent {item} '{self.adjacent_item_id}' without " f"{items} to add" ) pcs-0.10.11/pcs/common/reports/processor.py000066400000000000000000000020101412706364600206050ustar00rootroot00000000000000import abc from .item import ReportItem, ReportItemList, ReportItemSeverity class ReportProcessor(abc.ABC): def __init__(self) -> None: self._has_errors = False @property def has_errors(self) -> bool: return self._has_errors def report(self, report_item: ReportItem) -> "ReportProcessor": if _is_error(report_item): self._has_errors = True self._do_report(report_item) return self def report_list(self, report_list: ReportItemList) -> "ReportProcessor": for report_item in report_list: self.report(report_item) return self @abc.abstractmethod def _do_report(self, report_item: ReportItem) -> None: raise NotImplementedError() def has_errors(report_list: ReportItemList) -> bool: for report_item in report_list: if _is_error(report_item): return True return False def _is_error(report_item: ReportItem) -> bool: return report_item.severity.level == ReportItemSeverity.ERROR pcs-0.10.11/pcs/common/reports/types.py000066400000000000000000000012741412706364600177450ustar00rootroot00000000000000from typing import NewType AddRemoveContainerType = NewType("AddRemoveContainerType", str) AddRemoveItemType = NewType("AddRemoveItemType", str) BoothConfigUsedWhere = NewType("BoothConfigUsedWhere", str) DefaultAddressSource = NewType("DefaultAddressSource", str) FenceHistoryCommandType = NewType("FenceHistoryCommandType", str) ForceCode = NewType("ForceCode", str) MessageCode = NewType("MessageCode", str) PcsCommand = NewType("PcsCommand", str) ReasonType = NewType("ReasonType", str) ServiceAction = NewType("ServiceAction", str) SeverityLevel = NewType("SeverityLevel", str) StonithRestartlessUpdateUnableToPerformReason = NewType( "StonithRestartlessUpdateUnableToPerformReason", str ) pcs-0.10.11/pcs/common/services/000077500000000000000000000000001412706364600163505ustar00rootroot00000000000000pcs-0.10.11/pcs/common/services/__init__.py000066400000000000000000000000611412706364600204560ustar00rootroot00000000000000from . import errors, drivers, interfaces, types pcs-0.10.11/pcs/common/services/common.py000066400000000000000000000001211412706364600202040ustar00rootroot00000000000000# pylint: disable=unused-import from pcs.common.str_tools import join_multilines pcs-0.10.11/pcs/common/services/drivers/000077500000000000000000000000001412706364600200265ustar00rootroot00000000000000pcs-0.10.11/pcs/common/services/drivers/__init__.py000066400000000000000000000001211412706364600221310ustar00rootroot00000000000000from .systemd import SystemdDriver from .sysvinit_rhel import SysVInitRhelDriver pcs-0.10.11/pcs/common/services/drivers/systemd.py000066400000000000000000000106171412706364600220750ustar00rootroot00000000000000import os.path import re from typing import ( Iterable, List, Optional, ) from .. 
import errors from ..interfaces import ( ExecutorInterface, ServiceManagerInterface, ) class SystemdDriver(ServiceManagerInterface): def __init__( self, executor: ExecutorInterface, systemctl_bin: str, systemd_unit_paths: Iterable[str], ) -> None: """ executor -- external commands used by this class are executed using this object systemctl_bin -- path to systemctl executable, it is used for managing services systemd_unit_paths -- paths to directories where unit files should be located. If at least one location is present on the system, systemd is considered as a current init system. """ self._executor = executor self._systemctl_bin = systemctl_bin self._systemd_unit_paths = systemd_unit_paths self._available_services: List[str] = [] def start(self, service: str, instance: Optional[str] = None) -> None: result = self._executor.run( [ self._systemctl_bin, "start", _format_service_name(service, instance), ] ) if result.retval != 0: raise errors.StartServiceError( service, result.joined_output, instance ) def stop(self, service: str, instance: Optional[str] = None) -> None: result = self._executor.run( [ self._systemctl_bin, "stop", _format_service_name(service, instance), ] ) if result.retval != 0: raise errors.StopServiceError( service, result.joined_output, instance ) def enable(self, service: str, instance: Optional[str] = None) -> None: result = self._executor.run( [ self._systemctl_bin, "enable", _format_service_name(service, instance), ] ) if result.retval != 0: raise errors.EnableServiceError( service, result.joined_output, instance ) def disable(self, service: str, instance: Optional[str] = None) -> None: if not self.is_installed(service): return result = self._executor.run( [ self._systemctl_bin, "disable", _format_service_name(service, instance), ] ) if result.retval != 0: raise errors.DisableServiceError( service, result.joined_output, instance ) def is_enabled(self, service: str, instance: Optional[str] = None) -> bool: result = self._executor.run( [ self._systemctl_bin, "is-enabled", _format_service_name(service, instance), ] ) return result.retval == 0 def is_running(self, service: str, instance: Optional[str] = None) -> bool: result = self._executor.run( [ self._systemctl_bin, "is-active", _format_service_name(service, instance), ] ) return result.retval == 0 def is_installed(self, service: str) -> bool: return service in self.get_available_services() def get_available_services(self) -> List[str]: if not self._available_services: self._available_services = self._get_available_services() return self._available_services def _get_available_services(self) -> List[str]: result = self._executor.run( [self._systemctl_bin, "list-unit-files", "--full"] ) if result.retval != 0: return [] service_list = [] for service in result.stdout.splitlines(): match = re.search(r"^([\S]*)\.service", service) if match: service_list.append(match.group(1)) return service_list def is_current_system_supported(self) -> bool: return any( os.path.isdir(path) for path in self._systemd_unit_paths ) and os.path.isfile(self._systemctl_bin) def _format_service_name(service: str, instance: Optional[str]) -> str: instance_str = f"@{instance}" if instance else "" return f"{service}{instance_str}.service" pcs-0.10.11/pcs/common/services/drivers/sysvinit_rhel.py000066400000000000000000000062411412706364600233050ustar00rootroot00000000000000import os.path from typing import ( List, Optional, ) from .. 
import errors from ..interfaces import ( ExecutorInterface, ServiceManagerInterface, ) class SysVInitRhelDriver(ServiceManagerInterface): def __init__( self, executor: ExecutorInterface, service_bin: str, chkconfig_bin: str ): """ executor -- external commands used by this class are executed using this object service_bin -- path to an executable used for starting and stopping services and to check if a service is running chkconfig_bin -- path to an executable used for enabling, disabling and listing available service and to check if service is enabled """ self._executor = executor self._service_bin = service_bin self._chkconfig_bin = chkconfig_bin self._available_services: List[str] = [] def start(self, service: str, instance: Optional[str] = None) -> None: result = self._executor.run([self._service_bin, service, "start"]) if result.retval != 0: raise errors.StartServiceError(service, result.joined_output) def stop(self, service: str, instance: Optional[str] = None) -> None: result = self._executor.run([self._service_bin, service, "stop"]) if result.retval != 0: raise errors.StopServiceError(service, result.joined_output) def enable(self, service: str, instance: Optional[str] = None) -> None: result = self._executor.run([self._chkconfig_bin, service, "on"]) if result.retval != 0: raise errors.EnableServiceError(service, result.joined_output) def disable(self, service: str, instance: Optional[str] = None) -> None: if not self.is_installed(service): return result = self._executor.run([self._chkconfig_bin, service, "off"]) if result.retval != 0: raise errors.DisableServiceError(service, result.joined_output) def is_enabled(self, service: str, instance: Optional[str] = None) -> bool: return self._executor.run([self._chkconfig_bin, service]).retval == 0 def is_running(self, service: str, instance: Optional[str] = None) -> bool: return ( self._executor.run([self._service_bin, service, "status"]).retval == 0 ) def is_installed(self, service: str) -> bool: return service in self.get_available_services() def get_available_services(self) -> List[str]: if not self._available_services: self._available_services = self._get_available_services() return self._available_services def _get_available_services(self) -> List[str]: result = self._executor.run([self._chkconfig_bin]) if result.retval != 0: return [] service_list = [] for service in result.stdout.splitlines(): service = service.split(" ", 1)[0] if service: service_list.append(service) return service_list def is_current_system_supported(self) -> bool: return all( os.path.isfile(binary) for binary in (self._service_bin, self._chkconfig_bin) ) pcs-0.10.11/pcs/common/services/errors.py000066400000000000000000000010531412706364600202350ustar00rootroot00000000000000from typing import Optional class ManageServiceError(Exception): # pylint: disable=super-init-not-called def __init__( self, service: str, message: str, instance: Optional[str] = None, ): self.service = service self.message = message self.instance = instance class DisableServiceError(ManageServiceError): pass class EnableServiceError(ManageServiceError): pass class StartServiceError(ManageServiceError): pass class StopServiceError(ManageServiceError): pass pcs-0.10.11/pcs/common/services/interfaces/000077500000000000000000000000001412706364600204735ustar00rootroot00000000000000pcs-0.10.11/pcs/common/services/interfaces/__init__.py000066400000000000000000000001251412706364600226020ustar00rootroot00000000000000from .executor import ExecutorInterface from .manager import ServiceManagerInterface 
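# The snippet below is an illustrative sketch only (it is not part of pcs):
# it shows how a trivial ExecutorInterface implementation could wrap
# subprocess; the class name SubprocessExecutor is made up.
#
#     import subprocess
#
#     from pcs.common.services.interfaces import ExecutorInterface
#     from pcs.common.services.types import ExecutorResult
#
#     class SubprocessExecutor(ExecutorInterface):
#         def run(self, args):
#             # Run the command, capture both output streams as text and
#             # never raise on a non-zero exit code.
#             done = subprocess.run(
#                 args, capture_output=True, text=True, check=False
#             )
#             return ExecutorResult(done.returncode, done.stdout, done.stderr)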
pcs-0.10.11/pcs/common/services/interfaces/executor.py000066400000000000000000000010761412706364600227070ustar00rootroot00000000000000from typing import Sequence from ..types import ExecutorResult class ExecutorInterface: """ Simple interface for executing external programs. """ def run(self, args: Sequence[str]) -> ExecutorResult: """ args -- program and its arguments to execute. The first item is the path to an executable and the rest of the items are arguments which will be passed to the executable. Execute the specified program synchronously and return its result after it has finished. """ raise NotImplementedError() pcs-0.10.11/pcs/common/services/interfaces/manager.py000066400000000000000000000061231412706364600224610ustar00rootroot00000000000000from typing import ( List, Optional, ) class ServiceManagerInterface: def start(self, service: str, instance: Optional[str] = None) -> None: """ service -- name of the service to be started instance -- service instance identifier. Available only for systems which support multiple service instances (e.g. systemd) Start the defined service. Raises StartServiceError on failure. """ raise NotImplementedError() def stop(self, service: str, instance: Optional[str] = None) -> None: """ service -- name of the service to be stopped instance -- service instance identifier. Available only for systems which support multiple service instances (e.g. systemd) Stop the defined service. Raises StopServiceError on failure. """ raise NotImplementedError() def enable(self, service: str, instance: Optional[str] = None) -> None: """ service -- name of the service to be enabled instance -- service instance identifier. Available only for systems which support multiple service instances (e.g. systemd) Enable the defined service. Raises EnableServiceError on failure. """ raise NotImplementedError() def disable(self, service: str, instance: Optional[str] = None) -> None: """ service -- name of the service to be disabled instance -- service instance identifier. Available only for systems which support multiple service instances (e.g. systemd) Disable the defined service. Raises DisableServiceError on failure. """ raise NotImplementedError() def is_enabled(self, service: str, instance: Optional[str] = None) -> bool: """ service -- name of the service to be checked instance -- service instance identifier. Available only for systems which support multiple service instances (e.g. systemd) Returns True if the specified service is enabled, False otherwise. """ raise NotImplementedError() def is_running(self, service: str, instance: Optional[str] = None) -> bool: """ service -- name of the service to be checked instance -- service instance identifier. Available only for systems which support multiple service instances (e.g. systemd) Returns True if the specified service is running (active), False otherwise. """ raise NotImplementedError() def is_installed(self, service: str) -> bool: """ service -- name of the service to be checked Returns True if the specified service is installed (manageable by the init system), False otherwise. """ raise NotImplementedError() def get_available_services(self) -> List[str]: """ Returns a list of service names recognized by the init system. """ raise NotImplementedError() def is_current_system_supported(self) -> bool: """ Returns True if the instance of this class is able to manage the current init system. 
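For instance, the systemd driver (see drivers/systemd.py above) answers True when at least one known systemd unit directory exists and the systemctl binary is present. 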
""" raise NotImplementedError() pcs-0.10.11/pcs/common/services/types.py000066400000000000000000000004251412706364600200670ustar00rootroot00000000000000from dataclasses import dataclass from .common import join_multilines @dataclass(frozen=True) class ExecutorResult: retval: int stdout: str stderr: str @property def joined_output(self) -> str: return join_multilines([self.stderr, self.stdout]) pcs-0.10.11/pcs/common/services_dto.py000066400000000000000000000006331412706364600175720ustar00rootroot00000000000000from dataclasses import dataclass from typing import List, Optional from pcs.common.interface.dto import DataTransferObject @dataclass(frozen=True) class ServiceStatusDto(DataTransferObject): service: str installed: Optional[bool] enabled: Optional[bool] running: Optional[bool] @dataclass(frozen=True) class ServicesInfoResultDto(DataTransferObject): services: List[ServiceStatusDto] pcs-0.10.11/pcs/common/ssl.py000066400000000000000000000042511412706364600157020ustar00rootroot00000000000000import datetime import ssl from typing import List from cryptography import x509 from cryptography.x509.oid import NameOID from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa def check_cert_key(cert_path: str, key_path: str) -> List[str]: errors = [] try: ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ssl_context.load_cert_chain(cert_path, key_path) except ssl.SSLError as e: errors.append(f"SSL certificate does not match the key: {e}") except EnvironmentError as e: errors.append(f"Unable to load SSL certificate and/or key: {e}") return errors def generate_key(length: int = 3072) -> rsa.RSAPrivateKeyWithSerialization: return rsa.generate_private_key( public_exponent=65537, key_size=length, backend=default_backend() ) def generate_cert(key: rsa.RSAPrivateKey, server_name: str) -> x509.Certificate: now = datetime.datetime.utcnow() subject = x509.Name( [ x509.NameAttribute(NameOID.COUNTRY_NAME, "US"), x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "MN"), x509.NameAttribute(NameOID.LOCALITY_NAME, "Minneapolis"), x509.NameAttribute(NameOID.ORGANIZATION_NAME, "pcsd"), x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "pcsd"), x509.NameAttribute(NameOID.COMMON_NAME, server_name), ] ) return ( x509.CertificateBuilder() .subject_name(subject) .issuer_name(subject) .public_key(key.public_key()) .serial_number(int(now.timestamp() * 1000)) .not_valid_before(now) .not_valid_after(now + datetime.timedelta(days=3650)) .sign(key, hashes.SHA256(), default_backend()) ) def dump_cert(certificate: x509.Certificate) -> bytes: return certificate.public_bytes(serialization.Encoding.PEM) def dump_key(key: rsa.RSAPrivateKeyWithSerialization) -> bytes: return key.private_bytes( serialization.Encoding.PEM, serialization.PrivateFormat.TraditionalOpenSSL, serialization.NoEncryption(), ) pcs-0.10.11/pcs/common/str_tools.py000066400000000000000000000112511412706364600171270ustar00rootroot00000000000000from collections.abc import Iterable as IterableAbc from typing import ( Any, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ) def indent(line_list: Iterable[str], indent_step: int = 2) -> List[str]: """ return line list where each line of input is prefixed by N spaces line_list -- original lines indent_step -- count of spaces for line prefix """ return [ "{0}{1}".format(" " * indent_step, line) if line else line for line in line_list ] def format_list( item_list: List[str], 
pcs-0.10.11/pcs/common/str_tools.py000066400000000000000000000112511412706364600171270ustar00rootroot00000000000000from collections.abc import Iterable as IterableAbc
from typing import (
    Any,
    Iterable,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)


def indent(line_list: Iterable[str], indent_step: int = 2) -> List[str]:
    """
    return line list where each line of input is prefixed by N spaces

    line_list -- original lines
    indent_step -- count of spaces for line prefix
    """
    return [
        "{0}{1}".format(" " * indent_step, line) if line else line
        for line in line_list
    ]


def format_list(
    item_list: List[str],  # Intentionally not Sequence so string is prohibited
    separator: str = ", ",
) -> str:
    item_list = sorted(item_list)
    to_value = lambda item: f"'{item}'"
    if len(item_list) == 1:
        return to_value(item_list[0])
    return separator.join(sorted([to_value(item) for item in item_list]))


def format_list_custom_last_separator(
    item_list: List[str],  # Intentionally not Sequence so string is prohibited
    last_separator: str,
    separator: str = ", ",
) -> str:
    item_list = sorted(item_list)
    if len(item_list) < 2:
        return format_list(item_list, separator=separator)
    return "{}{}{}".format(
        format_list(item_list[:-1], separator=separator),
        last_separator,
        format_list(item_list[-1:], separator=separator),
    )


# For now, Tuple[str, str] is sufficient. Feel free to change it if needed,
# e.g. when values can be integers.
def format_name_value_list(item_list: Sequence[Tuple[str, str]]) -> List[str]:
    """
    Turn 2-tuples to 'name=value' strings with standard quoting
    """
    output = []
    for name, value in item_list:
        name = quote(name, "= ")
        value = quote(value, "= ")
        output.append(f"{name}={value}")
    return output


def quote(string: str, chars_to_quote: str) -> str:
    """
    Quote a string if it contains specified characters

    string -- the string to be processed
    chars_to_quote -- the characters causing quoting
    """
    if not frozenset(chars_to_quote) & frozenset(string):
        return string
    if '"' not in string:
        return f'"{string}"'
    if "'" not in string:
        return f"'{string}'"
    return '"{string}"'.format(string=string.replace('"', '\\"'))


def join_multilines(strings: Iterable[str]) -> str:
    return "\n".join([a.strip() for a in strings if a.strip()])


def format_optional(
    value: Any,
    template: str = "{} ",
    empty_case: str = "",
) -> str:
    # Number 0 is considered False which does not suit our needs so we check
    # for it explicitly. Beware that False == 0 is true, so we must have an
    # additional check for that (bool is a subclass of int).
    if value or (
        isinstance(value, int) and not isinstance(value, bool) and value == 0
    ):
        return template.format(value)
    return empty_case


def _is_multiple(what):
    """
    Return True if 'what' does not mean one item, False otherwise

    iterable/int what -- this will be counted
    """
    retval = False
    if isinstance(what, int):
        retval = abs(what) != 1
    elif not isinstance(what, str):
        try:
            retval = len(what) != 1
        except TypeError:
            pass
    return retval


def _add_s(word):
    """
    add "s" or "es" to the word based on its ending

    string word -- word where "s" or "es" should be added
    """
    if word[-1:] in ("s", "x", "o") or word[-2:] in ("ss", "sh", "ch"):
        return word + "es"
    return word + "s"
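# (Editor's sketch, not part of the original module: expected behavior of the
# helpers above, following directly from the code as written.
#   format_list(["b", "a"]) == "'a', 'b'"            # sorted and quoted
#   quote("a b", "= ") == '"a b"'                    # space triggers quoting
#   format_optional(0, "timeout {}") == "timeout 0"  # 0 is not "empty" here
#   _add_s("switch") == "switches"                   # "es" after s/x/o/ss/sh/ch
# )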
def get_plural(singular: str) -> str:
    """
    Take singular word form and return plural.

    singular -- singular word (like: is, do, node)
    """
    common_plurals = {
        "is": "are",
        "has": "have",
        "does": "do",
        "it": "they",
    }
    if singular in common_plurals:
        return common_plurals[singular]
    return _add_s(singular)


def format_plural(
    depends_on: Union[int, Iterable[Any]],
    singular: str,
    plural: Optional[str] = None,
) -> str:
    """
    Take the singular word form and return its plural form if depends_on is
    not equal to one / does not contain exactly one item

    iterable/int/string depends_on -- if the number (of items) is not equal
        to one, the plural form is returned
    singular -- singular word (like: is, do, node)
    plural -- optional irregular plural form
    """
    if not _is_multiple(depends_on):
        return singular
    if plural:
        return plural
    return get_plural(singular)


T = TypeVar("T")


def transform(items: List[T], mapping: Mapping[T, str]) -> List[str]:
    return list(map(lambda item: mapping.get(item, str(item)), items))


def is_iterable_not_str(value):
    return isinstance(value, IterableAbc) and not isinstance(value, str)
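A quick illustration of the list-formatting and pluralization helpers above (an editor's sketch, not part of the pcs sources; the import path mirrors the file layout):

from pcs.common.str_tools import (
    format_list_custom_last_separator,
    format_plural,
)

nodes = ["node2", "node1"]
# -> "'node1' and 'node2'"
print(format_list_custom_last_separator(nodes, " and "))
# -> "2 nodes are offline" (both the noun and the verb are pluralized)
print(f"{len(nodes)} {format_plural(nodes, 'node')} "
      f"{format_plural(nodes, 'is')} offline")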
pcs-0.10.11/pcs/common/tools.py000066400000000000000000000073541412706364600162470ustar00rootroot00000000000000from collections import namedtuple
from enum import Enum
import threading
from typing import (
    MutableSet,
    Optional,
    TypeVar,
    Union,
)

from lxml import etree
from lxml.etree import _Element

T = TypeVar("T", bound=type)


def get_all_subclasses(cls: T) -> MutableSet[T]:
    subclasses = set(cls.__subclasses__())
    return subclasses.union(
        {s for c in subclasses for s in get_all_subclasses(c)}
    )


def run_parallel(worker, data_list):
    thread_list = []
    for args, kwargs in data_list:
        thread = threading.Thread(target=worker, args=args, kwargs=kwargs)
        thread.daemon = True
        thread_list.append(thread)
        thread.start()
    for thread in thread_list:
        thread.join()


def format_environment_error(e):
    return format_os_error(e)


def format_os_error(e):
    if e.filename:
        return "{0}: '{1}'".format(e.strerror, e.filename)
    return e.strerror


def xml_fromstring(xml: str) -> _Element:
    # If the xml contains an encoding declaration such as:
    # <?xml version="1.0" encoding="UTF-8"?>
    # we get an exception in python3:
    # ValueError: Unicode strings with encoding declaration are not supported.
    # Please use bytes input or XML fragments without declaration.
    # So we encode the string to bytes.
    return etree.fromstring(
        xml.encode("utf-8"),
        # it raises on a huge xml without the flag huge_tree
        # see https://bugzilla.redhat.com/show_bug.cgi?id=1506864
        etree.XMLParser(huge_tree=True),
    )


class AutoNameEnum(str, Enum):
    def _generate_next_value_(name, start, count, last_values):
        # pylint: disable=no-self-argument
        del start, count, last_values
        return name


def timeout_to_seconds(timeout: Union[int, str]) -> Optional[int]:
    """
    Transform pacemaker style timeout to number of seconds. If `timeout` is
    not a valid timeout, `None` is returned.

    timeout -- timeout string
    """
    try:
        candidate = int(timeout)
        if candidate >= 0:
            return candidate
        return None
    except ValueError:
        pass
    # Now we know the timeout is not an integer nor an integer string.
    # Let's make sure mypy knows the timeout is a string as well.
    timeout = str(timeout)
    suffix_multiplier = {
        "s": 1,
        "sec": 1,
        "m": 60,
        "min": 60,
        "h": 3600,
        "hr": 3600,
    }
    for suffix, multiplier in suffix_multiplier.items():
        if timeout.endswith(suffix) and timeout[: -len(suffix)].isdigit():
            return int(timeout[: -len(suffix)]) * multiplier
    return None


class Version(namedtuple("Version", ["major", "minor", "revision"])):
    def __new__(
        cls,
        major: int,
        minor: Optional[int] = None,
        revision: Optional[int] = None,
    ):
        return super(Version, cls).__new__(cls, major, minor, revision)

    @property
    def as_full_tuple(self):
        return (
            self.major,
            self.minor if self.minor is not None else 0,
            self.revision if self.revision is not None else 0,
        )

    def normalize(self):
        return self.__class__(*self.as_full_tuple)

    def __str__(self):
        return ".".join([str(x) for x in self if x is not None])

    def __lt__(self, other):
        return self.as_full_tuple < other.as_full_tuple

    def __le__(self, other):
        return self.as_full_tuple <= other.as_full_tuple

    def __eq__(self, other):
        return self.as_full_tuple == other.as_full_tuple

    def __ne__(self, other):
        return self.as_full_tuple != other.as_full_tuple

    def __gt__(self, other):
        return self.as_full_tuple > other.as_full_tuple

    def __ge__(self, other):
        return self.as_full_tuple >= other.as_full_tuple
pcs-0.10.11/pcs/common/types.py000066400000000000000000000025141412706364600162450ustar00rootroot00000000000000from enum import auto

from pcs.common.tools import AutoNameEnum


class CibNvsetType(AutoNameEnum):
    INSTANCE = auto()
    META = auto()


class CibRuleExpressionType(AutoNameEnum):
    RULE = auto()
    EXPRESSION = auto()  # node attribute expression, named 'expression' in CIB
    DATE_EXPRESSION = auto()
    OP_EXPRESSION = auto()
    RSC_EXPRESSION = auto()


class CibRuleInEffectStatus(AutoNameEnum):
    NOT_YET_IN_EFFECT = auto()
    IN_EFFECT = auto()
    EXPIRED = auto()
    UNKNOWN = auto()


class ResourceRelationType(AutoNameEnum):
    ORDER = auto()
    ORDER_SET = auto()
    INNER_RESOURCES = auto()
    OUTER_RESOURCE = auto()
    RSC_PRIMITIVE = auto()
    RSC_CLONE = auto()
    RSC_GROUP = auto()
    RSC_BUNDLE = auto()
    RSC_UNKNOWN = auto()


class DrRole(AutoNameEnum):
    PRIMARY = auto()
    RECOVERY = auto()


class UnknownCorosyncTransportTypeException(Exception):
    def __init__(self, transport: str) -> None:
        super().__init__()
        self.transport = transport


class CorosyncTransportType(AutoNameEnum):
    UDP = auto()
    UDPU = auto()
    KNET = auto()

    @classmethod
    def from_str(cls, transport: str) -> "CorosyncTransportType":
        try:
            return cls(transport.upper())
        except ValueError:
            raise UnknownCorosyncTransportTypeException(transport) from None
pcs-0.10.11/pcs/common/validate.py000066400000000000000000000020411412706364600166650ustar00rootroot00000000000000import re
from typing import (
    Optional,
    Union,
)

_INTEGER_RE = re.compile(r"^[+-]?[0-9]+$")


def is_integer(
    value: Union[str, int, float],
    at_least: Optional[int] = None,
    at_most: Optional[int] = None,
) -> bool:
    """
    Check if the specified value is an integer, optionally check a range

    value -- value to check
    at_least -- minimal allowed value
    at_most -- maximal allowed value
    """
    try:
        if value is None or isinstance(value, float):
            return False
        if isinstance(value, str) and not _INTEGER_RE.fullmatch(value):
            return False
        value_int = int(value)
        if at_least is not None and value_int < at_least:
            return False
        if at_most is not None and value_int > at_most:
            return False
    except ValueError:
        return False
    return True


def is_port_number(value: str) -> bool:
    """
    Check if the specified value is a TCP or UDP port number

    value -- value to check
    """
    return is_integer(value, 1, 65535)
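A few concrete values for the helpers above (an editor's sketch, not part of the pcs sources; the assertions follow directly from the code):

from pcs.common.tools import Version, timeout_to_seconds
from pcs.common.validate import is_integer, is_port_number

assert timeout_to_seconds("90") == 90
assert timeout_to_seconds("2min") == 120  # suffixes: s/sec, m/min, h/hr
assert timeout_to_seconds("-5") is None   # negative timeouts are rejected
assert Version(2, 1) > Version(2)         # missing parts compare as 0
assert str(Version(2, 1)) == "2.1"
assert is_integer("+42") and not is_integer("4.2")
assert is_port_number("5405") and not is_port_number("0")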
pcs-0.10.11/pcs/config.py000066400000000000000000001103141412706364600150540ustar00rootroot00000000000000import sys import os import os.path import re import datetime from io import BytesIO import tarfile import json from xml.dom.minidom import parse import logging import pwd import grp import tempfile import time import shutil import difflib try: import distro no_distro_package = False except ImportError: no_distro_package = True import platform # TODO remove, deprecated try: import clufter.facts import clufter.format_manager import clufter.filter_manager import clufter.command_manager no_clufter = False except ImportError: no_clufter = True from pcs import ( cluster, constraint, quorum, resource, settings, status, stonith, usage, utils, alert, ) from pcs.cli.common import middleware from pcs.cli.common.errors import CmdLineInputError from pcs.cli.constraint import command as constraint_command from pcs.cli.nvset import nvset_dto_list_to_lines from pcs.cli.reports import process_library_reports from pcs.cli.reports.output import warn from pcs.common.reports import constraints as constraints_reports from pcs.common.str_tools import indent from pcs.lib.commands import quorum as lib_quorum from pcs.lib.errors import LibraryError from pcs.lib.node import get_existing_nodes_names # pylint: disable=too-many-branches, too-many-locals, too-many-statements def config_show(lib, argv, modifiers): """ Options: * -f - CIB file, when getting cluster name on remote node (corosync.conf doesn't exist) * --corosync_conf - corosync.conf file """ modifiers.ensure_only_supported("-f", "--corosync_conf") if argv: raise CmdLineInputError() print("Cluster Name: %s" % utils.getClusterName()) status.nodes_status(lib, ["config"], modifiers.get_subset("-f")) print() print("\n".join(_config_show_cib_lines(lib))) if ( utils.hasCorosyncConf() and not modifiers.is_specified("-f") and not modifiers.is_specified("--corosync_conf") ): cluster.cluster_uidgid( lib, [], modifiers.get_subset(), silent_list=True ) if modifiers.is_specified("--corosync_conf") or utils.hasCorosyncConf(): print() print("Quorum:") try: config = lib_quorum.get_config(utils.get_lib_env()) print("\n".join(indent(quorum.quorum_config_to_str(config)))) except LibraryError as e: process_library_reports(e.args) def _config_show_cib_lines(lib): """ Commandline options: * -f - CIB file """ # update of pcs_options will change output of constraint show and # displaying resources and operations defaults utils.pcs_options["--full"] = 1 # get latest modifiers object after updating pcs_options modifiers = utils.get_input_modifiers() cib_xml = utils.get_cib() cib_etree = utils.get_cib_etree(cib_xml=cib_xml) cib_dom = utils.get_cib_dom(cib_xml=cib_xml) resource_lines = [] stonith_lines = [] for resource_el in cib_etree.find(".//resources"): is_stonith = ( "class" in resource_el.attrib and resource_el.attrib["class"] == "stonith" ) resource_el_lines = resource.resource_node_lines(resource_el) if is_stonith: stonith_lines += resource_el_lines else: resource_lines += resource_el_lines all_lines = [] all_lines.append("Resources:") all_lines.extend(indent(resource_lines, indent_step=1)) all_lines.append("") all_lines.append("Stonith Devices:") all_lines.extend(indent(stonith_lines, indent_step=1)) all_lines.append("Fencing Levels:") levels_lines = stonith.stonith_level_config_to_str( lib.fencing_topology.get_config() ) if levels_lines: all_lines.extend(indent(levels_lines, indent_step=2)) all_lines.append("") constraints_element = 
cib_dom.getElementsByTagName("constraints")[0] all_lines.extend( constraint.location_lines( constraints_element, showDetail=True, show_expired=True, verify_expiration=False, ) ) all_lines.extend( constraint_command.config_cmd( "Ordering Constraints:", lib.constraint_order.config, constraints_reports.order_plain, modifiers.get_subset("-f", "--full"), ) ) all_lines.extend( constraint_command.config_cmd( "Colocation Constraints:", lib.constraint_colocation.config, constraints_reports.colocation_plain, modifiers.get_subset("-f", "--full"), ) ) all_lines.extend( constraint_command.config_cmd( "Ticket Constraints:", lib.constraint_ticket.config, constraints_reports.ticket_plain, modifiers.get_subset("-f", "--full"), ) ) all_lines.append("") all_lines.extend(alert.alert_config_lines(lib)) all_lines.append("") all_lines.append("Resources Defaults:") all_lines.extend( indent( nvset_dto_list_to_lines( lib.cib_options.resource_defaults_config( evaluate_expired=False ), with_ids=modifiers.get("--full"), text_if_empty="No defaults set", ) ) ) all_lines.append("Operations Defaults:") all_lines.extend( indent( nvset_dto_list_to_lines( lib.cib_options.operation_defaults_config( evaluate_expired=False ), with_ids=modifiers.get("--full"), text_if_empty="No defaults set", ) ) ) all_lines.append("") all_lines.append("Cluster Properties:") properties = utils.get_set_properties() all_lines.extend( indent( [ "{0}: {1}".format(prop, val) for prop, val in sorted(properties.items()) ], indent_step=1, ) ) all_lines.append("") all_lines.append("Tags:") tags = lib.tag.config([]) if not tags: all_lines.append(" No tags defined") tag_lines = [] for tag in tags: tag_lines.append(tag["tag_id"]) tag_lines.extend(indent(tag["idref_list"])) all_lines.extend(indent(tag_lines, indent_step=1)) return all_lines def config_backup(lib, argv, modifiers): """ Options: * --force - overwrite file if already exists """ del lib modifiers.ensure_only_supported("--force") if len(argv) > 1: usage.config(["backup"]) sys.exit(1) outfile_name = None if argv: outfile_name = argv[0] if not outfile_name.endswith(".tar.bz2"): outfile_name += ".tar.bz2" tar_data = config_backup_local() if outfile_name: ok, message = utils.write_file( outfile_name, tar_data, permissions=0o600, binary=True ) if not ok: utils.err(message) else: # in python3 stdout accepts str so we need to use buffer sys.stdout.buffer.write(tar_data) def config_backup_local(): """ Commandline options: no options """ file_list = config_backup_path_list() tar_data = BytesIO() try: with tarfile.open(fileobj=tar_data, mode="w|bz2") as tarball: config_backup_add_version_to_tarball(tarball) for tar_path, path_info in file_list.items(): if ( not os.path.exists(path_info["path"]) and not path_info["required"] ): continue tarball.add(path_info["path"], tar_path) except (tarfile.TarError, EnvironmentError) as e: utils.err("unable to create tarball: %s" % e) tar = tar_data.getvalue() tar_data.close() return tar def config_restore(lib, argv, modifiers): """ Options: * --local - restore config only on local node * --request-timeout - timeout for HTTP requests, used only if --local was not defined or user is not root """ del lib modifiers.ensure_only_supported("--local", "--request-timeout") if len(argv) > 1: usage.config(["restore"]) sys.exit(1) infile_name = infile_obj = None if argv: infile_name = argv[0] if not infile_name: # in python3 stdin returns str so we need to use buffer infile_obj = BytesIO(sys.stdin.buffer.read()) if os.getuid() == 0: if modifiers.get("--local"): 
config_restore_local(infile_name, infile_obj) else: config_restore_remote(infile_name, infile_obj) else: new_argv = ["config", "restore"] new_stdin = None if modifiers.get("--local"): new_argv.append("--local") if infile_name: new_argv.append(os.path.abspath(infile_name)) else: new_stdin = infile_obj.read() err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd( new_argv, new_stdin ) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) print(std_out) sys.stderr.write(std_err) sys.exit(exitcode) def config_restore_remote(infile_name, infile_obj): """ Commandline options: * --request-timeout - timeout for HTTP requests """ extracted = { "version.txt": "", "corosync.conf": "", } try: with tarfile.open(infile_name, "r|*", infile_obj) as tarball: while True: # next(tarball) does not work in python2.6 tar_member_info = tarball.next() if tar_member_info is None: break if tar_member_info.name in extracted: tar_member = tarball.extractfile(tar_member_info) extracted[tar_member_info.name] = tar_member.read() tar_member.close() except (tarfile.TarError, EnvironmentError) as e: utils.err("unable to read the tarball: %s" % e) config_backup_check_version(extracted["version.txt"]) node_list, report_list = get_existing_nodes_names( utils.get_corosync_conf_facade( conf_text=extracted["corosync.conf"].decode("utf-8") ) ) if report_list: process_library_reports(report_list) if not node_list: utils.err("no nodes found in the tarball") err_msgs = [] for node in node_list: try: retval, output = utils.checkStatus(node) if retval != 0: err_msgs.append(output) continue _status = json.loads(output) if ( _status["corosync"] or _status["pacemaker"] or # not supported by older pcsd, do not fail if not present _status.get("pacemaker_remote", False) ): err_msgs.append( "Cluster is currently running on node %s. You need to stop " "the cluster in order to restore the configuration." % node ) continue except (ValueError, NameError, LookupError): err_msgs.append("unable to determine status of the node %s" % node) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) # Temporarily disable config files syncing thread in pcsd so it will not # rewrite restored files. 10 minutes should be enough time to restore. # If node returns HTTP 404 it does not support config syncing at all. for node in node_list: retval, output = utils.pauseConfigSyncing(node, 10 * 60) if not (retval == 0 or "(HTTP error: 404)" in output): utils.err(output) if infile_obj: infile_obj.seek(0) tarball_data = infile_obj.read() else: with open(infile_name, "rb") as tarball: tarball_data = tarball.read() error_list = [] for node in node_list: retval, error = utils.restoreConfig(node, tarball_data) if retval != 0: error_list.append(error) if error_list: utils.err("unable to restore all nodes\n" + "\n".join(error_list)) def config_restore_local(infile_name, infile_obj): """ Commandline options: no options """ service_manager = utils.get_service_manager() if ( service_manager.is_running("corosync") or service_manager.is_running("pacemaker") or service_manager.is_running("pacemaker_remote") ): utils.err( "Cluster is currently running on this node. You need to stop " "the cluster in order to restore the configuration." 
) file_list = config_backup_path_list(with_uid_gid=True) tarball_file_list = [] version = None tmp_dir = None try: with tarfile.open(infile_name, "r|*", infile_obj) as tarball: while True: # next(tarball) does not work in python2.6 tar_member_info = tarball.next() if tar_member_info is None: break if tar_member_info.name == "version.txt": version_data = tarball.extractfile(tar_member_info) version = version_data.read() version_data.close() continue tarball_file_list.append(tar_member_info.name) required_file_list = [ tar_path for tar_path, path_info in file_list.items() if path_info["required"] ] missing = set(required_file_list) - set(tarball_file_list) if missing: utils.err( "unable to restore the cluster, missing files in backup: %s" % ", ".join(missing) ) config_backup_check_version(version) if infile_obj: infile_obj.seek(0) with tarfile.open(infile_name, "r|*", infile_obj) as tarball: while True: # next(tarball) does not work in python2.6 tar_member_info = tarball.next() if tar_member_info is None: break extract_info = None path = tar_member_info.name while path: if path in file_list: extract_info = file_list[path] break path = os.path.dirname(path) if not extract_info: continue path_full = None if hasattr(extract_info.get("pre_store_call"), "__call__"): extract_info["pre_store_call"]() if "rename" in extract_info and extract_info["rename"]: if tmp_dir is None: tmp_dir = tempfile.mkdtemp() tarball.extractall(tmp_dir, [tar_member_info]) path_full = extract_info["path"] shutil.move( os.path.join(tmp_dir, tar_member_info.name), path_full ) else: dir_path = os.path.dirname(extract_info["path"]) tarball.extractall(dir_path, [tar_member_info]) path_full = os.path.join(dir_path, tar_member_info.name) file_attrs = extract_info["attrs"] os.chmod(path_full, file_attrs["mode"]) os.chown(path_full, file_attrs["uid"], file_attrs["gid"]) except (tarfile.TarError, EnvironmentError, OSError) as e: utils.err("unable to restore the cluster: %s" % e) finally: if tmp_dir: shutil.rmtree(tmp_dir, ignore_errors=True) try: sig_path = os.path.join(settings.cib_dir, "cib.xml.sig") if os.path.exists(sig_path): os.remove(sig_path) except EnvironmentError as e: utils.err("unable to remove %s: %s" % (sig_path, e)) def config_backup_path_list(with_uid_gid=False): """ Commandline options: no option NOTE: corosync.conf path may be altered using --corosync_conf """ corosync_attrs = { "mtime": int(time.time()), "mode": 0o644, "uname": "root", "gname": "root", "uid": 0, "gid": 0, } corosync_authkey_attrs = dict(corosync_attrs) corosync_authkey_attrs["mode"] = 0o400 cib_attrs = { "mtime": int(time.time()), "mode": 0o600, "uname": settings.pacemaker_uname, "gname": settings.pacemaker_gname, } if with_uid_gid: cib_attrs["uid"] = _get_uid(cib_attrs["uname"]) cib_attrs["gid"] = _get_gid(cib_attrs["gname"]) pcmk_authkey_attrs = dict(cib_attrs) pcmk_authkey_attrs["mode"] = 0o440 file_list = { "cib.xml": { "path": os.path.join(settings.cib_dir, "cib.xml"), "required": True, "attrs": dict(cib_attrs), }, "corosync_authkey": { "path": settings.corosync_authkey_file, "required": False, "attrs": corosync_authkey_attrs, "restore_procedure": None, "rename": True, }, "pacemaker_authkey": { "path": settings.pacemaker_authkey_file, "required": False, "attrs": pcmk_authkey_attrs, "restore_procedure": None, "rename": True, "pre_store_call": _ensure_etc_pacemaker_exists, }, "corosync.conf": { "path": settings.corosync_conf_file, "required": True, "attrs": dict(corosync_attrs), }, "uidgid.d": { "path": settings.corosync_uidgid_dir, 
"required": False, "attrs": dict(corosync_attrs), }, "pcs_settings.conf": { "path": settings.pcsd_settings_conf_location, "required": False, "attrs": { "mtime": int(time.time()), "mode": 0o644, "uname": "root", "gname": "root", "uid": 0, "gid": 0, }, }, } return file_list def _get_uid(user_name): """ Commandline options: no options """ try: return pwd.getpwnam(user_name).pw_uid except KeyError: return utils.err( "Unable to determine uid of user '{0}'".format(user_name) ) def _get_gid(group_name): """ Commandline options: no options """ try: return grp.getgrnam(group_name).gr_gid except KeyError: return utils.err( "Unable to determine gid of group '{0}'".format(group_name) ) def _ensure_etc_pacemaker_exists(): """ Commandline options: no options """ dir_name = os.path.dirname(settings.pacemaker_authkey_file) if not os.path.exists(dir_name): os.mkdir(dir_name) os.chmod(dir_name, 0o750) os.chown( dir_name, _get_uid(settings.pacemaker_uname), _get_gid(settings.pacemaker_gname), ) def config_backup_check_version(version): """ Commandline options: no options """ try: version_number = int(version) supported_version = config_backup_version() if version_number > supported_version: utils.err( "Unsupported version of the backup, " "supported version is %d, backup version is %d" % (supported_version, version_number) ) if version_number < supported_version: print( "Warning: restoring from the backup version %d, " "current supported version is %s" % (version_number, supported_version) ) except TypeError: utils.err("Cannot determine version of the backup") def config_backup_add_version_to_tarball(tarball, version=None): """ Commandline options: no options """ ver = version if version is not None else str(config_backup_version()) return utils.tar_add_file_data(tarball, ver.encode("utf-8"), "version.txt") def config_backup_version(): """ Commandline options: no options """ return 1 def config_checkpoint_list(lib, argv, modifiers): """ Options: no options """ del lib modifiers.ensure_only_supported() if argv: raise CmdLineInputError() try: file_list = os.listdir(settings.cib_dir) except OSError as e: utils.err("unable to list checkpoints: %s" % e) cib_list = [] cib_name_re = re.compile(r"^cib-(\d+)\.raw$") for filename in file_list: match = cib_name_re.match(filename) if not match: continue file_path = os.path.join(settings.cib_dir, filename) try: if os.path.isfile(file_path): cib_list.append( (float(os.path.getmtime(file_path)), match.group(1)) ) except OSError: pass cib_list.sort() if not cib_list: print("No checkpoints available") return for cib_info in cib_list: print( "checkpoint %s: date %s" % (cib_info[1], datetime.datetime.fromtimestamp(round(cib_info[0]))) ) def _checkpoint_to_lines(lib, checkpoint_number): # backup current settings orig_usefile = utils.usefile orig_filename = utils.filename orig_middleware = lib.middleware_factory # configure old code to read the CIB from a file utils.usefile = True utils.filename = os.path.join( settings.cib_dir, "cib-%s.raw" % checkpoint_number ) # configure new code to read the CIB from a file lib.middleware_factory = orig_middleware._replace( cib=middleware.cib(utils.filename, utils.touch_cib_file) ) # export the CIB to text result = False, [] if os.path.isfile(utils.filename): result = True, _config_show_cib_lines(lib) # restore original settings utils.usefile = orig_usefile utils.filename = orig_filename lib.middleware_factory = orig_middleware return result def config_checkpoint_view(lib, argv, modifiers): """ Options: no options """ 
    modifiers.ensure_only_supported()
    if len(argv) != 1:
        usage.config(["checkpoint view"])
        sys.exit(1)

    loaded, lines = _checkpoint_to_lines(lib, argv[0])
    if not loaded:
        utils.err("unable to read the checkpoint")
    print("\n".join(lines))


def config_checkpoint_diff(lib, argv, modifiers):
    """
    Commandline options:
      * -f - CIB file
    """
    modifiers.ensure_only_supported("-f")
    if len(argv) != 2:
        usage.config(["checkpoint diff"])
        sys.exit(1)

    if argv[0] == argv[1]:
        utils.err("cannot diff a checkpoint against itself")
    errors = []
    checkpoints_lines = []
    for checkpoint in argv:
        if checkpoint == "live":
            lines = _config_show_cib_lines(lib)
            if not lines:
                errors.append("unable to read live configuration")
            else:
                checkpoints_lines.append(lines)
        else:
            loaded, lines = _checkpoint_to_lines(lib, checkpoint)
            if not loaded:
                errors.append(
                    "unable to read checkpoint '{0}'".format(checkpoint)
                )
            else:
                checkpoints_lines.append(lines)
    if errors:
        utils.err("\n".join(errors))

    print(
        "Differences between {0} (-) and {1} (+):".format(
            *[
                "live configuration"
                if label == "live"
                else f"checkpoint {label}"
                for label in argv
            ]
        )
    )
    print(
        "\n".join(
            [
                line.rstrip()
                for line in difflib.Differ().compare(
                    checkpoints_lines[0], checkpoints_lines[1]
                )
            ]
        )
    )


def config_checkpoint_restore(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file, a checkpoint will be restored into a specified file
    """
    # pylint: disable=broad-except
    del lib
    modifiers.ensure_only_supported("-f")
    if len(argv) != 1:
        usage.config(["checkpoint restore"])
        sys.exit(1)

    cib_path = os.path.join(settings.cib_dir, "cib-%s.raw" % argv[0])
    try:
        snapshot_dom = parse(cib_path)
    except Exception as e:
        utils.err("unable to read the checkpoint: %s" % e)
    utils.replace_cib_configuration(snapshot_dom)


# TODO remove, deprecated command
def config_import_cman(lib, argv, modifiers):
    """
    Options:
      * --force - skip checks, overwrite files
      * --interactive - interactive issue resolving
      * --request-timeout - effective only when output is not specified
    """
    # pylint: disable=no-member
    del lib
    warn("This command is deprecated and will be removed.")
    modifiers.ensure_only_supported(
        "--force",
        "--interactive",
        "--request-timeout",
    )
    if no_clufter:
        utils.err(
            "Unable to perform a CMAN cluster conversion due to missing "
            "python-clufter package"
        )
    clufter_supports_corosync3 = hasattr(
        clufter.facts, "cluster_pcs_camelback"
    )
    # prepare convertor options
    cluster_conf = settings.cluster_conf_file
    dry_run_output = None
    output_format = "corosync.conf"
    dist = None
    invalid_args = False
    for arg in argv:
        if "=" in arg:
            name, value = arg.split("=", 1)
            if name == "input":
                cluster_conf = value
            elif name == "output":
                dry_run_output = value
            elif name == "output-format":
                if value in (
                    "corosync.conf",
                    "pcs-commands",
                    "pcs-commands-verbose",
                ):
                    output_format = value
                else:
                    invalid_args = True
            elif name == "dist":
                dist = value
            else:
                invalid_args = True
        else:
            invalid_args = True
    if output_format not in ("pcs-commands", "pcs-commands-verbose") and (
        dry_run_output and not dry_run_output.endswith(".tar.bz2")
    ):
        dry_run_output += ".tar.bz2"
    if invalid_args or not dry_run_output:
        usage.config(["import-cman"])
        sys.exit(1)
    debug = modifiers.get("--debug")
    force = modifiers.get("--force")
    interactive = modifiers.get("--interactive")

    if dist is not None:
        if not clufter_supports_corosync3:
            utils.err(
                "Unable to perform a CMAN cluster conversion due to clufter "
                "not supporting Corosync 3. Please, upgrade clufter packages."
) if not clufter.facts.cluster_pcs_camelback("linux", dist.split(",")): utils.err("dist does not match output-format") elif output_format == "corosync.conf": dist = _get_linux_dist() else: # for output-format=pcs-command[-verbose] dist = _get_linux_dist() clufter_args = { "input": str(cluster_conf), "cib": {"passin": "bytestring"}, "nocheck": force, "batch": True, "sys": "linux", "dist": dist, } if interactive: if "EDITOR" not in os.environ: utils.err("$EDITOR environment variable is not set") clufter_args["batch"] = False clufter_args["editor"] = os.environ["EDITOR"] if debug: logging.getLogger("clufter").setLevel(logging.DEBUG) if output_format == "corosync.conf": clufter_args["coro"] = {"passin": "struct"} cmd_name = "ccs2pcs-camelback" elif output_format in ("pcs-commands", "pcs-commands-verbose"): clufter_args["output"] = {"passin": "bytestring"} clufter_args["start_wait"] = "60" clufter_args["tmp_cib"] = "tmp-cib.xml" clufter_args["force"] = force clufter_args["text_width"] = "80" clufter_args["silent"] = True clufter_args["noguidance"] = True if output_format == "pcs-commands-verbose": clufter_args["text_width"] = "-1" clufter_args["silent"] = False clufter_args["noguidance"] = False if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")): cmd_name = "ccs2pcscmd-flatiron" elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")): cmd_name = "ccs2pcscmd-needle" elif clufter_supports_corosync3 and clufter.facts.cluster_pcs_camelback( "linux", dist.split(",") ): cmd_name = "ccs2pcscmd-camelback" else: utils.err( "unrecognized dist, try something recognized" + " (e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)" ) clufter_args_obj = type(str("ClufterOptions"), (object,), clufter_args) # run convertor run_clufter( cmd_name, clufter_args_obj, debug, force, "Error: unable to import cluster configuration", ) # save commands if output_format in ("pcs-commands", "pcs-commands-verbose"): ok, message = utils.write_file( dry_run_output, clufter_args_obj.output["passout"].decode() ) if not ok: utils.err(message) return # put new config files into tarball file_list = config_backup_path_list() for file_item in file_list.values(): file_item["attrs"]["uname"] = "root" file_item["attrs"]["gname"] = "root" file_item["attrs"]["uid"] = 0 file_item["attrs"]["gid"] = 0 file_item["attrs"]["mode"] = 0o600 tar_data = BytesIO() try: with tarfile.open(fileobj=tar_data, mode="w|bz2") as tarball: config_backup_add_version_to_tarball(tarball) utils.tar_add_file_data( tarball, clufter_args_obj.cib["passout"], "cib.xml", **file_list["cib.xml"]["attrs"], ) # put uidgid into separate files fmt_simpleconfig = clufter.format_manager.FormatManager.init_lookup( "simpleconfig" ).plugins["simpleconfig"] corosync_struct = [] uidgid_list = [] for section in clufter_args_obj.coro["passout"][2]: if section[0] == "uidgid": uidgid_list.append(section[1]) else: corosync_struct.append(section) corosync_conf_data = fmt_simpleconfig( "struct", ("corosync", (), corosync_struct) )("bytestring") utils.tar_add_file_data( tarball, corosync_conf_data, "corosync.conf", **file_list["corosync.conf"]["attrs"], ) for uidgid in uidgid_list: uid = "" gid = "" for item in uidgid: if item[0] == "uid": uid = item[1] if item[0] == "gid": gid = item[1] filename = utils.get_uid_gid_file_name(uid, gid) uidgid_data = fmt_simpleconfig( "struct", ("corosync", (), [("uidgid", uidgid, None)]) )("bytestring") utils.tar_add_file_data( tarball, uidgid_data, "uidgid.d/" + filename, **file_list["uidgid.d"]["attrs"], ) except 
(tarfile.TarError, EnvironmentError) as e: utils.err("unable to create tarball: %s" % e) tar_data.seek(0) # save tarball / remote restore if dry_run_output: ok, message = utils.write_file( dry_run_output, tar_data.read(), permissions=0o600, binary=True ) if not ok: utils.err(message) else: config_restore_remote(None, tar_data) tar_data.close() def _get_linux_dist(): if no_distro_package: # For Python 3.8+, python3-distro is a required dependency and we # should never get here. Pylint, of course, cannot know that. # pylint: disable=deprecated-method # pylint: disable=no-member distribution = platform.linux_distribution(full_distribution_name=False) else: distribution = distro.linux_distribution(full_distribution_name=False) return ",".join(distribution) # TODO remove, deprecated command def config_export_pcs_commands(lib, argv, modifiers, verbose=False): """ Options: * --force - skip checks, overwrite files * --interactive - interactive issue resolving * -f - CIB file * --corosync_conf """ del lib warn("This command is deprecated and will be removed.") modifiers.ensure_only_supported( "--force", "--interactive", "-f", "--corosync_conf" ) if no_clufter: utils.err( "Unable to perform export due to missing python-clufter package" ) # parse options debug = modifiers.get("--debug") force = modifiers.get("--force") interactive = modifiers.get("--interactive") invalid_args = False output_file = None dist = None for arg in argv: if "=" in arg: name, value = arg.split("=", 1) if name == "output": output_file = value elif name == "dist": dist = value else: invalid_args = True else: invalid_args = True # check options if invalid_args: usage.config(["export pcs-commands"]) sys.exit(1) # complete optional options if dist is None: dist = _get_linux_dist() # prepare convertor options clufter_args = { "nocheck": force, "batch": True, "sys": "linux", "dist": dist, "coro": settings.corosync_conf_file, "start_wait": "60", "tmp_cib": "tmp-cib.xml", "force": force, "text_width": "80", "silent": True, "noguidance": True, } if output_file: clufter_args["output"] = {"passin": "bytestring"} else: clufter_args["output"] = "-" if interactive: if "EDITOR" not in os.environ: utils.err("$EDITOR environment variable is not set") clufter_args["batch"] = False clufter_args["editor"] = os.environ["EDITOR"] if debug: logging.getLogger("clufter").setLevel(logging.DEBUG) if utils.usefile: clufter_args["cib"] = os.path.abspath(utils.filename) else: clufter_args["cib"] = ("bytestring", utils.get_cib()) if verbose: clufter_args["text_width"] = "-1" clufter_args["silent"] = False clufter_args["noguidance"] = False clufter_args_obj = type(str("ClufterOptions"), (object,), clufter_args) cmd_name = "pcs2pcscmd-camelback" # run convertor run_clufter( cmd_name, clufter_args_obj, debug, force, "Error: unable to export cluster configuration", ) # save commands if not printed to stdout by clufter if output_file: # pylint: disable=no-member ok, message = utils.write_file( output_file, clufter_args_obj.output["passout"].decode() ) if not ok: utils.err(message) # TODO remove, deprecated def run_clufter(cmd_name, cmd_args, debug, force, err_prefix): """ Commandline options: no options used but messages which include --force, --debug and --interactive are generated """ # pylint: disable=broad-except try: result = None cmd_manager = clufter.command_manager.CommandManager.init_lookup( cmd_name ) result = cmd_manager.commands[cmd_name](cmd_args) error_message = "" except Exception as e: error_message = str(e) if error_message or result != 0: 
hints = [] hints.append("--interactive to solve the issues manually") if not debug: hints.append("--debug to get more information") if not force: hints.append("--force to override") hints_string = "\nTry using %s." % ", ".join(hints) if hints else "" sys.stderr.write( err_prefix + (": %s" % error_message if error_message else "") + hints_string + "\n" ) sys.exit(1 if result is None else result) pcs-0.10.11/pcs/constraint.py000066400000000000000000001505141412706364600160010ustar00rootroot00000000000000import sys from collections import defaultdict from os.path import isfile import xml.dom.minidom from xml.dom.minidom import parseString from enum import Enum from pcs import ( rule as rule_utils, settings, usage, utils, ) from pcs.cli.common import parse_args from pcs.cli.common.errors import CmdLineInputError import pcs.cli.constraint_colocation.command as colocation_command import pcs.cli.constraint_order.command as order_command from pcs.cli.constraint_ticket import command as ticket_command from pcs.cli.reports import process_library_reports from pcs.cli.reports.output import warn from pcs.common import ( const, pacemaker, reports, ) from pcs.common.reports import ReportItem from pcs.common.reports.constraints import ( colocation as colocation_format, order as order_format, ) from pcs.common.str_tools import format_list from pcs.lib.cib.constraint import resource_set from pcs.lib.cib.constraint.order import ATTRIB as order_attrib from pcs.lib.node import get_existing_nodes_names from pcs.lib.pacemaker.values import ( sanitize_id, SCORE_INFINITY, ) # pylint: disable=too-many-branches, too-many-statements # pylint: disable=invalid-name, too-many-nested-blocks # pylint: disable=too-many-locals, too-many-lines OPTIONS_ACTION = resource_set.ATTRIB["action"] DEFAULT_ACTION = "start" DEFAULT_ROLE = const.PCMK_ROLE_STARTED OPTIONS_SYMMETRICAL = order_attrib["symmetrical"] OPTIONS_KIND = order_attrib["kind"] LOCATION_NODE_VALIDATION_SKIP_MSG = ( "Validation for node existence in the cluster will be skipped" ) CRM_RULE_MISSING_MSG = ( "Warning: crm_rule is not available, therefore expired constraints may be " "shown. 
Consider upgrading pacemaker.\n"
)

RESOURCE_TYPE_RESOURCE = "resource"
RESOURCE_TYPE_REGEXP = "regexp"

RULE_IN_EFFECT = "in effect"
RULE_EXPIRED = "expired"
RULE_NOT_IN_EFFECT = "not yet in effect"
RULE_UNKNOWN_STATUS = "unknown status"


class CrmRuleReturnCode(Enum):
    IN_EFFECT = 0
    EXPIRED = 110
    TO_BE_IN_EFFECT = 111


def constraint_location_cmd(lib, argv, modifiers):
    if not argv:
        sub_cmd = "config"
    else:
        sub_cmd = argv.pop(0)
    try:
        if sub_cmd == "add":
            location_add(lib, argv, modifiers)
        elif sub_cmd in ["remove", "delete"]:
            location_remove(lib, argv, modifiers)
        elif sub_cmd == "show":
            location_show(lib, argv, modifiers)
        elif sub_cmd == "config":
            location_config_cmd(lib, argv, modifiers)
        elif len(argv) >= 2:
            if argv[0] == "rule":
                location_rule(lib, [sub_cmd] + argv, modifiers)
            else:
                location_prefer(lib, [sub_cmd] + argv, modifiers)
        else:
            raise CmdLineInputError()
    except CmdLineInputError as e:
        utils.exit_on_cmdline_input_errror(
            e, "constraint", ["location", sub_cmd]
        )


def constraint_order_cmd(lib, argv, modifiers):
    if not argv:
        sub_cmd = "config"
    else:
        sub_cmd = argv.pop(0)
    try:
        if sub_cmd == "set":
            order_command.create_with_set(lib, argv, modifiers)
        elif sub_cmd in ["remove", "delete"]:
            order_rm(lib, argv, modifiers)
        elif sub_cmd == "show":
            order_command.show(lib, argv, modifiers)
        elif sub_cmd == "config":
            order_command.config_cmd(lib, argv, modifiers)
        else:
            order_start(lib, [sub_cmd] + argv, modifiers)
    except CmdLineInputError as e:
        utils.exit_on_cmdline_input_errror(e, "constraint", ["order", sub_cmd])


def constraint_show(lib, argv, modifiers):
    warn(
        "This command is deprecated and will be removed. "
        "Please use 'pcs constraint config' instead.",
        stderr=True,
    )
    return constraint_config_cmd(lib, argv, modifiers)


def constraint_config_cmd(lib, argv, modifiers):
    """
    Options:
      * --all - print expired constraints
      * -f - CIB file
      * --full
    """
    location_config_cmd(lib, argv, modifiers)
    order_command.config_cmd(lib, argv, modifiers.get_subset("--full", "-f"))
    colocation_command.config_cmd(
        lib, argv, modifiers.get_subset("--full", "-f")
    )
    ticket_command.config_cmd(lib, argv, modifiers.get_subset("--full", "-f"))


def colocation_rm(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("-f")
    elementFound = False
    if len(argv) < 2:
        raise CmdLineInputError()

    (dom, constraintsElement) = getCurrentConstraints()

    resource1 = argv[0]
    resource2 = argv[1]
    for co_loc in constraintsElement.getElementsByTagName("rsc_colocation")[:]:
        if (
            co_loc.getAttribute("rsc") == resource1
            and co_loc.getAttribute("with-rsc") == resource2
        ):
            constraintsElement.removeChild(co_loc)
            elementFound = True
        if (
            co_loc.getAttribute("rsc") == resource2
            and co_loc.getAttribute("with-rsc") == resource1
        ):
            constraintsElement.removeChild(co_loc)
            elementFound = True

    if elementFound:
        utils.replace_cib_configuration(dom)
    else:
        print("No matching resources found in ordering list")


def _validate_constraint_resource(cib_dom, resource_id):
    (
        resource_valid,
        resource_error,
        dummy_correct_id,
    ) = utils.validate_constraint_resource(cib_dom, resource_id)
    if not resource_valid:
        utils.err(resource_error)


# Syntax: colocation add [role] <source resource id> with [role]
# <target resource id> [score] [options]
# possible commands:
#   <src> with <tgt> [score] [options]
#   <src> with <role> <tgt> [score] [options]
#   <role> <src> with <tgt> [score] [options]
#   <role> <src> with <role> <tgt> [score] [options]
def colocation_add(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
      * --force - allow constraint on any resource, allow duplicate
        constraints
    """

    def _parse_score_options(argv):
        # When passed an array of arguments if the first
argument doesn't have # an '=' then it's the score, otherwise they're all arguments. Return a # tuple with the score and array of name,value pairs """ Commandline options: no options """ if not argv: return SCORE_INFINITY, [] score = SCORE_INFINITY if "=" in argv[0] else argv.pop(0) # create a list of 2-tuples (name, value) arg_array = [ parse_args.split_option(arg, allow_empty_value=False) for arg in argv ] return score, arg_array del lib modifiers.ensure_only_supported("-f", "--force") if len(argv) < 3: raise CmdLineInputError() role1 = "" role2 = "" cib_dom = utils.get_cib_dom() new_roles_supported = utils.isCibVersionSatisfied( cib_dom, const.PCMK_NEW_ROLES_CIB_VERSION ) def _validate_and_prepare_role(role): role_cleaned = role.lower().capitalize() if role_cleaned not in const.PCMK_ROLES: utils.err( "invalid role value '{0}', allowed values are: {1}".format( role, format_list(const.PCMK_ROLES) ) ) return pacemaker.role.get_value_for_cib( role_cleaned, new_roles_supported ) if argv[2] == "with": role1 = _validate_and_prepare_role(argv.pop(0)) resource1 = argv.pop(0) elif argv[1] == "with": resource1 = argv.pop(0) else: raise CmdLineInputError() if argv.pop(0) != "with": raise CmdLineInputError() if "with" in argv: raise CmdLineInputError( message="Multiple 'with's cannot be specified.", hint=( "Use the 'pcs constraint colocation set' command if you want " "to create a constraint for more than two resources." ), show_both_usage_and_message=True, ) if not argv: raise CmdLineInputError() if len(argv) == 1: resource2 = argv.pop(0) else: if utils.is_score_or_opt(argv[1]): resource2 = argv.pop(0) else: role2 = _validate_and_prepare_role(argv.pop(0)) resource2 = argv.pop(0) score, nv_pairs = _parse_score_options(argv) _validate_constraint_resource(cib_dom, resource1) _validate_constraint_resource(cib_dom, resource2) id_in_nvpairs = None for name, value in nv_pairs: if name == "id": id_valid, id_error = utils.validate_xml_id(value, "constraint id") if not id_valid: utils.err(id_error) if utils.does_id_exist(cib_dom, value): utils.err( "id '%s' is already in use, please specify another one" % value ) id_in_nvpairs = True if not id_in_nvpairs: nv_pairs.append( ( "id", utils.find_unique_id( cib_dom, "colocation-%s-%s-%s" % (resource1, resource2, score), ), ) ) (dom, constraintsElement) = getCurrentConstraints(cib_dom) # If one role is specified, the other should default to "started" if role1 != "" and role2 == "": role2 = DEFAULT_ROLE if role2 != "" and role1 == "": role1 = DEFAULT_ROLE element = dom.createElement("rsc_colocation") element.setAttribute("rsc", resource1) element.setAttribute("with-rsc", resource2) element.setAttribute("score", score) if role1 != "": element.setAttribute("rsc-role", role1) if role2 != "": element.setAttribute("with-rsc-role", role2) for nv_pair in nv_pairs: element.setAttribute(nv_pair[0], nv_pair[1]) if not modifiers.get("--force"): duplicates = colocation_find_duplicates(constraintsElement, element) if duplicates: utils.err( "duplicate constraint already exists, use --force to override\n" + "\n".join( [ " " + colocation_format.constraint_plain( {"options": dict(dup.attributes.items())}, True ) for dup in duplicates ] ) ) constraintsElement.appendChild(element) utils.replace_cib_configuration(dom) def colocation_find_duplicates(dom, constraint_el): """ Commandline options: no options """ new_roles_supported = utils.isCibVersionSatisfied( dom, const.PCMK_NEW_ROLES_CIB_VERSION ) def normalize(const_el): return ( const_el.getAttribute("rsc"), 
const_el.getAttribute("with-rsc"), pacemaker.role.get_value_for_cib( const_el.getAttribute("rsc-role").capitalize() or DEFAULT_ROLE, new_roles_supported, ), pacemaker.role.get_value_for_cib( const_el.getAttribute("with-rsc-role").capitalize() or DEFAULT_ROLE, new_roles_supported, ), ) normalized_el = normalize(constraint_el) return [ other_el for other_el in dom.getElementsByTagName("rsc_colocation") if not other_el.getElementsByTagName("resource_set") and constraint_el is not other_el and normalized_el == normalize(other_el) ] def order_rm(lib, argv, modifiers): """ Options: * -f - CIB file """ del lib modifiers.ensure_only_supported("-f") if not argv: raise CmdLineInputError() elementFound = False (dom, constraintsElement) = getCurrentConstraints() for resource in argv: for ord_loc in constraintsElement.getElementsByTagName("rsc_order")[:]: if ( ord_loc.getAttribute("first") == resource or ord_loc.getAttribute("then") == resource ): constraintsElement.removeChild(ord_loc) elementFound = True resource_refs_to_remove = [] for ord_set in constraintsElement.getElementsByTagName("resource_ref"): if ord_set.getAttribute("id") == resource: resource_refs_to_remove.append(ord_set) elementFound = True for res_ref in resource_refs_to_remove: res_set = res_ref.parentNode res_order = res_set.parentNode res_ref.parentNode.removeChild(res_ref) if not res_set.getElementsByTagName("resource_ref"): res_set.parentNode.removeChild(res_set) if not res_order.getElementsByTagName("resource_set"): res_order.parentNode.removeChild(res_order) if elementFound: utils.replace_cib_configuration(dom) else: utils.err("No matching resources found in ordering list") def order_start(lib, argv, modifiers): """ Options: * -f - CIB file * --force - allow constraint for any resource, allow duplicate constraints """ del lib modifiers.ensure_only_supported("-f", "--force") if len(argv) < 3: raise CmdLineInputError() first_action = DEFAULT_ACTION then_action = DEFAULT_ACTION action = argv[0] if action in OPTIONS_ACTION: first_action = action argv.pop(0) resource1 = argv.pop(0) if argv.pop(0) != "then": raise CmdLineInputError() if not argv: raise CmdLineInputError() action = argv[0] if action in OPTIONS_ACTION: then_action = action argv.pop(0) if not argv: raise CmdLineInputError() resource2 = argv.pop(0) order_options = [] if argv: order_options = order_options + argv[:] if "then" in order_options: raise CmdLineInputError( message="Multiple 'then's cannot be specified.", hint=( "Use the 'pcs constraint order set' command if you want to " "create a constraint for more than two resources." 
), show_both_usage_and_message=True, ) order_options.append("first-action=" + first_action) order_options.append("then-action=" + then_action) _order_add(resource1, resource2, order_options, modifiers) def _order_add(resource1, resource2, options_list, modifiers): """ Commandline options: * -f - CIB file * --force - allow constraint for any resource, allow duplicate constraints """ cib_dom = utils.get_cib_dom() _validate_constraint_resource(cib_dom, resource1) _validate_constraint_resource(cib_dom, resource2) order_options = [] id_specified = False sym = None for arg in options_list: if arg == "symmetrical": sym = "true" elif arg == "nonsymmetrical": sym = "false" else: name, value = parse_args.split_option(arg, allow_empty_value=False) if name == "id": id_valid, id_error = utils.validate_xml_id( value, "constraint id" ) if not id_valid: utils.err(id_error) if utils.does_id_exist(cib_dom, value): utils.err( "id '%s' is already in use, please specify another one" % value ) id_specified = True order_options.append((name, value)) elif name == "symmetrical": if value.lower() in OPTIONS_SYMMETRICAL: sym = value.lower() else: utils.err( "invalid symmetrical value '%s', allowed values are: %s" % (value, ", ".join(OPTIONS_SYMMETRICAL)) ) else: order_options.append((name, value)) if sym: order_options.append(("symmetrical", sym)) options = "" if order_options: options = " (Options: %s)" % " ".join( [ "%s=%s" % (name, value) for name, value in order_options if name not in ("kind", "score") ] ) scorekind = "kind: Mandatory" id_suffix = "mandatory" for opt in order_options: if opt[0] == "score": scorekind = "score: " + opt[1] id_suffix = opt[1] break if opt[0] == "kind": scorekind = "kind: " + opt[1] id_suffix = opt[1] break if not id_specified: order_id = "order-" + resource1 + "-" + resource2 + "-" + id_suffix order_id = utils.find_unique_id(cib_dom, order_id) order_options.append(("id", order_id)) (dom, constraintsElement) = getCurrentConstraints() element = dom.createElement("rsc_order") element.setAttribute("first", resource1) element.setAttribute("then", resource2) for order_opt in order_options: element.setAttribute(order_opt[0], order_opt[1]) constraintsElement.appendChild(element) if not modifiers.get("--force"): duplicates = order_find_duplicates(constraintsElement, element) if duplicates: utils.err( "duplicate constraint already exists, use --force to override\n" + "\n".join( [ " " + order_format.constraint_plain( {"options": dict(dup.attributes.items())}, True ) for dup in duplicates ] ) ) print( "Adding " + resource1 + " " + resource2 + " (" + scorekind + ")" + options ) utils.replace_cib_configuration(dom) def order_find_duplicates(dom, constraint_el): """ Commandline options: no options """ def normalize(constraint_el): return ( constraint_el.getAttribute("first"), constraint_el.getAttribute("then"), constraint_el.getAttribute("first-action").lower() or DEFAULT_ACTION, constraint_el.getAttribute("then-action").lower() or DEFAULT_ACTION, ) normalized_el = normalize(constraint_el) return [ other_el for other_el in dom.getElementsByTagName("rsc_order") if not other_el.getElementsByTagName("resource_set") and constraint_el is not other_el and normalized_el == normalize(other_el) ] def location_show(lib, argv, modifiers): warn( "This command is deprecated and will be removed. 
" "Please use 'pcs constraint location config' instead.", stderr=True, ) return location_config_cmd(lib, argv, modifiers) # Show the currently configured location constraints by node or resource def location_config_cmd(lib, argv, modifiers): """ Options: * --all - print expired constraints * --full - print all details * -f - CIB file """ del lib modifiers.ensure_only_supported("-f", "--full", "--all") by_node = False if argv and argv[0] == "nodes": by_node = True if len(argv) > 1: if by_node: valid_noderes = argv[1:] else: valid_noderes = [ parse_args.parse_typed_arg( arg, [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP], RESOURCE_TYPE_RESOURCE, ) for arg in argv[1:] ] else: valid_noderes = [] (dummy_dom, constraintsElement) = getCurrentConstraints() print( "\n".join( location_lines( constraintsElement, showDetail=modifiers.get("--full"), byNode=by_node, valid_noderes=valid_noderes, show_expired=modifiers.get("--all"), ) ) ) def location_lines( constraintsElement, showDetail=False, byNode=False, valid_noderes=None, show_expired=False, verify_expiration=True, ): """ Commandline options: no options """ all_lines = [] nodehashon = {} nodehashoff = {} rschashon = {} rschashoff = {} ruleshash = defaultdict(list) all_loc_constraints = constraintsElement.getElementsByTagName( "rsc_location" ) cib = utils.get_cib() if not isfile(settings.crm_rule): if verify_expiration: sys.stderr.write(CRM_RULE_MISSING_MSG) verify_expiration = False all_lines.append("Location Constraints:") for rsc_loc in all_loc_constraints: if rsc_loc.hasAttribute("rsc-pattern"): lc_rsc_type = RESOURCE_TYPE_REGEXP lc_rsc_value = rsc_loc.getAttribute("rsc-pattern") lc_name = "Resource pattern: {0}".format(lc_rsc_value) else: lc_rsc_type = RESOURCE_TYPE_RESOURCE lc_rsc_value = rsc_loc.getAttribute("rsc") lc_name = "Resource: {0}".format(lc_rsc_value) lc_rsc = lc_rsc_type, lc_rsc_value, lc_name lc_id = rsc_loc.getAttribute("id") lc_node = rsc_loc.getAttribute("node") lc_score = rsc_loc.getAttribute("score") lc_role = rsc_loc.getAttribute("role") lc_resource_discovery = rsc_loc.getAttribute("resource-discovery") for child in rsc_loc.childNodes: if child.nodeType == child.ELEMENT_NODE and child.tagName == "rule": ruleshash[lc_rsc].append(child) # NEED TO FIX FOR GROUP LOCATION CONSTRAINTS (where there are children # of # rsc_location) if lc_score == "": lc_score = "0" if lc_score == "INFINITY": positive = True elif lc_score == "-INFINITY": positive = False elif int(lc_score) >= 0: positive = True else: positive = False if positive: nodeshash = nodehashon rschash = rschashon else: nodeshash = nodehashoff rschash = rschashoff hash_element = { "id": lc_id, "rsc_type": lc_rsc_type, "rsc_value": lc_rsc_value, "rsc_label": lc_name, "node": lc_node, "score": lc_score, "role": lc_role, "resource-discovery": lc_resource_discovery, } if lc_node in nodeshash: nodeshash[lc_node].append(hash_element) else: nodeshash[lc_node] = [hash_element] if lc_rsc in rschash: rschash[lc_rsc].append(hash_element) else: rschash[lc_rsc] = [hash_element] nodelist = sorted(set(list(nodehashon.keys()) + list(nodehashoff.keys()))) rsclist = sorted( set(list(rschashon.keys()) + list(rschashoff.keys())), key=lambda item: ( { RESOURCE_TYPE_RESOURCE: 1, RESOURCE_TYPE_REGEXP: 0, }[item[0]], item[1], ), ) if byNode: for node in nodelist: if valid_noderes: if node not in valid_noderes: continue all_lines.append(" Node: " + node) nodehash_label = ( (nodehashon, " Allowed to run:"), (nodehashoff, " Not allowed to run:"), ) all_lines += _hashtable_to_lines( nodehash_label, 
"rsc_label", node, showDetail ) all_lines += _show_location_rules( ruleshash, cib, show_detail=showDetail, show_expired=show_expired, verify_expiration=verify_expiration, ) else: for rsc in rsclist: rsc_lines = [] if valid_noderes: if rsc[0:2] not in valid_noderes: continue rsc_lines.append(" {0}".format(rsc[2])) rschash_label = ( (rschashon, " Enabled on:"), (rschashoff, " Disabled on:"), ) rsc_lines += _hashtable_to_lines( rschash_label, "node", rsc, showDetail ) miniruleshash = {} miniruleshash[rsc] = ruleshash[rsc] rsc_lines += _show_location_rules( miniruleshash, cib, show_detail=showDetail, show_expired=show_expired, verify_expiration=verify_expiration, noheader=True, ) # Append to all_lines only if the resource has any constraints if len(rsc_lines) > 2: all_lines += rsc_lines return all_lines def _hashtable_to_lines(hash_label, hash_type, hash_key, show_detail): hash_lines = [] for hashtable, label in hash_label: if hash_key in hashtable: labeled_lines = [] for options in hashtable[hash_key]: # Skips nodeless constraints and prints nodes/resources if not options[hash_type]: continue line_parts = [ " {0}{1}".format( "Node: " if hash_type == "node" else "", options[hash_type], ) ] line_parts.append(f"(score:{options['score']})") if options["role"]: line_parts.append(f"(role:{options['role']})") if options["resource-discovery"]: line_parts.append( "(resource-discovery={0})".format( options["resource-discovery"] ) ) if show_detail: line_parts.append(f"(id:{options['id']})") labeled_lines.append(" ".join(line_parts)) if labeled_lines: labeled_lines.insert(0, label) hash_lines += labeled_lines return hash_lines def _show_location_rules( ruleshash, cib, show_detail, show_expired=False, verify_expiration=True, noheader=False, ): """ Commandline options: no options """ all_lines = [] constraint_options = {} for rsc in sorted( ruleshash.keys(), key=lambda item: ( { RESOURCE_TYPE_RESOURCE: 1, RESOURCE_TYPE_REGEXP: 0, }[item[0]], item[1], ), ): constrainthash = defaultdict(list) if not noheader: all_lines.append(" {0}".format(rsc[2])) for rule in ruleshash[rsc]: constraint_id = rule.parentNode.getAttribute("id") constrainthash[constraint_id].append(rule) constraint_options[constraint_id] = [] if rule.parentNode.getAttribute("resource-discovery"): constraint_options[constraint_id].append( "resource-discovery=%s" % rule.parentNode.getAttribute("resource-discovery") ) for constraint_id in sorted(constrainthash.keys()): if ( constraint_id in constraint_options and constraint_options[constraint_id] ): constraint_option_info = ( " (" + " ".join(constraint_options[constraint_id]) + ")" ) else: constraint_option_info = "" rule_lines = [] # When expiration check is needed, starting value should be True and # when it's not, check is skipped so the initial value must be False # to print the constraint is_constraint_expired = verify_expiration for rule in constrainthash[constraint_id]: rule_status = RULE_UNKNOWN_STATUS if verify_expiration: rule_status = _get_rule_status(rule.getAttribute("id"), cib) if rule_status != RULE_EXPIRED: is_constraint_expired = False rule_lines.append( rule_utils.ExportDetailed().get_string( rule, rule_status == RULE_EXPIRED and show_expired, show_detail, indent=" ", ) ) if not show_expired and is_constraint_expired: continue all_lines.append( " Constraint{0}: {1}{2}".format( " (expired)" if is_constraint_expired else "", constraint_id, constraint_option_info, ) ) all_lines += rule_lines return all_lines def _verify_node_name(node, existing_nodes): report_list = [] if node not 
def _verify_node_name(node, existing_nodes):
    report_list = []
    if node not in existing_nodes:
        report_list.append(
            ReportItem.error(
                reports.messages.NodeNotFound(node),
                force_code=reports.codes.FORCE,
            )
        )
    return report_list


def _verify_score(score):
    if not utils.is_score(score):
        utils.err(
            "invalid score '%s', use integer or INFINITY or -INFINITY" % score
        )


def _get_rule_status(rule_id, cib):
    _, _, retval = utils.cmd_runner().run(
        [settings.crm_rule, "--check", "--rule", rule_id, "--xml-text", "-"],
        cib,
    )
    translation_map = {
        CrmRuleReturnCode.IN_EFFECT.value: RULE_IN_EFFECT,
        CrmRuleReturnCode.EXPIRED.value: RULE_EXPIRED,
        CrmRuleReturnCode.TO_BE_IN_EFFECT.value: RULE_NOT_IN_EFFECT,
    }
    return translation_map.get(retval, RULE_UNKNOWN_STATUS)


def location_prefer(lib, argv, modifiers):
    """
    Options:
      * --force - allow unknown options, allow constraint for any resource
        type
      * -f - CIB file
    """
    modifiers.ensure_only_supported("--force", "-f")
    rsc = argv.pop(0)
    prefer_option = argv.pop(0)

    dummy_rsc_type, rsc_value = parse_args.parse_typed_arg(
        rsc,
        [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
        RESOURCE_TYPE_RESOURCE,
    )

    if prefer_option == "prefers":
        prefer = True
    elif prefer_option == "avoids":
        prefer = False
    else:
        raise CmdLineInputError()

    skip_node_check = False
    if modifiers.is_specified("-f") or modifiers.get("--force"):
        skip_node_check = True
        warn(LOCATION_NODE_VALIDATION_SKIP_MSG)
    else:
        lib_env = utils.get_lib_env()
        existing_nodes, report_list = get_existing_nodes_names(
            corosync_conf=lib_env.get_corosync_conf(),
            cib=lib_env.get_cib(),
        )
        if report_list:
            process_library_reports(report_list)

    report_list = []
    parameters_list = []
    for nodeconf in argv:
        nodeconf_a = nodeconf.split("=", 1)
        node = nodeconf_a[0]
        if not skip_node_check:
            report_list += _verify_node_name(node, existing_nodes)
        if len(nodeconf_a) == 1:
            if prefer:
                score = "INFINITY"
            else:
                score = "-INFINITY"
        else:
            score = nodeconf_a[1]
            _verify_score(score)
            if not prefer:
                if score[0] == "-":
                    score = score[1:]
                else:
                    score = "-" + score
        parameters_list.append(
            [
                sanitize_id(f"location-{rsc_value}-{node}-{score}"),
                rsc,
                node,
                score,
            ]
        )

    if report_list:
        process_library_reports(report_list)

    modifiers = modifiers.get_subset("--force", "-f")
    for parameters in parameters_list:
        location_add(
            lib, parameters, modifiers, skip_score_and_node_check=True
        )
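# Illustrative sketch of the score handling in location_prefer above:
# "prefers" keeps the given score (INFINITY when omitted), while "avoids"
# flips the sign of an explicit score and defaults to -INFINITY. For a
# hypothetical command line
#
#   pcs constraint location D1 avoids node1=100 node2
#
# the parameter lists handed to location_add would be
#
#   ["location-D1-node1--100", "D1", "node1", "-100"]
#   ["location-D1-node2--INFINITY", "D1", "node2", "-INFINITY"]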
def location_add(lib, argv, modifiers, skip_score_and_node_check=False):
    """
    Options:
      * --force - allow unknown options, allow constraint for any resource
        type
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("--force", "-f")
    if len(argv) < 4:
        raise CmdLineInputError()

    constraint_id = argv.pop(0)
    rsc_type, rsc_value = parse_args.parse_typed_arg(
        argv.pop(0),
        [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
        RESOURCE_TYPE_RESOURCE,
    )
    node = argv.pop(0)
    score = argv.pop(0)
    options = []

    # For now we only allow setting resource-discovery
    if argv:
        for arg in argv:
            if "=" in arg:
                options.append(arg.split("=", 1))
            else:
                raise CmdLineInputError(f"bad option '{arg}'")
            if options[-1][0] != "resource-discovery" and not modifiers.get(
                "--force"
            ):
                utils.err(
                    "bad option '%s', use --force to override" % options[-1][0]
                )

    # Verify that specified node exists in the cluster and score is valid
    if not skip_score_and_node_check:
        if modifiers.is_specified("-f") or modifiers.get("--force"):
            warn(LOCATION_NODE_VALIDATION_SKIP_MSG)
        else:
            lib_env = utils.get_lib_env()
            existing_nodes, report_list = get_existing_nodes_names(
                corosync_conf=lib_env.get_corosync_conf(),
                cib=lib_env.get_cib(),
            )
            report_list += _verify_node_name(node, existing_nodes)
            if report_list:
                process_library_reports(report_list)
        _verify_score(score)

    id_valid, id_error = utils.validate_xml_id(constraint_id, "constraint id")
    if not id_valid:
        utils.err(id_error)

    dom = utils.get_cib_dom()

    if rsc_type == RESOURCE_TYPE_RESOURCE:
        (
            rsc_valid,
            rsc_error,
            dummy_correct_id,
        ) = utils.validate_constraint_resource(dom, rsc_value)
        if not rsc_valid:
            utils.err(rsc_error)

    # Verify current constraint doesn't already exist
    # If it does we replace it with the new constraint
    dummy_dom, constraintsElement = getCurrentConstraints(dom)
    elementsToRemove = []
    # If the id matches, or the rsc & node match, then we replace/remove
    for rsc_loc in constraintsElement.getElementsByTagName("rsc_location"):
        # pylint: disable=too-many-boolean-expressions
        if rsc_loc.getAttribute("id") == constraint_id or (
            rsc_loc.getAttribute("node") == node
            and (
                (
                    RESOURCE_TYPE_RESOURCE == rsc_type
                    and rsc_loc.getAttribute("rsc") == rsc_value
                )
                or (
                    RESOURCE_TYPE_REGEXP == rsc_type
                    and rsc_loc.getAttribute("rsc-pattern") == rsc_value
                )
            )
        ):
            elementsToRemove.append(rsc_loc)
    for etr in elementsToRemove:
        constraintsElement.removeChild(etr)

    element = dom.createElement("rsc_location")
    element.setAttribute("id", constraint_id)
    if rsc_type == RESOURCE_TYPE_RESOURCE:
        element.setAttribute("rsc", rsc_value)
    elif rsc_type == RESOURCE_TYPE_REGEXP:
        element.setAttribute("rsc-pattern", rsc_value)
    element.setAttribute("node", node)
    element.setAttribute("score", score)
    for option in options:
        element.setAttribute(option[0], option[1])
    constraintsElement.appendChild(element)

    utils.replace_cib_configuration(dom)
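# Illustrative result of location_add (attribute values depend on the
# arguments; this XML is a sketch, not verbatim output of the code above):
#
#   <rsc_location id="location-D1-node1-INFINITY" rsc="D1" node="node1"
#       score="INFINITY"/>
#
# A regexp-typed resource argument (assuming the usual "regexp%" prefix
# syntax handled by parse_args.parse_typed_arg) would set
# rsc-pattern="^dummy" instead of rsc.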
def location_remove(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
    """
    # This code was originally merged in the location_add function and was
    # documented to take 1 or 4 arguments:
    #   location remove <constraint id> [<resource id> <node> <score>]
    # However it has always ignored all arguments but constraint id. Therefore
    # this command / function has no use as it can be fully replaced by "pcs
    # constraint remove" which also removes constraints by id. For now I keep
    # things as they are but we should solve this when moving these functions
    # to pcs.lib.
    del lib
    modifiers.ensure_only_supported("-f")
    if len(argv) != 1:
        raise CmdLineInputError()

    constraint_id = argv.pop(0)
    dom, constraintsElement = getCurrentConstraints()
    elementsToRemove = []
    for rsc_loc in constraintsElement.getElementsByTagName("rsc_location"):
        if constraint_id == rsc_loc.getAttribute("id"):
            elementsToRemove.append(rsc_loc)
    if not elementsToRemove:
        utils.err("resource location id: " + constraint_id + " not found.")
    for etr in elementsToRemove:
        constraintsElement.removeChild(etr)

    utils.replace_cib_configuration(dom)


def location_rule(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
      * --force - allow constraint on any resource type, allow duplicate
        constraints
    """
    del lib
    modifiers.ensure_only_supported("-f", "--force")
    if len(argv) < 3:
        usage.constraint(["location", "rule"])
        sys.exit(1)

    rsc_type, rsc_value = parse_args.parse_typed_arg(
        argv.pop(0),
        [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
        RESOURCE_TYPE_RESOURCE,
    )
    argv.pop(0)  # pop "rule"
    options, rule_argv = rule_utils.parse_argv(
        argv,
        {
            "constraint-id": None,
            "resource-discovery": None,
        },
    )
    resource_discovery = (
        "resource-discovery" in options and options["resource-discovery"]
    )

    try:
        # Parse the rule to see if we need to upgrade CIB schema. All errors
        # would be properly reported by a validator called below, so we can
        # safely ignore them here.
        parsed_rule = rule_utils.RuleParser().parse(
            rule_utils.TokenPreprocessor().run(rule_argv)
        )
        if rule_utils.has_node_attr_expr_with_type_integer(parsed_rule):
            utils.checkAndUpgradeCIB(
                const.PCMK_RULES_NODE_ATTR_EXPR_WITH_INT_TYPE_CIB_VERSION
            )
    except (rule_utils.ParserException, rule_utils.CibBuilderException):
        pass

    dom = utils.get_cib_dom()

    if rsc_type == RESOURCE_TYPE_RESOURCE:
        (
            rsc_valid,
            rsc_error,
            dummy_correct_id,
        ) = utils.validate_constraint_resource(dom, rsc_value)
        if not rsc_valid:
            utils.err(rsc_error)

    cib, constraints = getCurrentConstraints(dom)
    lc = cib.createElement("rsc_location")

    # If resource-discovery is specified, we use it with the rsc_location
    # element, not the rule
    if resource_discovery:
        lc.setAttribute(
            "resource-discovery", options.pop("resource-discovery")
        )

    constraints.appendChild(lc)
    if options.get("constraint-id"):
        id_valid, id_error = utils.validate_xml_id(
            options["constraint-id"], "constraint id"
        )
        if not id_valid:
            utils.err(id_error)
        if utils.does_id_exist(dom, options["constraint-id"]):
            utils.err(
                "id '%s' is already in use, please specify another one"
                % options["constraint-id"]
            )
        lc.setAttribute("id", options["constraint-id"])
        del options["constraint-id"]
    else:
        lc.setAttribute(
            "id",
            utils.find_unique_id(dom, sanitize_id("location-" + rsc_value)),
        )
    if rsc_type == RESOURCE_TYPE_RESOURCE:
        lc.setAttribute("rsc", rsc_value)
    elif rsc_type == RESOURCE_TYPE_REGEXP:
        lc.setAttribute("rsc-pattern", rsc_value)

    rule_utils.dom_rule_add(
        lc, options, rule_argv, utils.getValidateWithVersion(cib)
    )
    location_rule_check_duplicates(constraints, lc, modifiers.get("--force"))
    utils.replace_cib_configuration(cib)


def location_rule_check_duplicates(dom, constraint_el, force):
    """
    Commandline options: no options
    """
    if not force:
        duplicates = location_rule_find_duplicates(dom, constraint_el)
        if duplicates:
            lines = []
            for dup in duplicates:
                lines.append("  Constraint: %s" % dup.getAttribute("id"))
                for dup_rule in utils.dom_get_children_by_tag_name(
                    dup, "rule"
                ):
                    lines.append(
                        rule_utils.ExportDetailed().get_string(
                            dup_rule, False, True, indent="    "
                        )
                    )
            utils.err(
                "duplicate constraint already exists, use --force to override\n"
                + "\n".join(lines)
            )


def location_rule_find_duplicates(dom, constraint_el):
    """
    Commandline options: no options
    """

    def normalize(constraint_el):
        if constraint_el.hasAttribute("rsc-pattern"):
            rsc = (
                RESOURCE_TYPE_REGEXP,
                constraint_el.getAttribute("rsc-pattern"),
            )
        else:
            rsc = (RESOURCE_TYPE_RESOURCE, constraint_el.getAttribute("rsc"))
        return (
            rsc,
            [
                rule_utils.ExportAsExpression().get_string(rule_el, True)
                for rule_el in constraint_el.getElementsByTagName("rule")
            ],
        )

    normalized_el = normalize(constraint_el)
    return [
        other_el
        for other_el in dom.getElementsByTagName("rsc_location")
        if other_el.getElementsByTagName("rule")
        and constraint_el is not other_el
        and normalized_el == normalize(other_el)
    ]


# Grabs the current constraints and returns the dom and constraint element
def getCurrentConstraints(passed_dom=None):
    """
    Commandline options:
      * -f - CIB file, only if passed_dom is None
    """
    if passed_dom:
        dom = passed_dom
    else:
        current_constraints_xml = utils.get_cib_xpath("//constraints")
        if current_constraints_xml == "":
            utils.err("unable to process cib")
        dom = parseString(current_constraints_xml)
    constraintsElement = dom.getElementsByTagName("constraints")[0]
    return (dom, constraintsElement)
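# Sketch of the typical call pattern around getCurrentConstraints
# (illustrative; my_dom stands for any CIB DOM the caller already holds):
#
#   dom, constraints_el = getCurrentConstraints()        # reads the CIB
#   dom, constraints_el = getCurrentConstraints(my_dom)  # reuses my_dom
#
# Callers mutate constraints_el in place and then push the change back with
# utils.replace_cib_configuration(dom).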
# If returnStatus is set, then we don't error out, we just print the error
# and return false
def constraint_rm(
    lib,
    argv,
    modifiers,
    returnStatus=False,
    constraintsElement=None,
    passed_dom=None,
):
    """
    Options:
      * -f - CIB file, effective only if passed_dom is None
    """
    if passed_dom is None:
        modifiers.ensure_only_supported("-f")
    if not argv:
        raise CmdLineInputError()

    bad_constraint = False
    if len(argv) != 1:
        for arg in argv:
            if not constraint_rm(
                lib, [arg], modifiers, returnStatus=True, passed_dom=passed_dom
            ):
                bad_constraint = True
        if bad_constraint:
            sys.exit(1)
        return None

    c_id = argv.pop(0)
    elementFound = False

    if not constraintsElement:
        (dom, constraintsElement) = getCurrentConstraints(passed_dom)
        use_cibadmin = True
    else:
        use_cibadmin = False

    for co in constraintsElement.childNodes[:]:
        if co.nodeType != xml.dom.Node.ELEMENT_NODE:
            continue
        if co.getAttribute("id") == c_id:
            constraintsElement.removeChild(co)
            elementFound = True

    if not elementFound:
        for rule in constraintsElement.getElementsByTagName("rule")[:]:
            if rule.getAttribute("id") == c_id:
                elementFound = True
                parent = rule.parentNode
                parent.removeChild(rule)
                if not parent.getElementsByTagName("rule"):
                    parent.parentNode.removeChild(parent)

    if elementFound:
        if passed_dom:
            return dom
        if use_cibadmin:
            utils.replace_cib_configuration(dom)
        if returnStatus:
            return True
    else:
        utils.err("Unable to find constraint - '%s'" % c_id, False)
        if returnStatus:
            return False
        sys.exit(1)
    return None


def constraint_ref(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("-f")
    if not argv:
        raise CmdLineInputError()

    for arg in argv:
        print("Resource: %s" % arg)
        constraints, set_constraints = find_constraints_containing(arg)
        if not constraints and not set_constraints:
            print("  No Matches.")
        else:
            for constraint in constraints:
                print("  " + constraint)
            for constraint in sorted(set_constraints):
                print("  " + constraint)


def remove_constraints_containing(
    resource_id, output=False, constraints_element=None, passed_dom=None
):
    """
    Commandline options:
      * -f - CIB file, effective only if passed_dom is None
    """
    lib = utils.get_library_wrapper()
    modifiers = utils.get_input_modifiers()
    constraints, set_constraints = find_constraints_containing(
        resource_id, passed_dom
    )
    for c in constraints:
        if output:
            print("Removing Constraint - " + c)
        if constraints_element is not None:
            constraint_rm(
                lib,
                [c],
                modifiers,
                True,
                constraints_element,
                passed_dom=passed_dom,
            )
        else:
            constraint_rm(lib, [c], modifiers, passed_dom=passed_dom)

    if set_constraints:
        (dom, constraintsElement) = getCurrentConstraints(passed_dom)
        for c in constraintsElement.getElementsByTagName("resource_ref")[:]:
            # If resource id is in a set, remove it from the set; if the set
            # is then empty, remove the set as well; if the parent of the set
            # becomes empty, remove it too
            if c.getAttribute("id") == resource_id:
                pn = c.parentNode
                pn.removeChild(c)
                if output:
                    print(
                        "Removing %s from set %s"
                        % (resource_id, pn.getAttribute("id"))
                    )
                if pn.getElementsByTagName("resource_ref").length == 0:
                    print("Removing set %s" % pn.getAttribute("id"))
                    pn2 = pn.parentNode
                    pn2.removeChild(pn)
                    if pn2.getElementsByTagName("resource_set").length == 0:
                        pn2.parentNode.removeChild(pn2)
                        print(
                            "Removing constraint %s" % pn2.getAttribute("id")
                        )
        if passed_dom:
            return dom
        utils.replace_cib_configuration(dom)
    return None
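# The cascade implemented above, shown on a hypothetical set constraint
# (illustrative XML, not produced by this module):
#
#   <rsc_order id="order-set">
#     <resource_set id="set-1">
#       <resource_ref id="D1"/>
#     </resource_set>
#   </rsc_order>
#
# Removing resource D1 empties set-1, which removes set-1; that in turn
# empties order-set, so the whole constraint is removed.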
def find_constraints_containing(resource_id, passed_dom=None):
    """
    Commandline options:
      * -f - CIB file, effective only if passed_dom is None
    """
    if passed_dom:
        dom = passed_dom
    else:
        dom = utils.get_cib_dom()
    constraints_found = []
    set_constraints = []

    resources = dom.getElementsByTagName("primitive")
    resource_match = None
    for res in resources:
        if res.getAttribute("id") == resource_id:
            resource_match = res
            break

    if resource_match:
        if (
            resource_match.parentNode.tagName == "master"
            or resource_match.parentNode.tagName == "clone"
        ):
            constraints_found, set_constraints = find_constraints_containing(
                resource_match.parentNode.getAttribute("id"), dom
            )

    constraints = dom.getElementsByTagName("constraints")
    if not constraints:
        return [], []
    constraints = constraints[0]

    myConstraints = constraints.getElementsByTagName("rsc_colocation")
    myConstraints += constraints.getElementsByTagName("rsc_location")
    myConstraints += constraints.getElementsByTagName("rsc_order")
    myConstraints += constraints.getElementsByTagName("rsc_ticket")
    attr_to_match = ["rsc", "first", "then", "with-rsc"]
    for c in myConstraints:
        for attr in attr_to_match:
            if c.getAttribute(attr) == resource_id:
                constraints_found.append(c.getAttribute("id"))
                break

    setConstraints = constraints.getElementsByTagName("resource_ref")
    for c in setConstraints:
        if c.getAttribute("id") == resource_id:
            set_constraints.append(c.parentNode.parentNode.getAttribute("id"))

    # Remove duplicates
    set_constraints = list(set(set_constraints))
    return constraints_found, set_constraints


def remove_constraints_containing_node(dom, node, output=False):
    """
    Commandline options: no options
    """
    for constraint in find_constraints_containing_node(dom, node):
        if output:
            print("Removing Constraint - %s" % constraint.getAttribute("id"))
        constraint.parentNode.removeChild(constraint)
    return dom


def find_constraints_containing_node(dom, node):
    """
    Commandline options: no options
    """
    return [
        constraint
        for constraint in dom.getElementsByTagName("rsc_location")
        if constraint.getAttribute("node") == node
    ]


# Re-assign any constraints referencing a resource to its parent (a clone
# or master)
def constraint_resource_update(old_id, dom):
    """
    Commandline options: no options
    """
    new_id = None
    clone_ms_parent = utils.dom_get_resource_clone_ms_parent(dom, old_id)
    if clone_ms_parent:
        new_id = clone_ms_parent.getAttribute("id")

    if new_id:
        constraints = dom.getElementsByTagName("rsc_location")
        constraints += dom.getElementsByTagName("rsc_order")
        constraints += dom.getElementsByTagName("rsc_colocation")
        attrs_to_update = ["rsc", "first", "then", "with-rsc"]
        for constraint in constraints:
            for attr in attrs_to_update:
                if constraint.getAttribute(attr) == old_id:
                    constraint.setAttribute(attr, new_id)
    return dom
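# Illustrative effect of constraint_resource_update: if resource "D1" has
# been wrapped in clone "D1-clone", a constraint such as
#
#   <rsc_location id="loc-D1" rsc="D1" node="node1" score="INFINITY"/>
#
# is rewritten in place to reference the clone, i.e. rsc="D1-clone"
# (the same applies to the first/then/with-rsc attributes of order and
# colocation constraints).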
def constraint_rule(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file
      * --force - allow duplicate constraints, only for add command

    NOTE: modifiers check is in subcommand
    """
    del lib
    if len(argv) < 2:
        raise CmdLineInputError()

    found = False
    command = argv.pop(0)

    constraint_id = None

    if command == "add":
        modifiers.ensure_only_supported("-f", "--force")
        constraint_id = argv.pop(0)
        options, rule_argv = rule_utils.parse_argv(argv)
        try:
            # Parse the rule to see if we need to upgrade CIB schema. All
            # errors would be properly reported by a validator called below,
            # so we can safely ignore them here.
            parsed_rule = rule_utils.RuleParser().parse(
                rule_utils.TokenPreprocessor().run(rule_argv)
            )
            if rule_utils.has_node_attr_expr_with_type_integer(parsed_rule):
                utils.checkAndUpgradeCIB(
                    const.PCMK_RULES_NODE_ATTR_EXPR_WITH_INT_TYPE_CIB_VERSION
                )
        except (rule_utils.ParserException, rule_utils.CibBuilderException):
            pass
        cib = utils.get_cib_dom()
        constraint = utils.dom_get_element_with_id(
            cib.getElementsByTagName("constraints")[0],
            "rsc_location",
            constraint_id,
        )
        if not constraint:
            utils.err("Unable to find constraint: " + constraint_id)
        rule_utils.dom_rule_add(
            constraint,
            options,
            rule_argv,
            utils.getValidateWithVersion(cib),
        )
        location_rule_check_duplicates(
            cib, constraint, modifiers.get("--force")
        )
        utils.replace_cib_configuration(cib)
    elif command in ["remove", "delete"]:
        modifiers.ensure_only_supported("-f")
        cib = utils.get_cib_etree()
        temp_id = argv.pop(0)
        constraints = cib.find(".//constraints")
        loc_cons = cib.findall(str(".//rsc_location"))

        for loc_con in loc_cons:
            for rule in loc_con:
                if rule.get("id") == temp_id:
                    if len(loc_con) > 1:
                        print("Removing Rule: {0}".format(rule.get("id")))
                        loc_con.remove(rule)
                        found = True
                    else:
                        print(
                            "Removing Constraint: {0}".format(
                                loc_con.get("id")
                            )
                        )
                        constraints.remove(loc_con)
                        found = True
                    break
            if found:
                break

        if found:
            utils.replace_cib_configuration(cib)
        else:
            utils.err("unable to find rule with id: %s" % temp_id)
    else:
        raise CmdLineInputError()
pcs-0.10.11/pcs/daemon/000077500000000000000000000000001412706364600145005ustar00rootroot00000000000000pcs-0.10.11/pcs/daemon/__init__.py000066400000000000000000000000001412706364600165770ustar00rootroot00000000000000pcs-0.10.11/pcs/daemon/app/000077500000000000000000000000001412706364600152605ustar00rootroot00000000000000pcs-0.10.11/pcs/daemon/app/__init__.py000066400000000000000000000000001412706364600173570ustar00rootroot00000000000000pcs-0.10.11/pcs/daemon/app/common.py000066400000000000000000000067021412706364600171270ustar00rootroot00000000000000from tornado.web import (
    RequestHandler,
    RedirectHandler as TornadoRedirectHandler,
)


class EnhanceHeadersMixin:
    """
    EnhanceHeadersMixin allows adding security headers to GUI urls.
    """

    def set_strict_transport_security(self):
        # rhbz 1558063
        # The HTTP Strict-Transport-Security response header (often
        # abbreviated as HSTS) lets a web site tell browsers that it should
        # only be accessed using HTTPS, instead of using HTTP.
        self.set_header("Strict-Transport-Security", "max-age=604800")

    def set_header_nosniff_content_type(self):
        # The X-Content-Type-Options response HTTP header is a marker used by
        # the server to indicate that the MIME types advertised in the
        # Content-Type headers should not be changed and be followed. This
        # allows opting out of MIME type sniffing, or, in other words, it is
        # a way to say that the webmasters knew what they were doing.
        self.set_header("X-Content-Type-Options", "nosniff")

    def enhance_headers(self):
        self.set_header_nosniff_content_type()

        # The X-Frame-Options HTTP response header can be used to indicate
        # whether or not a browser should be allowed to render a page in a
        # <frame>,